/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback; /* loopback mode disabled by default */
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
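/*
 * Example (editorial note): the parameters above are set at module load
 * time, e.g. "modprobe hfi1 num_vls=4 loopback=1", and are readable
 * afterwards under /sys/module/hfi1/parameters/ (S_IRUGO = read-only).
 */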
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
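/*
 * Illustrative sketch (editorial addition, not part of the driver): a
 * flag_table is decoded by testing each entry's mask against a raw
 * error-status value; the driver's error-string formatting helpers do
 * the equivalent of this loop.
 */
static inline void example_decode_err_status(const struct flag_table *table,
					     size_t num_entries, u64 reg)
{
	size_t i;

	/* print the description of every error bit that is set */
	for (i = 0; i < num_entries; i++)
		if (reg & table[i].flag)
			pr_info("%s\n", table[i].str);
}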
/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1
/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS 0
#define RSM_INS_FECN 1
#define RSM_INS_VNIC 2

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
/* QPN[7..1] */
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
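/*
 * Worked example (editorial note): RSM offsets pack the quad-word index
 * above the bit offset, so LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48,
 * while QPN_SELECT_OFFSET = (1 << 6) | 1 = 65, i.e. QW 1, bit 1.
 */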
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
(	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)
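/*
 * Usage sketch (editorial addition; the VL assignments and the
 * SEND_SC2VLT0 register name are illustrative, inferred from the
 * SEND_SC2VLT##num tokens above): the packed value is written to the
 * matching SendSC2VLt CSR, e.g.
 *
 *	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
 *		0,
 *		0, 0, 1, 0, 2, 1, 3, 1,
 *		4, 2, 5, 2, 6, 3, 7, 3));
 */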
#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
(	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT)   \
)
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
/*
 * CCE Error flags
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
};
/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 * (editorial note: the consequence values below mirror membership in
 * ALL_PIO_FREEZE_ERR further down: freeze-causing errors carry
 * SEC_SPC_FREEZE, correctable errors carry 0)
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
};
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
990 FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
991 FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
992 FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
993 FLAG_ENTRY0("Serdes internal loopback failure",
994 FAILED_SERDES_INTERNAL_LOOPBACK),
995 FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
996 FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
997 FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
998 FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
999 FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
1000 FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1001 FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1002 FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
1003 FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
1004 FLAG_ENTRY0("External Device Request Timeout",
1005 EXTERNAL_DEVICE_REQ_TIMEOUT),
1009 * DC8051 Information Host Information flags
1011 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1013 static struct flag_table dc8051_info_host_msg_flags[] = {
1014 FLAG_ENTRY0("Host request done", 0x0001),
1015 FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1016 FLAG_ENTRY0("BC SMA message", 0x0004),
1017 FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1018 FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1019 FLAG_ENTRY0("External device config request", 0x0020),
1020 FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1021 FLAG_ENTRY0("LinkUp achieved", 0x0080),
1022 FLAG_ENTRY0("Link going down", 0x0100),
1023 FLAG_ENTRY0("Link width downgraded", 0x0200),
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
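/*
 * Expansion example (editorial note):
 *	EE(CCE_ERR, handle_cce_err, "CceErr")
 * becomes
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *	  handle_cce_err, "CceErr" }
 * The DC_EE1/DC_EE2 variants exist only because the DC blocks name
 * their status/clear/enable CSRs inconsistently.
 */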
/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register cannot be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be derived.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};
/*
 * Per-counter descriptor; the CNTR_ELEM() macro below fills the
 * fields in this order.
 */
struct cntr_entry {
	/* counter name */
	char *name;
	/* csr to read for name (if applicable) */
	u64 csr;
	/* offset into dd or ppd to store the counter's value */
	int offset;
	/* flags */
	u32 flags;
	/* accessor for stat element, context either dd or ppd */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};
#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)
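/*
 * Example (editorial note): OVR_ELM(5) defines a counter named
 * "RcvHdrOvr5" that reads RCV_HDR_OVFL_CNT + 5 * 0x100; the receive
 * header overflow CSRs are replicated per context at a 0x100 stride.
 */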
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, flags, \
	  dev_access_u64_csr)

#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)
/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}
/**
 * read_csr - read CSR at the indicated offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no device
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}
/**
 * write_csr - write CSR at the indicated offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 * @value - value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}
/**
 * get_csr_addr - return the iomem address for offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}
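/*
 * Usage sketch (editorial addition; CCE_REVISION is assumed here to be
 * a readable CSR offset): callers issuing repeated accesses can cache
 * the mapped address once:
 *
 *	void __iomem *addr = get_csr_addr(dd, CCE_REVISION);
 *	u64 rev = addr ? readq(addr) : 0;
 */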
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;	/* per-SDMA-engine CSR stride */
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;	/* per-VL counters are consecutive u64s */
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}
1469 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1470 int vl, int mode, u64 data)
1471 {
1472 struct hfi1_devdata *dd = context;
1473 u32 csr = entry->csr;
1474 int ret = 0;
1476 if (vl != CNTR_INVALID_VL)
1477 return 0;
1478 if (mode == CNTR_MODE_R)
1479 ret = read_lcb_csr(dd, csr, &data);
1480 else if (mode == CNTR_MODE_W)
1481 ret = write_lcb_csr(dd, csr, data);
1483 if (ret) {
1484 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1485 return 0;
1486 }
1488 hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1489 return data;
1490 }
1493 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1494 int vl, int mode, u64 data)
1495 {
1496 struct hfi1_pportdata *ppd = context;
1498 if (vl != CNTR_INVALID_VL)
1499 return 0;
1500 return read_write_csr(ppd->dd, entry->csr, mode, data);
1501 }
1503 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1504 void *context, int vl, int mode, u64 data)
1505 {
1506 struct hfi1_pportdata *ppd = context;
1507 u64 val;
1508 u64 csr = entry->csr;
1510 if (entry->flags & CNTR_VL) {
1511 if (vl == CNTR_INVALID_VL)
1512 return 0;
1513 csr += 8 * vl;
1514 } else {
1515 if (vl != CNTR_INVALID_VL)
1516 return 0;
1517 }
1518 val = read_write_csr(ppd->dd, csr, mode, data);
1519 return val;
1520 }
1522 /* Software defined */
1523 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1524 u64 data)
1525 {
1526 u64 ret;
1528 if (mode == CNTR_MODE_R) {
1529 ret = *cntr;
1530 } else if (mode == CNTR_MODE_W) {
1531 *cntr = data;
1532 ret = data;
1533 } else {
1534 dd_dev_err(dd, "Invalid cntr sw access mode");
1535 return 0;
1536 }
1538 hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1540 return ret;
1541 }
1543 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1544 int vl, int mode, u64 data)
1545 {
1546 struct hfi1_pportdata *ppd = context;
1548 if (vl != CNTR_INVALID_VL)
1549 return 0;
1550 return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1551 }
1553 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1554 int vl, int mode, u64 data)
1555 {
1556 struct hfi1_pportdata *ppd = context;
1558 if (vl != CNTR_INVALID_VL)
1559 return 0;
1560 return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1561 }
1563 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1564 void *context, int vl, int mode,
1565 u64 data)
1566 {
1567 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1569 if (vl != CNTR_INVALID_VL)
1570 return 0;
1571 return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1572 }
1574 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1575 void *context, int vl, int mode, u64 data)
1576 {
1577 struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1578 u64 zero = 0;
1579 u64 *counter;
1581 if (vl == CNTR_INVALID_VL)
1582 counter = &ppd->port_xmit_discards;
1583 else if (vl >= 0 && vl < C_VL_COUNT)
1584 counter = &ppd->port_xmit_discards_vl[vl];
1585 else
1586 counter = &zero;
1588 return read_write_sw(ppd->dd, counter, mode, data);
1589 }
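/*
 * Sketch of the counter-selection rule used by access_sw_xmit_discards()
 * above: an invalid VL selects the port aggregate, a valid VL selects the
 * per-VL slot, and anything else lands in a harmless scratch slot so the
 * read/write still has a target. All names below are hypothetical.
 */
#include <stdint.h>

#define N_VLS 8	/* hypothetical per-VL slot count */

struct port_counters {
	uint64_t xmit_discards;			/* port aggregate */
	uint64_t xmit_discards_vl[N_VLS];	/* per-VL counters */
};

static uint64_t *select_discard_counter(struct port_counters *pc, int vl,
					uint64_t *scratch)
{
	if (vl < 0)		/* stand-in for CNTR_INVALID_VL */
		return &pc->xmit_discards;
	if (vl < N_VLS)
		return &pc->xmit_discards_vl[vl];
	*scratch = 0;		/* out of range: throwaway sink */
	return scratch;
}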
1591 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1592 void *context, int vl, int mode,
1593 u64 data)
1594 {
1595 struct hfi1_pportdata *ppd = context;
1597 if (vl != CNTR_INVALID_VL)
1598 return 0;
1600 return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1601 mode, data);
1602 }
1604 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1605 void *context, int vl, int mode, u64 data)
1606 {
1607 struct hfi1_pportdata *ppd = context;
1609 if (vl != CNTR_INVALID_VL)
1610 return 0;
1612 return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1613 mode, data);
1614 }
1616 u64 get_all_cpu_total(u64 __percpu *cntr)
1617 {
1618 int cpu;
1619 u64 counter = 0;
1621 for_each_possible_cpu(cpu)
1622 counter += *per_cpu_ptr(cntr, cpu);
1623 return counter;
1624 }
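/*
 * get_all_cpu_total() folds every CPU's slot into one value. A standalone
 * model, with a fixed array standing in for the kernel's per-CPU
 * allocation (N_CPUS is a hypothetical CPU count):
 */
#include <stdint.h>

#define N_CPUS 4

static uint64_t total_of(const uint64_t percpu[N_CPUS])
{
	uint64_t total = 0;

	/* same fold as the for_each_possible_cpu() loop above */
	for (int cpu = 0; cpu < N_CPUS; cpu++)
		total += percpu[cpu];
	return total;
}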
1626 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1627 u64 __percpu *cntr,
1628 int vl, int mode, u64 data)
1629 {
1630 u64 ret = 0;
1632 if (vl != CNTR_INVALID_VL)
1633 return 0;
1635 if (mode == CNTR_MODE_R) {
1636 ret = get_all_cpu_total(cntr) - *z_val;
1637 } else if (mode == CNTR_MODE_W) {
1638 /* A write can only zero the counter */
1639 if (data == 0)
1640 *z_val = get_all_cpu_total(cntr);
1641 else
1642 dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1643 } else {
1644 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1645 return 0;
1646 }
1648 return ret;
1649 }
1651 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1652 void *context, int vl, int mode, u64 data)
1653 {
1654 struct hfi1_devdata *dd = context;
1656 return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1657 mode, data);
1658 }
1660 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1661 void *context, int vl, int mode, u64 data)
1662 {
1663 struct hfi1_devdata *dd = context;
1665 return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1666 mode, data);
1667 }
1669 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1670 void *context, int vl, int mode, u64 data)
1672 struct hfi1_devdata *dd = context;
1674 return dd->verbs_dev.n_piowait;
1677 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1678 void *context, int vl, int mode, u64 data)
1680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1682 return dd->verbs_dev.n_piodrain;
1685 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1686 void *context, int vl, int mode, u64 data)
1688 struct hfi1_devdata *dd = context;
1690 return dd->verbs_dev.n_txwait;
1693 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1694 void *context, int vl, int mode, u64 data)
1696 struct hfi1_devdata *dd = context;
1698 return dd->verbs_dev.n_kmem_wait;
1701 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1702 void *context, int vl, int mode, u64 data)
1703 {
1704 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1706 return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1707 mode, data);
1708 }
1710 /* Software counters for the error status bits within MISC_ERR_STATUS */
1711 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1712 void *context, int vl, int mode,
1715 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1717 return dd->misc_err_status_cnt[12];
1720 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1721 void *context, int vl, int mode,
1724 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1726 return dd->misc_err_status_cnt[11];
1729 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1730 void *context, int vl, int mode,
1733 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1735 return dd->misc_err_status_cnt[10];
1738 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1739 void *context, int vl,
1742 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1744 return dd->misc_err_status_cnt[9];
1747 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1748 void *context, int vl, int mode,
1751 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1753 return dd->misc_err_status_cnt[8];
1756 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1757 const struct cntr_entry *entry,
1758 void *context, int vl, int mode, u64 data)
1760 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1762 return dd->misc_err_status_cnt[7];
1765 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1766 void *context, int vl,
1769 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1771 return dd->misc_err_status_cnt[6];
1774 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1775 void *context, int vl, int mode,
1778 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1780 return dd->misc_err_status_cnt[5];
1783 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1784 void *context, int vl, int mode,
1787 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1789 return dd->misc_err_status_cnt[4];
1792 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1793 void *context, int vl,
1796 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1798 return dd->misc_err_status_cnt[3];
1801 static u64 access_misc_csr_write_bad_addr_err_cnt(
1802 const struct cntr_entry *entry,
1803 void *context, int vl, int mode, u64 data)
1805 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1807 return dd->misc_err_status_cnt[2];
1810 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1811 void *context, int vl,
1814 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1816 return dd->misc_err_status_cnt[1];
1819 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1820 void *context, int vl, int mode,
1823 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1825 return dd->misc_err_status_cnt[0];
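/*
 * Each accessor above reports one slot of misc_err_status_cnt[]; the slots
 * themselves are bumped elsewhere, one increment per status bit observed
 * set. A generic sketch of that fold (names are hypothetical):
 */
#include <stdint.h>

static void count_status_bits(uint64_t status, uint64_t *cnt, int nbits)
{
	for (int bit = 0; bit < nbits; bit++)
		if (status & (1ull << bit))
			cnt[bit]++;	/* e.g. cnt[12] <-> PLL lock fail above */
}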
1829 * Software counter for the aggregate of
1830 * individual CceErrStatus counters
1832 static u64 access_sw_cce_err_status_aggregated_cnt(
1833 const struct cntr_entry *entry,
1834 void *context, int vl, int mode, u64 data)
1836 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1838 return dd->sw_cce_err_status_aggregate;
1842 * Software counters corresponding to each of the
1843 * error status bits within CceErrStatus
1845 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1846 void *context, int vl, int mode,
1849 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1851 return dd->cce_err_status_cnt[40];
1854 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1855 void *context, int vl, int mode,
1858 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1860 return dd->cce_err_status_cnt[39];
1863 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1864 void *context, int vl, int mode,
1867 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1869 return dd->cce_err_status_cnt[38];
1872 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1873 void *context, int vl, int mode,
1876 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1878 return dd->cce_err_status_cnt[37];
1881 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1882 void *context, int vl, int mode,
1885 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1887 return dd->cce_err_status_cnt[36];
1890 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1891 const struct cntr_entry *entry,
1892 void *context, int vl, int mode, u64 data)
1894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1896 return dd->cce_err_status_cnt[35];
1899 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1900 const struct cntr_entry *entry,
1901 void *context, int vl, int mode, u64 data)
1903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1905 return dd->cce_err_status_cnt[34];
1908 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1909 void *context, int vl,
1912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1914 return dd->cce_err_status_cnt[33];
1917 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1918 void *context, int vl, int mode,
1921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1923 return dd->cce_err_status_cnt[32];
1926 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1927 void *context, int vl, int mode, u64 data)
1929 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1931 return dd->cce_err_status_cnt[31];
1934 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1935 void *context, int vl, int mode,
1938 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1940 return dd->cce_err_status_cnt[30];
1943 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1944 void *context, int vl, int mode,
1947 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1949 return dd->cce_err_status_cnt[29];
1952 static u64 access_pcic_transmit_back_parity_err_cnt(
1953 const struct cntr_entry *entry,
1954 void *context, int vl, int mode, u64 data)
1956 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1958 return dd->cce_err_status_cnt[28];
1961 static u64 access_pcic_transmit_front_parity_err_cnt(
1962 const struct cntr_entry *entry,
1963 void *context, int vl, int mode, u64 data)
1965 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1967 return dd->cce_err_status_cnt[27];
1970 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1971 void *context, int vl, int mode,
1974 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1976 return dd->cce_err_status_cnt[26];
1979 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1980 void *context, int vl, int mode,
1983 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1985 return dd->cce_err_status_cnt[25];
1988 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1989 void *context, int vl, int mode,
1992 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1994 return dd->cce_err_status_cnt[24];
1997 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1998 void *context, int vl, int mode,
2001 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2003 return dd->cce_err_status_cnt[23];
2006 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2007 void *context, int vl,
2010 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2012 return dd->cce_err_status_cnt[22];
2015 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2016 void *context, int vl, int mode,
2019 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2021 return dd->cce_err_status_cnt[21];
2024 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2025 const struct cntr_entry *entry,
2026 void *context, int vl, int mode, u64 data)
2028 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2030 return dd->cce_err_status_cnt[20];
2033 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2034 void *context, int vl,
2037 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2039 return dd->cce_err_status_cnt[19];
2042 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2043 void *context, int vl, int mode,
2046 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2048 return dd->cce_err_status_cnt[18];
2051 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2052 void *context, int vl, int mode,
2055 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2057 return dd->cce_err_status_cnt[17];
2060 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2061 void *context, int vl, int mode,
2064 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2066 return dd->cce_err_status_cnt[16];
2069 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2070 void *context, int vl, int mode,
2073 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2075 return dd->cce_err_status_cnt[15];
2078 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2079 void *context, int vl,
2082 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2084 return dd->cce_err_status_cnt[14];
2087 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2088 void *context, int vl, int mode,
2091 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2093 return dd->cce_err_status_cnt[13];
2096 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2097 const struct cntr_entry *entry,
2098 void *context, int vl, int mode, u64 data)
2100 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2102 return dd->cce_err_status_cnt[12];
2105 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2106 const struct cntr_entry *entry,
2107 void *context, int vl, int mode, u64 data)
2109 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2111 return dd->cce_err_status_cnt[11];
2114 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2115 const struct cntr_entry *entry,
2116 void *context, int vl, int mode, u64 data)
2118 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2120 return dd->cce_err_status_cnt[10];
2123 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2124 const struct cntr_entry *entry,
2125 void *context, int vl, int mode, u64 data)
2127 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2129 return dd->cce_err_status_cnt[9];
2132 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2133 const struct cntr_entry *entry,
2134 void *context, int vl, int mode, u64 data)
2136 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2138 return dd->cce_err_status_cnt[8];
2141 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2142 void *context, int vl,
2145 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2147 return dd->cce_err_status_cnt[7];
2150 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2151 const struct cntr_entry *entry,
2152 void *context, int vl, int mode, u64 data)
2154 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2156 return dd->cce_err_status_cnt[6];
2159 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2160 void *context, int vl, int mode,
2163 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2165 return dd->cce_err_status_cnt[5];
2168 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2169 void *context, int vl, int mode,
2172 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2174 return dd->cce_err_status_cnt[4];
2177 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2178 const struct cntr_entry *entry,
2179 void *context, int vl, int mode, u64 data)
2181 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2183 return dd->cce_err_status_cnt[3];
2186 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2187 void *context, int vl,
2190 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2192 return dd->cce_err_status_cnt[2];
2195 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2196 void *context, int vl,
2199 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2201 return dd->cce_err_status_cnt[1];
2204 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2205 void *context, int vl, int mode,
2208 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2210 return dd->cce_err_status_cnt[0];
2214 * Software counters corresponding to each of the
2215 * error status bits within RcvErrStatus
2217 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2218 void *context, int vl, int mode,
2221 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2223 return dd->rcv_err_status_cnt[63];
2226 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2227 void *context, int vl,
2230 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2232 return dd->rcv_err_status_cnt[62];
2235 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2236 void *context, int vl, int mode,
2239 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2241 return dd->rcv_err_status_cnt[61];
2244 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2245 void *context, int vl, int mode,
2248 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2250 return dd->rcv_err_status_cnt[60];
2253 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2254 void *context, int vl,
2257 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2259 return dd->rcv_err_status_cnt[59];
2262 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2263 void *context, int vl,
2266 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2268 return dd->rcv_err_status_cnt[58];
2271 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2272 void *context, int vl, int mode,
2275 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2277 return dd->rcv_err_status_cnt[57];
2280 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2281 void *context, int vl, int mode,
2284 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2286 return dd->rcv_err_status_cnt[56];
2289 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2290 void *context, int vl, int mode,
2293 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2295 return dd->rcv_err_status_cnt[55];
2298 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2299 const struct cntr_entry *entry,
2300 void *context, int vl, int mode, u64 data)
2302 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2304 return dd->rcv_err_status_cnt[54];
2307 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2308 const struct cntr_entry *entry,
2309 void *context, int vl, int mode, u64 data)
2311 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2313 return dd->rcv_err_status_cnt[53];
2316 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2317 void *context, int vl,
2320 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2322 return dd->rcv_err_status_cnt[52];
2325 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2326 void *context, int vl,
2329 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2331 return dd->rcv_err_status_cnt[51];
2334 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2335 void *context, int vl,
2338 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2340 return dd->rcv_err_status_cnt[50];
2343 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2344 void *context, int vl,
2347 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2349 return dd->rcv_err_status_cnt[49];
2352 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2353 void *context, int vl,
2356 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2358 return dd->rcv_err_status_cnt[48];
2361 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2362 void *context, int vl,
2365 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2367 return dd->rcv_err_status_cnt[47];
2370 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2371 void *context, int vl, int mode,
2374 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2376 return dd->rcv_err_status_cnt[46];
2379 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2380 const struct cntr_entry *entry,
2381 void *context, int vl, int mode, u64 data)
2383 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2385 return dd->rcv_err_status_cnt[45];
2388 static u64 access_rx_lookup_csr_parity_err_cnt(
2389 const struct cntr_entry *entry,
2390 void *context, int vl, int mode, u64 data)
2392 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2394 return dd->rcv_err_status_cnt[44];
2397 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2398 const struct cntr_entry *entry,
2399 void *context, int vl, int mode, u64 data)
2401 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2403 return dd->rcv_err_status_cnt[43];
2406 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2407 const struct cntr_entry *entry,
2408 void *context, int vl, int mode, u64 data)
2410 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2412 return dd->rcv_err_status_cnt[42];
2415 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2416 const struct cntr_entry *entry,
2417 void *context, int vl, int mode, u64 data)
2419 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2421 return dd->rcv_err_status_cnt[41];
2424 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2425 const struct cntr_entry *entry,
2426 void *context, int vl, int mode, u64 data)
2428 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2430 return dd->rcv_err_status_cnt[40];
2433 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2434 const struct cntr_entry *entry,
2435 void *context, int vl, int mode, u64 data)
2437 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2439 return dd->rcv_err_status_cnt[39];
2442 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2443 const struct cntr_entry *entry,
2444 void *context, int vl, int mode, u64 data)
2446 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2448 return dd->rcv_err_status_cnt[38];
2451 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2452 const struct cntr_entry *entry,
2453 void *context, int vl, int mode, u64 data)
2455 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2457 return dd->rcv_err_status_cnt[37];
2460 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2461 const struct cntr_entry *entry,
2462 void *context, int vl, int mode, u64 data)
2464 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2466 return dd->rcv_err_status_cnt[36];
2469 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2470 const struct cntr_entry *entry,
2471 void *context, int vl, int mode, u64 data)
2473 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2475 return dd->rcv_err_status_cnt[35];
2478 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2479 const struct cntr_entry *entry,
2480 void *context, int vl, int mode, u64 data)
2482 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2484 return dd->rcv_err_status_cnt[34];
2487 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2488 const struct cntr_entry *entry,
2489 void *context, int vl, int mode, u64 data)
2491 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2493 return dd->rcv_err_status_cnt[33];
2496 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2497 void *context, int vl, int mode,
2500 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2502 return dd->rcv_err_status_cnt[32];
2505 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2506 void *context, int vl, int mode,
2509 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2511 return dd->rcv_err_status_cnt[31];
2514 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2515 void *context, int vl, int mode,
2518 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2520 return dd->rcv_err_status_cnt[30];
2523 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2524 void *context, int vl, int mode,
2527 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2529 return dd->rcv_err_status_cnt[29];
2532 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2533 void *context, int vl,
2536 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2538 return dd->rcv_err_status_cnt[28];
2541 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2542 const struct cntr_entry *entry,
2543 void *context, int vl, int mode, u64 data)
2545 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2547 return dd->rcv_err_status_cnt[27];
2550 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2551 const struct cntr_entry *entry,
2552 void *context, int vl, int mode, u64 data)
2554 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2556 return dd->rcv_err_status_cnt[26];
2559 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2560 const struct cntr_entry *entry,
2561 void *context, int vl, int mode, u64 data)
2563 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2565 return dd->rcv_err_status_cnt[25];
2568 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2569 const struct cntr_entry *entry,
2570 void *context, int vl, int mode, u64 data)
2572 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2574 return dd->rcv_err_status_cnt[24];
2577 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2578 const struct cntr_entry *entry,
2579 void *context, int vl, int mode, u64 data)
2581 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2583 return dd->rcv_err_status_cnt[23];
2586 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2587 const struct cntr_entry *entry,
2588 void *context, int vl, int mode, u64 data)
2590 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2592 return dd->rcv_err_status_cnt[22];
2595 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2596 const struct cntr_entry *entry,
2597 void *context, int vl, int mode, u64 data)
2599 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2601 return dd->rcv_err_status_cnt[21];
2604 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2605 const struct cntr_entry *entry,
2606 void *context, int vl, int mode, u64 data)
2608 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2610 return dd->rcv_err_status_cnt[20];
2613 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2614 const struct cntr_entry *entry,
2615 void *context, int vl, int mode, u64 data)
2617 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2619 return dd->rcv_err_status_cnt[19];
2622 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2623 void *context, int vl,
2626 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2628 return dd->rcv_err_status_cnt[18];
2631 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2632 void *context, int vl,
2635 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2637 return dd->rcv_err_status_cnt[17];
2640 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2641 const struct cntr_entry *entry,
2642 void *context, int vl, int mode, u64 data)
2644 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2646 return dd->rcv_err_status_cnt[16];
2649 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2650 const struct cntr_entry *entry,
2651 void *context, int vl, int mode, u64 data)
2653 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2655 return dd->rcv_err_status_cnt[15];
2658 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2659 void *context, int vl,
2662 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2664 return dd->rcv_err_status_cnt[14];
2667 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2668 void *context, int vl,
2671 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2673 return dd->rcv_err_status_cnt[13];
2676 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2677 void *context, int vl, int mode,
2680 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2682 return dd->rcv_err_status_cnt[12];
2685 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2686 void *context, int vl, int mode,
2689 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2691 return dd->rcv_err_status_cnt[11];
2694 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2695 void *context, int vl, int mode,
2698 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2700 return dd->rcv_err_status_cnt[10];
2703 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2704 void *context, int vl, int mode,
2707 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2709 return dd->rcv_err_status_cnt[9];
2712 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2713 void *context, int vl, int mode,
2716 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2718 return dd->rcv_err_status_cnt[8];
2721 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2722 const struct cntr_entry *entry,
2723 void *context, int vl, int mode, u64 data)
2725 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2727 return dd->rcv_err_status_cnt[7];
2730 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2731 const struct cntr_entry *entry,
2732 void *context, int vl, int mode, u64 data)
2734 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2736 return dd->rcv_err_status_cnt[6];
2739 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2740 void *context, int vl, int mode,
2743 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2745 return dd->rcv_err_status_cnt[5];
2748 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2749 void *context, int vl, int mode,
2752 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2754 return dd->rcv_err_status_cnt[4];
2757 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2758 void *context, int vl, int mode,
2761 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2763 return dd->rcv_err_status_cnt[3];
2766 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2767 void *context, int vl, int mode,
2770 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2772 return dd->rcv_err_status_cnt[2];
2775 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2776 void *context, int vl, int mode,
2779 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2781 return dd->rcv_err_status_cnt[1];
2784 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2785 void *context, int vl, int mode,
2788 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2790 return dd->rcv_err_status_cnt[0];
2794 * Software counters corresponding to each of the
2795 * error status bits within SendPioErrStatus
2797 static u64 access_pio_pec_sop_head_parity_err_cnt(
2798 const struct cntr_entry *entry,
2799 void *context, int vl, int mode, u64 data)
2801 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2803 return dd->send_pio_err_status_cnt[35];
2806 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2807 const struct cntr_entry *entry,
2808 void *context, int vl, int mode, u64 data)
2810 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2812 return dd->send_pio_err_status_cnt[34];
2815 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2816 const struct cntr_entry *entry,
2817 void *context, int vl, int mode, u64 data)
2819 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2821 return dd->send_pio_err_status_cnt[33];
2824 static u64 access_pio_current_free_cnt_parity_err_cnt(
2825 const struct cntr_entry *entry,
2826 void *context, int vl, int mode, u64 data)
2828 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2830 return dd->send_pio_err_status_cnt[32];
2833 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2834 void *context, int vl, int mode,
2837 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2839 return dd->send_pio_err_status_cnt[31];
2842 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2843 void *context, int vl, int mode,
2846 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2848 return dd->send_pio_err_status_cnt[30];
2851 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2852 void *context, int vl, int mode,
2855 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2857 return dd->send_pio_err_status_cnt[29];
2860 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2861 const struct cntr_entry *entry,
2862 void *context, int vl, int mode, u64 data)
2864 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2866 return dd->send_pio_err_status_cnt[28];
2869 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2870 void *context, int vl, int mode,
2873 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2875 return dd->send_pio_err_status_cnt[27];
2878 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2879 void *context, int vl, int mode,
2882 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2884 return dd->send_pio_err_status_cnt[26];
2887 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2888 void *context, int vl,
2891 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2893 return dd->send_pio_err_status_cnt[25];
2896 static u64 access_pio_block_qw_count_parity_err_cnt(
2897 const struct cntr_entry *entry,
2898 void *context, int vl, int mode, u64 data)
2900 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2902 return dd->send_pio_err_status_cnt[24];
2905 static u64 access_pio_write_qw_valid_parity_err_cnt(
2906 const struct cntr_entry *entry,
2907 void *context, int vl, int mode, u64 data)
2909 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2911 return dd->send_pio_err_status_cnt[23];
2914 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2915 void *context, int vl, int mode,
2918 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2920 return dd->send_pio_err_status_cnt[22];
2923 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2924 void *context, int vl,
2927 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2929 return dd->send_pio_err_status_cnt[21];
2932 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2933 void *context, int vl,
2936 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2938 return dd->send_pio_err_status_cnt[20];
2941 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2942 void *context, int vl,
2945 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2947 return dd->send_pio_err_status_cnt[19];
2950 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2951 const struct cntr_entry *entry,
2952 void *context, int vl, int mode, u64 data)
2954 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2956 return dd->send_pio_err_status_cnt[18];
2959 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2960 void *context, int vl, int mode,
2963 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2965 return dd->send_pio_err_status_cnt[17];
2968 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2969 void *context, int vl, int mode,
2972 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2974 return dd->send_pio_err_status_cnt[16];
2977 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2978 const struct cntr_entry *entry,
2979 void *context, int vl, int mode, u64 data)
2981 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2983 return dd->send_pio_err_status_cnt[15];
2986 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2987 const struct cntr_entry *entry,
2988 void *context, int vl, int mode, u64 data)
2990 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2992 return dd->send_pio_err_status_cnt[14];
2995 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2996 const struct cntr_entry *entry,
2997 void *context, int vl, int mode, u64 data)
2999 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3001 return dd->send_pio_err_status_cnt[13];
3004 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3005 const struct cntr_entry *entry,
3006 void *context, int vl, int mode, u64 data)
3008 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3010 return dd->send_pio_err_status_cnt[12];
3013 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3014 const struct cntr_entry *entry,
3015 void *context, int vl, int mode, u64 data)
3017 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3019 return dd->send_pio_err_status_cnt[11];
3022 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3023 const struct cntr_entry *entry,
3024 void *context, int vl, int mode, u64 data)
3026 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3028 return dd->send_pio_err_status_cnt[10];
3031 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3032 const struct cntr_entry *entry,
3033 void *context, int vl, int mode, u64 data)
3035 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3037 return dd->send_pio_err_status_cnt[9];
3040 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3041 const struct cntr_entry *entry,
3042 void *context, int vl, int mode, u64 data)
3044 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3046 return dd->send_pio_err_status_cnt[8];
3049 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3050 const struct cntr_entry *entry,
3051 void *context, int vl, int mode, u64 data)
3053 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3055 return dd->send_pio_err_status_cnt[7];
3058 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3059 void *context, int vl, int mode,
3062 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3064 return dd->send_pio_err_status_cnt[6];
3067 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3068 void *context, int vl, int mode,
3071 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3073 return dd->send_pio_err_status_cnt[5];
3076 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3077 void *context, int vl, int mode,
3080 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3082 return dd->send_pio_err_status_cnt[4];
3085 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3086 void *context, int vl, int mode,
3089 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3091 return dd->send_pio_err_status_cnt[3];
3094 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3095 void *context, int vl, int mode,
3098 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3100 return dd->send_pio_err_status_cnt[2];
3103 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3104 void *context, int vl,
3107 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3109 return dd->send_pio_err_status_cnt[1];
3112 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3113 void *context, int vl, int mode,
3116 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3118 return dd->send_pio_err_status_cnt[0];
3122 * Software counters corresponding to each of the
3123 * error status bits within SendDmaErrStatus
3125 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3126 const struct cntr_entry *entry,
3127 void *context, int vl, int mode, u64 data)
3129 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3131 return dd->send_dma_err_status_cnt[3];
3134 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3135 const struct cntr_entry *entry,
3136 void *context, int vl, int mode, u64 data)
3138 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3140 return dd->send_dma_err_status_cnt[2];
3143 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3144 void *context, int vl, int mode,
3147 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3149 return dd->send_dma_err_status_cnt[1];
3152 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3153 void *context, int vl, int mode,
3156 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3158 return dd->send_dma_err_status_cnt[0];
3162 * Software counters corresponding to each of the
3163 * error status bits within SendEgressErrStatus
3165 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3166 const struct cntr_entry *entry,
3167 void *context, int vl, int mode, u64 data)
3169 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3171 return dd->send_egress_err_status_cnt[63];
3174 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3175 const struct cntr_entry *entry,
3176 void *context, int vl, int mode, u64 data)
3178 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3180 return dd->send_egress_err_status_cnt[62];
3183 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3184 void *context, int vl, int mode,
3187 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3189 return dd->send_egress_err_status_cnt[61];
3192 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3193 void *context, int vl,
3196 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3198 return dd->send_egress_err_status_cnt[60];
3201 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3202 const struct cntr_entry *entry,
3203 void *context, int vl, int mode, u64 data)
3205 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3207 return dd->send_egress_err_status_cnt[59];
3210 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3211 void *context, int vl, int mode,
3214 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3216 return dd->send_egress_err_status_cnt[58];
3219 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3220 void *context, int vl, int mode,
3223 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3225 return dd->send_egress_err_status_cnt[57];
3228 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3229 void *context, int vl, int mode,
3232 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3234 return dd->send_egress_err_status_cnt[56];
3237 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3238 void *context, int vl, int mode,
3241 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3243 return dd->send_egress_err_status_cnt[55];
3246 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3247 void *context, int vl, int mode,
3250 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3252 return dd->send_egress_err_status_cnt[54];
3255 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3256 void *context, int vl, int mode,
3259 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3261 return dd->send_egress_err_status_cnt[53];
3264 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3265 void *context, int vl, int mode,
3268 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3270 return dd->send_egress_err_status_cnt[52];
3273 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3274 void *context, int vl, int mode,
3277 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3279 return dd->send_egress_err_status_cnt[51];
3282 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3283 void *context, int vl, int mode,
3286 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3288 return dd->send_egress_err_status_cnt[50];
3291 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3292 void *context, int vl, int mode,
3295 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3297 return dd->send_egress_err_status_cnt[49];
3300 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3301 void *context, int vl, int mode,
3304 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3306 return dd->send_egress_err_status_cnt[48];
3309 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3310 void *context, int vl, int mode,
3313 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3315 return dd->send_egress_err_status_cnt[47];
3318 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3319 void *context, int vl, int mode,
3322 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3324 return dd->send_egress_err_status_cnt[46];
3327 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3328 void *context, int vl, int mode,
3331 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3333 return dd->send_egress_err_status_cnt[45];
3336 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3337 void *context, int vl,
3340 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3342 return dd->send_egress_err_status_cnt[44];
3345 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3346 const struct cntr_entry *entry,
3347 void *context, int vl, int mode, u64 data)
3349 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3351 return dd->send_egress_err_status_cnt[43];
3354 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3355 void *context, int vl, int mode,
3358 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3360 return dd->send_egress_err_status_cnt[42];
3363 static u64 access_tx_credit_return_partiy_err_cnt(
3364 const struct cntr_entry *entry,
3365 void *context, int vl, int mode, u64 data)
3367 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3369 return dd->send_egress_err_status_cnt[41];
3372 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3373 const struct cntr_entry *entry,
3374 void *context, int vl, int mode, u64 data)
3376 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3378 return dd->send_egress_err_status_cnt[40];
3381 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3382 const struct cntr_entry *entry,
3383 void *context, int vl, int mode, u64 data)
3385 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3387 return dd->send_egress_err_status_cnt[39];
3390 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3391 const struct cntr_entry *entry,
3392 void *context, int vl, int mode, u64 data)
3394 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3396 return dd->send_egress_err_status_cnt[38];
3399 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3400 const struct cntr_entry *entry,
3401 void *context, int vl, int mode, u64 data)
3403 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3405 return dd->send_egress_err_status_cnt[37];
3408 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3409 const struct cntr_entry *entry,
3410 void *context, int vl, int mode, u64 data)
3412 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3414 return dd->send_egress_err_status_cnt[36];
3417 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3418 const struct cntr_entry *entry,
3419 void *context, int vl, int mode, u64 data)
3421 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3423 return dd->send_egress_err_status_cnt[35];
3426 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3427 const struct cntr_entry *entry,
3428 void *context, int vl, int mode, u64 data)
3430 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3432 return dd->send_egress_err_status_cnt[34];
3435 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3436 const struct cntr_entry *entry,
3437 void *context, int vl, int mode, u64 data)
3439 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3441 return dd->send_egress_err_status_cnt[33];
3444 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3445 const struct cntr_entry *entry,
3446 void *context, int vl, int mode, u64 data)
3448 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3450 return dd->send_egress_err_status_cnt[32];
3453 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3454 const struct cntr_entry *entry,
3455 void *context, int vl, int mode, u64 data)
3457 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3459 return dd->send_egress_err_status_cnt[31];
3462 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3463 const struct cntr_entry *entry,
3464 void *context, int vl, int mode, u64 data)
3466 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3468 return dd->send_egress_err_status_cnt[30];
3471 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3472 const struct cntr_entry *entry,
3473 void *context, int vl, int mode, u64 data)
3475 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3477 return dd->send_egress_err_status_cnt[29];
3480 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3481 const struct cntr_entry *entry,
3482 void *context, int vl, int mode, u64 data)
3484 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3486 return dd->send_egress_err_status_cnt[28];
3489 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3490 const struct cntr_entry *entry,
3491 void *context, int vl, int mode, u64 data)
3493 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3495 return dd->send_egress_err_status_cnt[27];
3498 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3499 const struct cntr_entry *entry,
3500 void *context, int vl, int mode, u64 data)
3502 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3504 return dd->send_egress_err_status_cnt[26];
3507 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3508 const struct cntr_entry *entry,
3509 void *context, int vl, int mode, u64 data)
3511 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3513 return dd->send_egress_err_status_cnt[25];
3516 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3517 const struct cntr_entry *entry,
3518 void *context, int vl, int mode, u64 data)
3520 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3522 return dd->send_egress_err_status_cnt[24];
3525 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3526 const struct cntr_entry *entry,
3527 void *context, int vl, int mode, u64 data)
3529 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3531 return dd->send_egress_err_status_cnt[23];
3534 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3535 const struct cntr_entry *entry,
3536 void *context, int vl, int mode, u64 data)
3538 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3540 return dd->send_egress_err_status_cnt[22];
3543 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3544 const struct cntr_entry *entry,
				 void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
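/*
 * Each accessor above returns the software shadow for one bit of
 * SendEgressErrStatus; the array index matches the bit position, so bit N
 * of the register is counted in dd->send_egress_err_status_cnt[N].
 */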
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl,
					       int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}
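/*
 * The DC receive error counter is special: bypass packet errors are tallied
 * in software in dd->sw_rcv_bypass_packet_errors.  A read therefore folds
 * the software count into the CSR value, saturating at CNTR_MAX, and a
 * write (counter reset) clears the software count as well.
 */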
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				 void *context, int vl, int mode,
				 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,		      \
				void *context, int vl, int mode, u64 data)    \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
			      ppd->ibport_data.rvp.cntr, vl,		      \
			      mode, data);				      \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
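/*
 * For reference, def_access_sw_cpu(rc_acks) expands (modulo whitespace) to:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd =
 *				(struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */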
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
			     void *context, int vl, int mode, u64 data)	      \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
									      \
	if (vl != CNTR_INVALID_VL)					      \
		return 0;						      \
									      \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
			     mode, data);				      \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
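/*
 * Unlike the per-CPU counters above, these IB-protocol counters are plain
 * software counters with no per-VL breakdown, so any per-VL query
 * (vl != CNTR_INVALID_VL) reads back as 0.
 */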
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			     access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			     access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			      access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
			SEND_DMA_DESC_FETCHED_CNT, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
			CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
			0, CNTR_NORMAL,
			access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
			CNTR_NORMAL,
			access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
			CNTR_NORMAL,
			access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
			CNTR_NORMAL,
			access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
			CNTR_NORMAL,
			access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
			CNTR_NORMAL,
			access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
			CNTR_NORMAL,
			access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
			CNTR_NORMAL,
			access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
			CNTR_NORMAL,
			access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
			0, CNTR_NORMAL,
			access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
			CNTR_NORMAL,
			access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoDbgParityError", 0, 0,
			CNTR_NORMAL,
			access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoRxdmaParityError", 0, 0,
			CNTR_NORMAL,
			access_cce_cli1_async_fifo_rxdma_parity_err_cnt
			),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
			CNTR_NORMAL,
			access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
			"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
			CNTR_NORMAL,
			access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
			CNTR_NORMAL,
			access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
			CNTR_NORMAL,
			access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
			CNTR_NORMAL,
			access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
			CNTR_NORMAL,
			access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
			0, CNTR_NORMAL,
			access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
			"RxRbufFlInitWrAddrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
			CNTR_NORMAL,
			access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
			CNTR_NORMAL,
			access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
			CNTR_NORMAL,
			access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQNumOfPktParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQTlPtrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
			0, 0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
			"RxRbufCsrQHeadBufNumParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
			0, CNTR_NORMAL,
			access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
			"RxRbufLookupDesRegUncCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
			CNTR_NORMAL,
			access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
			CNTR_NORMAL,
			access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
			0, 0, CNTR_NORMAL,
			access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
			0, CNTR_NORMAL,
			access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
			CNTR_NORMAL,
			access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
			CNTR_NORMAL,
			access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
			CNTR_NORMAL,
			access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
			CNTR_NORMAL,
			access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
			CNTR_NORMAL,
			access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
			CNTR_NORMAL,
			access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
			CNTR_NORMAL,
			access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
			CNTR_NORMAL,
			access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
			0, CNTR_NORMAL,
			access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
			CNTR_NORMAL,
			access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
			CNTR_NORMAL,
			access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
			CNTR_NORMAL,
			access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
			CNTR_NORMAL,
			access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
			"PioSbrdctrlCrrelFifoParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
			CNTR_NORMAL,
			access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
			CNTR_NORMAL,
			access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
			0, CNTR_NORMAL,
			access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
			0, CNTR_NORMAL,
			access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
			0, CNTR_NORMAL,
			access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
			CNTR_NORMAL,
			access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
			CNTR_NORMAL,
			access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
			CNTR_NORMAL,
			access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
			CNTR_NORMAL,
			access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
			"TxSbrdCtlStateMachineParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
			CNTR_NORMAL,
			access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
			CNTR_NORMAL,
			access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
			0, 0, CNTR_NORMAL,
			access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
			CNTR_NORMAL,
			access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
			CNTR_NORMAL,
			access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
			CNTR_NORMAL,
			access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
			"EgressFifoUnderrunOrParityErr", 0, 0,
			CNTR_NORMAL,
			access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
			CNTR_NORMAL,
			access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
			CNTR_NORMAL,
			access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
			CNTR_NORMAL,
			access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
			CNTR_NORMAL,
			access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
			CNTR_NORMAL,
			access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
			CNTR_NORMAL,
			access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
			0, 0, CNTR_NORMAL,
			access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
			CNTR_NORMAL,
			access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
			CNTR_NORMAL,
			access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
			0, 0, CNTR_NORMAL,
			access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
			0, 0, CNTR_NORMAL,
			access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
			CNTR_NORMAL,
			access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
			CNTR_NORMAL,
			access_sdma_wrong_dw_err_cnt),
};
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
5202 /* ======================================================================== */
5204 /* return true if this is chip revision A */
5205 int is_ax(struct hfi1_devdata *dd)
5208 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5209 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5210 return (chip_rev_minor & 0xf0) == 0;
5213 /* return true if this is chip revision B */
5214 int is_bx(struct hfi1_devdata *dd)
5217 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5218 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5219 return (chip_rev_minor & 0xF0) == 0x10;
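/*
 * Worked example (follows directly from the two checks above): a
 * CCE_REVISION minor revision field of 0x00-0x0f makes is_ax() return
 * true (an A-step part), while 0x10-0x1f makes is_bx() return true
 * (a B-step part). Any later stepping fails both checks.
 */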
5223 * Append string s to buffer buf. Arguments curp and len are the current
5224 * position and remaining length, respectively.
5226 * Return 0 on success, 1 if out of room.
5228 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5232 int result = 0; /* success */
5235 /* add a comma if this is not the first item in the buffer */
5238 result = 1; /* out of room */
5245 /* copy the string */
5246 while ((c = *s++) != 0) {
5248 result = 1; /* out of room */
5256 /* write return values */
5264 * Using the given flag table, print a comma separated string into
5265 * the buffer. End in '*' if the buffer is too short.
5267 static char *flag_string(char *buf, int buf_len, u64 flags,
5268 struct flag_table *table, int table_size)
5276 /* make sure there are at least 2 bytes so we can form "*" */
5280 len--; /* leave room for a nul */
5281 for (i = 0; i < table_size; i++) {
5282 if (flags & table[i].flag) {
5283 no_room = append_str(buf, &p, &len, table[i].str);
5286 flags &= ~table[i].flag;
5290 /* any undocumented bits left? */
5291 if (!no_room && flags) {
5292 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5293 no_room = append_str(buf, &p, &len, extra);
5296 /* add '*' if we ran out of room */
5298 /* may need to back up to add space for a '*' */
5304 /* add final nul - space already allocated above */
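/*
 * Usage sketch (illustrative only; this table is hypothetical, not one
 * of the driver's flag tables):
 *
 *	static struct flag_table demo[] = {
 *		{ .flag = 1ull << 0, .str = "BitZero" },
 *		{ .flag = 1ull << 3, .str = "BitThree" },
 *	};
 *	char buf[32];
 *
 *	flag_string(buf, sizeof(buf), 0x9, demo, ARRAY_SIZE(demo));
 *
 * buf now holds "BitZero,BitThree". An input of 0x19 would also append
 * ",bits 0x10" for the undocumented bit 4.
 */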
5309 /* first 8 CCE error interrupt source names */
5310 static const char * const cce_misc_names[] = {
5311 "CceErrInt", /* 0 */
5312 "RxeErrInt", /* 1 */
5313 "MiscErrInt", /* 2 */
5314 "Reserved3", /* 3 */
5315 "PioErrInt", /* 4 */
5316 "SDmaErrInt", /* 5 */
5317 "EgressErrInt", /* 6 */
5322 * Return the miscellaneous error interrupt name.
5324 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5326 if (source < ARRAY_SIZE(cce_misc_names))
5327 strncpy(buf, cce_misc_names[source], bsize);
5329 snprintf(buf, bsize, "Reserved%u",
5330 source + IS_GENERAL_ERR_START);
5336 * Return the SDMA engine error interrupt name.
5338 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5340 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5345 * Return the send context error interrupt name.
5347 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5349 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5353 static const char * const various_names[] = {
5362 * Return the various interrupt name.
5364 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5366 if (source < ARRAY_SIZE(various_names))
5367 strncpy(buf, various_names[source], bsize);
5369 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5374 * Return the DC interrupt name.
5376 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5378 static const char * const dc_int_names[] = {
5382 "lbm" /* local block merge */
5385 if (source < ARRAY_SIZE(dc_int_names))
5386 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5388 snprintf(buf, bsize, "DCInt%u", source);
5392 static const char * const sdma_int_names[] = {
5399 * Return the SDMA engine interrupt name.
5401 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5403 /* what interrupt */
5404 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5406 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5408 if (likely(what < 3))
5409 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5411 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
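/*
 * Decode example: with TXE_NUM_SDMA_ENGINES == 16, a source value of
 * 17 splits into what == 1 and which == 1, i.e. the second interrupt
 * type in sdma_int_names[] for SDMA engine 1.
 */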
5416 * Return the receive available interrupt name.
5418 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5420 snprintf(buf, bsize, "RcvAvailInt%u", source);
5425 * Return the receive urgent interrupt name.
5427 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5429 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5434 * Return the send credit interrupt name.
5436 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5438 snprintf(buf, bsize, "SendCreditInt%u", source);
5443 * Return the reserved interrupt name.
5445 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5447 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5451 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5453 return flag_string(buf, buf_len, flags,
5454 cce_err_status_flags,
5455 ARRAY_SIZE(cce_err_status_flags));
5458 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5460 return flag_string(buf, buf_len, flags,
5461 rxe_err_status_flags,
5462 ARRAY_SIZE(rxe_err_status_flags));
5465 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5467 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5468 ARRAY_SIZE(misc_err_status_flags));
5471 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5473 return flag_string(buf, buf_len, flags,
5474 pio_err_status_flags,
5475 ARRAY_SIZE(pio_err_status_flags));
5478 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5480 return flag_string(buf, buf_len, flags,
5481 sdma_err_status_flags,
5482 ARRAY_SIZE(sdma_err_status_flags));
5485 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5487 return flag_string(buf, buf_len, flags,
5488 egress_err_status_flags,
5489 ARRAY_SIZE(egress_err_status_flags));
5492 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5494 return flag_string(buf, buf_len, flags,
5495 egress_err_info_flags,
5496 ARRAY_SIZE(egress_err_info_flags));
5499 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5501 return flag_string(buf, buf_len, flags,
5502 send_err_status_flags,
5503 ARRAY_SIZE(send_err_status_flags));
5506 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5512 * For most of these errors, there is nothing that can be done except
5513 * report or record it.
5515 dd_dev_info(dd, "CCE Error: %s\n",
5516 cce_err_status_string(buf, sizeof(buf), reg));
5518 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5519 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5520 /* this error requires a manual drop into SPC freeze mode */
5522 start_freeze_handling(dd->pport, FREEZE_SELF);
5525 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5526 if (reg & (1ull << i)) {
5527 incr_cntr64(&dd->cce_err_status_cnt[i]);
5528 /* maintain a counter over all cce_err_status errors */
5529 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5535 * Check counters for receive errors that do not have an interrupt
5536 * associated with them.
5538 #define RCVERR_CHECK_TIME 10
5539 static void update_rcverr_timer(unsigned long opaque)
5541 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5542 struct hfi1_pportdata *ppd = dd->pport;
5543 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5545 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5546 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5547 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5548 set_link_down_reason(
5549 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5550 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5551 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5553 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5555 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5558 static int init_rcverr(struct hfi1_devdata *dd)
5560 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5561 /* Assume the hardware counter has been reset */
5562 dd->rcv_ovfl_cnt = 0;
5563 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5566 static void free_rcverr(struct hfi1_devdata *dd)
5568 if (dd->rcverr_timer.data)
5569 del_timer_sync(&dd->rcverr_timer);
5570 dd->rcverr_timer.data = 0;
5573 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5578 dd_dev_info(dd, "Receive Error: %s\n",
5579 rxe_err_status_string(buf, sizeof(buf), reg));
5581 if (reg & ALL_RXE_FREEZE_ERR) {
5585 * Freeze mode recovery is disabled for the errors
5586 * in RXE_FREEZE_ABORT_MASK
5588 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5589 flags = FREEZE_ABORT;
5591 start_freeze_handling(dd->pport, flags);
5594 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5595 if (reg & (1ull << i))
5596 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5600 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5605 dd_dev_info(dd, "Misc Error: %s\n",
5606 misc_err_status_string(buf, sizeof(buf), reg));
5607 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5608 if (reg & (1ull << i))
5609 incr_cntr64(&dd->misc_err_status_cnt[i]);
5613 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5618 dd_dev_info(dd, "PIO Error: %s\n",
5619 pio_err_status_string(buf, sizeof(buf), reg));
5621 if (reg & ALL_PIO_FREEZE_ERR)
5622 start_freeze_handling(dd->pport, 0);
5624 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5625 if (reg & (1ull << i))
5626 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5630 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5635 dd_dev_info(dd, "SDMA Error: %s\n",
5636 sdma_err_status_string(buf, sizeof(buf), reg));
5638 if (reg & ALL_SDMA_FREEZE_ERR)
5639 start_freeze_handling(dd->pport, 0);
5641 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5642 if (reg & (1ull << i))
5643 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5647 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5649 incr_cntr64(&ppd->port_xmit_discards);
5652 static void count_port_inactive(struct hfi1_devdata *dd)
5654 __count_port_discards(dd->pport);
5658 * We have had a "disallowed packet" error during egress. Determine the
5659 * integrity check which failed, and update the relevant error counter, etc.
5661 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5662 * bit of state per integrity check, and so we can miss the reason for an
5663 * egress error if more than one packet fails the same integrity check
5664 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5666 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5669 struct hfi1_pportdata *ppd = dd->pport;
5670 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5671 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5674 /* clear down all observed info as quickly as possible after read */
5675 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5678 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5679 info, egress_err_info_string(buf, sizeof(buf), info), src);
5681 /* Eventually add other counters for each bit */
5682 if (info & PORT_DISCARD_EGRESS_ERRS) {
5686 * Count all applicable bits as individual errors and
5687 * attribute them to the packet that triggered this handler.
5688 * This may not be completely accurate due to limitations
5689 * on the available hardware error information. There is
5690 * a single information register and any number of error
5691 * packets may have occurred and contributed to it before
5692 * this routine is called. This means that:
5693 * a) If multiple packets with the same error occur before
5694 * this routine is called, earlier packets are missed.
5695 * There is only a single bit for each error type.
5696 * b) Errors may not be attributed to the correct VL.
5697 * The driver is attributing all bits in the info register
5698 * to the packet that triggered this call, but bits
5699 * could be an accumulation of different packets with different errors.
5701 * c) A single error packet may have multiple counts attached
5702 * to it. There is no way for the driver to know if
5703 * multiple bits set in the info register are due to a
5704 * single packet or multiple packets. The driver assumes multiple packets.
5707 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5708 for (i = 0; i < weight; i++) {
5709 __count_port_discards(ppd);
5710 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5711 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5713 incr_cntr64(&ppd->port_xmit_discards_vl
5720 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5721 * register. Does it represent a 'port inactive' error?
5723 static inline int port_inactive_err(u64 posn)
5725 return (posn >= SEES(TX_LINKDOWN) &&
5726 posn <= SEES(TX_INCORRECT_LINK_STATE));
5730 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5731 * register. Does it represent a 'disallowed packet' error?
5733 static inline int disallowed_pkt_err(int posn)
5735 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5736 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5740 * Input value is a bit position of one of the SDMA engine disallowed
5741 * packet errors. Return which engine. Use of this must be guarded by
5742 * disallowed_pkt_err().
5744 static inline int disallowed_pkt_engine(int posn)
5746 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
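/*
 * Example (the bit position is hypothetical, used only to show the
 * arithmetic): if SEES(TX_SDMA0_DISALLOWED_PACKET) were bit 32, then a
 * status bit at position 35 would satisfy disallowed_pkt_err() and
 * disallowed_pkt_engine() would attribute it to SDMA engine 3.
 */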
5750 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5753 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5755 struct sdma_vl_map *m;
5759 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5763 m = rcu_dereference(dd->sdma_map);
5764 vl = m->engine_to_vl[engine];
5771 * Translate the send context (software index) into a VL. Return -1 if the
5772 * translation cannot be done.
5774 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5776 struct send_context_info *sci;
5777 struct send_context *sc;
5780 sci = &dd->send_contexts[sw_index];
5782 /* there is no information for user (PSM) and ack contexts */
5783 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5789 if (dd->vld[15].sc == sc)
5791 for (i = 0; i < num_vls; i++)
5792 if (dd->vld[i].sc == sc)
5798 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5800 u64 reg_copy = reg, handled = 0;
5804 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5805 start_freeze_handling(dd->pport, 0);
5806 else if (is_ax(dd) &&
5807 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5808 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5809 start_freeze_handling(dd->pport, 0);
5812 int posn = fls64(reg_copy);
5813 /* fls64() returns a 1-based offset, we want it zero based */
5814 int shift = posn - 1;
5815 u64 mask = 1ULL << shift;
5817 if (port_inactive_err(shift)) {
5818 count_port_inactive(dd);
5820 } else if (disallowed_pkt_err(shift)) {
5821 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5823 handle_send_egress_err_info(dd, vl);
5832 dd_dev_info(dd, "Egress Error: %s\n",
5833 egress_err_status_string(buf, sizeof(buf), reg));
5835 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5836 if (reg & (1ull << i))
5837 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5841 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5846 dd_dev_info(dd, "Send Error: %s\n",
5847 send_err_status_string(buf, sizeof(buf), reg));
5849 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5850 if (reg & (1ull << i))
5851 incr_cntr64(&dd->send_err_status_cnt[i]);
5856 * The maximum number of times the error clear down will loop before
5857 * blocking a repeating error. This value is arbitrary.
5859 #define MAX_CLEAR_COUNT 20
5862 * Clear and handle an error register. All error interrupts are funneled
5863 * through here to have a central location to correctly handle single-
5864 * or multi-shot errors.
5866 * For non per-context registers, call this routine with a context value
5867 * of 0 so the per-context offset is zero.
5869 * If the handler loops too many times, assume that something is wrong
5870 * and can't be fixed, so mask the error bits.
5872 static void interrupt_clear_down(struct hfi1_devdata *dd,
5874 const struct err_reg_info *eri)
5879 /* read in a loop until no more errors are seen */
5882 reg = read_kctxt_csr(dd, context, eri->status);
5885 write_kctxt_csr(dd, context, eri->clear, reg);
5886 if (likely(eri->handler))
5887 eri->handler(dd, context, reg);
5889 if (count > MAX_CLEAR_COUNT) {
5892 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5895 * Read-modify-write so any other masked bits stay set.
5898 mask = read_kctxt_csr(dd, context, eri->mask);
5900 write_kctxt_csr(dd, context, eri->mask, mask);
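/*
 * Sketch of the descriptor consumed above (illustrative; the CSR
 * offsets below are placeholders, only the field usage is taken from
 * interrupt_clear_down() itself):
 *
 *	static const struct err_reg_info demo_eri = {
 *		.status  = 0x1000,	// status CSR, read each pass
 *		.clear   = 0x1008,	// write-1-to-clear CSR
 *		.mask    = 0x1010,	// interrupt mask CSR
 *		.handler = handle_cce_err,
 *	};
 */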
5907 * CCE block "misc" interrupt. Source is < 16.
5909 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5911 const struct err_reg_info *eri = &misc_errs[source];
5914 interrupt_clear_down(dd, 0, eri);
5916 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5921 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5923 return flag_string(buf, buf_len, flags,
5924 sc_err_status_flags,
5925 ARRAY_SIZE(sc_err_status_flags));
5929 * Send context error interrupt. Source (hw_context) is < 160.
5931 * All send context errors cause the send context to halt. The normal
5932 * clear-down mechanism cannot be used because we cannot clear the
5933 * error bits until several other long-running items are done first.
5934 * This is OK because with the context halted, nothing else is going
5935 * to happen on it anyway.
5937 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5938 unsigned int hw_context)
5940 struct send_context_info *sci;
5941 struct send_context *sc;
5947 sw_index = dd->hw_to_sw[hw_context];
5948 if (sw_index >= dd->num_send_contexts) {
5950 "out of range sw index %u for send context %u\n",
5951 sw_index, hw_context);
5954 sci = &dd->send_contexts[sw_index];
5957 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5958 sw_index, hw_context);
5962 /* tell the software that a halt has begun */
5963 sc_stop(sc, SCF_HALTED);
5965 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5967 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5968 send_context_err_status_string(flags, sizeof(flags),
5971 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5972 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5975 * Automatically restart halted kernel contexts out of interrupt
5976 * context. User contexts must ask the driver to restart the context.
5978 if (sc->type != SC_USER)
5979 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5982 * Update the counters for the corresponding status bits.
5983 * Note that these particular counters are aggregated over all 160 contexts.
5986 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5987 if (status & (1ull << i))
5988 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5992 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5993 unsigned int source, u64 status)
5995 struct sdma_engine *sde;
5998 sde = &dd->per_sdma[source];
5999 #ifdef CONFIG_SDMA_VERBOSITY
6000 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6001 slashstrip(__FILE__), __LINE__, __func__);
6002 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6003 sde->this_idx, source, (unsigned long long)status);
6006 sdma_engine_error(sde, status);
6009 * Update the counters for the corresponding status bits.
6010 * Note that these particular counters are aggregated over
6011 * all 16 DMA engines.
6013 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6014 if (status & (1ull << i))
6015 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6020 * CCE block SDMA error interrupt. Source is < 16.
6022 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6024 #ifdef CONFIG_SDMA_VERBOSITY
6025 struct sdma_engine *sde = &dd->per_sdma[source];
6027 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6028 slashstrip(__FILE__), __LINE__, __func__);
6029 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6031 sdma_dumpstate(sde);
6033 interrupt_clear_down(dd, source, &sdma_eng_err);
6037 * CCE block "various" interrupt. Source is < 8.
6039 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6041 const struct err_reg_info *eri = &various_err[source];
6044 * TCritInt cannot go through interrupt_clear_down()
6045 * because it is not a second tier interrupt. The handler
6046 * should be called directly.
6048 if (source == TCRIT_INT_SOURCE)
6049 handle_temp_err(dd);
6050 else if (eri->handler)
6051 interrupt_clear_down(dd, 0, eri);
6054 "%s: Unimplemented/reserved interrupt %d\n",
6058 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6060 /* src_ctx is always zero */
6061 struct hfi1_pportdata *ppd = dd->pport;
6062 unsigned long flags;
6063 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6065 if (reg & QSFP_HFI0_MODPRST_N) {
6066 if (!qsfp_mod_present(ppd)) {
6067 dd_dev_info(dd, "%s: QSFP module removed\n",
6070 ppd->driver_link_ready = 0;
6072 * Cable removed, reset all our information about the
6073 * cache and cable capabilities
6076 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6078 * We don't set cache_refresh_required here as we expect
6079 * an interrupt when a cable is inserted
6081 ppd->qsfp_info.cache_valid = 0;
6082 ppd->qsfp_info.reset_needed = 0;
6083 ppd->qsfp_info.limiting_active = 0;
6084 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6086 /* Invert the ModPresent pin now to detect plug-in */
6087 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6088 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6090 if ((ppd->offline_disabled_reason >
6092 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6093 (ppd->offline_disabled_reason ==
6094 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6095 ppd->offline_disabled_reason =
6097 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6099 if (ppd->host_link_state == HLS_DN_POLL) {
6101 * The link is still in POLL. This means
6102 * that the normal link down processing
6103 * will not happen. We have to do it here
6104 * before turning the DC off.
6106 queue_work(ppd->link_wq, &ppd->link_down_work);
6109 dd_dev_info(dd, "%s: QSFP module inserted\n",
6112 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6113 ppd->qsfp_info.cache_valid = 0;
6114 ppd->qsfp_info.cache_refresh_required = 1;
6115 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6119 * Stop inversion of ModPresent pin to detect
6120 * removal of the cable
6122 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6123 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6124 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6126 ppd->offline_disabled_reason =
6127 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6131 if (reg & QSFP_HFI0_INT_N) {
6132 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6134 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6135 ppd->qsfp_info.check_interrupt_flags = 1;
6136 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6139 /* Schedule the QSFP work only if there is a cable attached. */
6140 if (qsfp_mod_present(ppd))
6141 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6144 static int request_host_lcb_access(struct hfi1_devdata *dd)
6148 ret = do_8051_command(dd, HCMD_MISC,
6149 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6150 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6151 if (ret != HCMD_SUCCESS) {
6152 dd_dev_err(dd, "%s: command failed with error %d\n",
6155 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6158 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6162 ret = do_8051_command(dd, HCMD_MISC,
6163 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6164 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6165 if (ret != HCMD_SUCCESS) {
6166 dd_dev_err(dd, "%s: command failed with error %d\n",
6169 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6173 * Set the LCB selector - allow host access. The DCC selector always
6174 * points to the host.
6176 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6178 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6179 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6180 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6184 * Clear the LCB selector - allow 8051 access. The DCC selector always
6185 * points to the host.
6187 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6189 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6190 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6194 * Acquire LCB access from the 8051. If the host already has access,
6195 * just increment a counter. Otherwise, inform the 8051 that the
6196 * host is taking access.
6200 * Returns 0 on success, -EBUSY if the 8051 has control and cannot be disturbed,
6201 * -errno if unable to acquire access from the 8051
6203 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6205 struct hfi1_pportdata *ppd = dd->pport;
6209 * Use the host link state lock so the operation of this routine
6210 * { link state check, selector change, count increment } can occur
6211 * as a unit against a link state change. Otherwise there is a
6212 * race between the state change and the count increment.
6215 mutex_lock(&ppd->hls_lock);
6217 while (!mutex_trylock(&ppd->hls_lock))
6221 /* this access is valid only when the link is up */
6222 if (ppd->host_link_state & HLS_DOWN) {
6223 dd_dev_info(dd, "%s: link state %s not up\n",
6224 __func__, link_state_name(ppd->host_link_state));
6229 if (dd->lcb_access_count == 0) {
6230 ret = request_host_lcb_access(dd);
6233 "%s: unable to acquire LCB access, err %d\n",
6237 set_host_lcb_access(dd);
6239 dd->lcb_access_count++;
6241 mutex_unlock(&ppd->hls_lock);
6246 * Release LCB access by decrementing the use count. If the count is moving
6247 * from 1 to 0, inform 8051 that it has control back.
6251 * Returns 0 on success, -errno if unable to release access to the 8051
6253 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6258 * Use the host link state lock because the acquire needed it.
6259 * Here, we only need to keep { selector change, count decrement }
6263 mutex_lock(&dd->pport->hls_lock);
6265 while (!mutex_trylock(&dd->pport->hls_lock))
6269 if (dd->lcb_access_count == 0) {
6270 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6275 if (dd->lcb_access_count == 1) {
6276 set_8051_lcb_access(dd);
6277 ret = request_8051_lcb_access(dd);
6280 "%s: unable to release LCB access, err %d\n",
6282 /* restore host access if the grant didn't work */
6283 set_host_lcb_access(dd);
6287 dd->lcb_access_count--;
6289 mutex_unlock(&dd->pport->hls_lock);
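/*
 * Usage sketch (illustrative): host-side LCB CSR access is bracketed
 * by the acquire/release pair above, e.g.
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * The CSR shown is just one example of a register that should only be
 * touched while the host holds LCB access.
 */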
6294 * Initialize LCB access variables and state. Called during driver load,
6295 * after most of the initialization is finished.
6297 * The DC default is LCB access on for the host. The driver defaults to
6298 * leaving access to the 8051. Assign access now - this constrains the call
6299 * to this routine to be after all LCB set-up is done. In particular, after
6300 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6302 static void init_lcb_access(struct hfi1_devdata *dd)
6304 dd->lcb_access_count = 0;
6308 * Write a response back to an 8051 request.
6310 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6312 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6313 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6315 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6316 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
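/*
 * Encoding example: hreq_response(dd, HREQ_SUCCESS, 0) sets the
 * COMPLETED bit, places HREQ_SUCCESS in the RETURN_CODE field and zero
 * in RSP_DATA - exactly how handle_8051_request() below acknowledges
 * HREQ_CONFIG_DONE.
 */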
6320 * Handle host requests from the 8051.
6322 static void handle_8051_request(struct hfi1_pportdata *ppd)
6324 struct hfi1_devdata *dd = ppd->dd;
6329 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6330 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6331 return; /* no request */
6333 /* zero out COMPLETED so the response is seen */
6334 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6336 /* extract request details */
6337 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6338 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6339 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6340 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6343 case HREQ_LOAD_CONFIG:
6344 case HREQ_SAVE_CONFIG:
6345 case HREQ_READ_CONFIG:
6346 case HREQ_SET_TX_EQ_ABS:
6347 case HREQ_SET_TX_EQ_REL:
6349 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6351 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6353 case HREQ_CONFIG_DONE:
6354 hreq_response(dd, HREQ_SUCCESS, 0);
6357 case HREQ_INTERFACE_TEST:
6358 hreq_response(dd, HREQ_SUCCESS, data);
6361 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6362 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6368 * Set up the allocation unit value.
6370 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6372 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6374 /* do not modify other values in the register */
6375 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6376 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6377 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6381 * Set up initial VL15 credits of the remote. Assumes the rest of
6382 * the CM credit registers are zero from a previous global or credit reset.
6383 * Shared limit for VL15 will always be 0.
6385 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6387 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6389 /* set initial values for total and shared credit limit */
6390 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6391 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6394 * Set total limit to be equal to VL15 credits.
6395 * Leave shared limit at 0.
6397 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6398 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6400 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6401 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6405 * Zero all credit details from the previous connection and
6406 * reset the CM manager's internal counters.
6408 void reset_link_credits(struct hfi1_devdata *dd)
6412 /* remove all previous VL credit limits */
6413 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6414 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6415 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6416 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6417 /* reset the CM block */
6418 pio_send_control(dd, PSC_CM_RESET);
6419 /* reset cached value */
6420 dd->vl15buf_cached = 0;
6423 /* convert a vCU to a CU */
6424 static u32 vcu_to_cu(u8 vcu)
6429 /* convert a CU to a vCU */
6430 static u8 cu_to_vcu(u32 cu)
6435 /* convert a vAU to an AU */
6436 static u32 vau_to_au(u8 vau)
6438 return 8 * (1 << vau);
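/*
 * Worked example: vau_to_au(3) = 8 * (1 << 3) = 64, i.e. the vAU of 3
 * that this HFI advertises (see the comments in handle_verify_cap()
 * below) corresponds to a 64-byte allocation unit.
 */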
6441 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6443 ppd->sm_trap_qp = 0x0;
6448 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6450 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6454 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6455 write_csr(dd, DC_LCB_CFG_RUN, 0);
6456 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6457 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6458 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6459 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6460 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6461 reg = read_csr(dd, DCC_CFG_RESET);
6462 write_csr(dd, DCC_CFG_RESET, reg |
6463 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6464 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6465 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6467 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6468 write_csr(dd, DCC_CFG_RESET, reg);
6469 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6474 * This routine should be called after the link has been transitioned to
6475 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6478 * The expectation is that the caller of this routine would have taken
6479 * care of properly transitioning the link into the correct state.
6480 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6481 * before calling this function.
6483 static void _dc_shutdown(struct hfi1_devdata *dd)
6485 lockdep_assert_held(&dd->dc8051_lock);
6487 if (dd->dc_shutdown)
6490 dd->dc_shutdown = 1;
6491 /* Shutdown the LCB */
6492 lcb_shutdown(dd, 1);
6494 * Going to OFFLINE would have caused the 8051 to put the
6495 * SerDes into reset already. Just need to shut down the 8051,
6498 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6501 static void dc_shutdown(struct hfi1_devdata *dd)
6503 mutex_lock(&dd->dc8051_lock);
6505 mutex_unlock(&dd->dc8051_lock);
6509 * Calling this after the DC has been brought out of reset should not
6511 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6512 * before calling this function.
6514 static void _dc_start(struct hfi1_devdata *dd)
6516 lockdep_assert_held(&dd->dc8051_lock);
6518 if (!dd->dc_shutdown)
6521 /* Take the 8051 out of reset */
6522 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6523 /* Wait until 8051 is ready */
6524 if (wait_fm_ready(dd, TIMEOUT_8051_START))
6525 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6528 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6529 write_csr(dd, DCC_CFG_RESET, 0x10);
6530 /* lcb_shutdown() with abort=1 does not restore these */
6531 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6532 dd->dc_shutdown = 0;
6535 static void dc_start(struct hfi1_devdata *dd)
6537 mutex_lock(&dd->dc8051_lock);
6539 mutex_unlock(&dd->dc8051_lock);
6543 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6545 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6547 u64 rx_radr, tx_radr;
6550 if (dd->icode != ICODE_FPGA_EMULATION)
6554 * These LCB defaults on emulator _s are good, nothing to do here:
6555 * LCB_CFG_TX_FIFOS_RADR
6556 * LCB_CFG_RX_FIFOS_RADR
6558 * LCB_CFG_IGNORE_LOST_RCLK
6560 if (is_emulator_s(dd))
6562 /* else this is _p */
6564 version = emulator_rev(dd);
6566 version = 0x2d; /* all B0 use 0x2d or higher settings */
6568 if (version <= 0x12) {
6569 /* release 0x12 and below */
6572 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6573 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6574 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6577 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6578 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6579 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6581 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6582 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6584 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6585 } else if (version <= 0x18) {
6586 /* release 0x13 up to 0x18 */
6587 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6589 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6590 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6591 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6592 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6593 } else if (version == 0x19) {
6595 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6597 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6598 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6599 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6600 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6601 } else if (version == 0x1a) {
6603 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6605 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6606 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6607 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6608 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6609 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6611 /* release 0x1b and higher */
6612 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6614 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6615 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6616 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6617 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6620 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6621 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6622 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6623 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6624 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6628 * Handle a SMA idle message
6630 * This is a work-queue function outside of the interrupt.
6632 void handle_sma_message(struct work_struct *work)
6634 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6636 struct hfi1_devdata *dd = ppd->dd;
6641 * msg is bytes 1-4 of the 40-bit idle message - the command code
6644 ret = read_idle_sma(dd, &msg);
6647 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6649 * React to the SMA message. Byte[1] (0 for us) is the command.
6651 switch (msg & 0xff) {
6654 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6657 * Only expected in INIT or ARMED, discard otherwise.
6659 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6660 ppd->neighbor_normal = 1;
6662 case SMA_IDLE_ACTIVE:
6664 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6667 * Can activate the node. Discard otherwise.
6669 if (ppd->host_link_state == HLS_UP_ARMED &&
6670 ppd->is_active_optimize_enabled) {
6671 ppd->neighbor_normal = 1;
6672 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6676 "%s: received Active SMA idle message, couldn't set link to Active\n",
6682 "%s: received unexpected SMA idle message 0x%llx\n",
6688 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6691 unsigned long flags;
6693 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6694 rcvctrl = read_csr(dd, RCV_CTRL);
6697 write_csr(dd, RCV_CTRL, rcvctrl);
6698 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6701 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6703 adjust_rcvctrl(dd, add, 0);
6706 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6708 adjust_rcvctrl(dd, 0, clear);
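/*
 * Usage note: the freeze and link-down paths below use these wrappers
 * to flip individual control bits under rcvctrl_lock, e.g.
 *
 *	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 *
 * disables the receive port with a locked read-modify-write.
 */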
6712 * Called from all interrupt handlers to start handling an SPC freeze.
6714 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6716 struct hfi1_devdata *dd = ppd->dd;
6717 struct send_context *sc;
6720 if (flags & FREEZE_SELF)
6721 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6723 /* enter frozen mode */
6724 dd->flags |= HFI1_FROZEN;
6726 /* notify all SDMA engines that they are going into a freeze */
6727 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6729 /* do halt pre-handling on all enabled send contexts */
6730 for (i = 0; i < dd->num_send_contexts; i++) {
6731 sc = dd->send_contexts[i].sc;
6732 if (sc && (sc->flags & SCF_ENABLED))
6733 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6736 /* Send contexts are frozen. Notify user space */
6737 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6739 if (flags & FREEZE_ABORT) {
6741 "Aborted freeze recovery. Please REBOOT system\n");
6744 /* queue non-interrupt handler */
6745 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6749 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6750 * depending on the "freeze" parameter.
6752 * No need to return an error if it times out, our only option
6753 * is to proceed anyway.
6755 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6757 unsigned long timeout;
6760 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6762 reg = read_csr(dd, CCE_STATUS);
6764 /* waiting until all indicators are set */
6765 if ((reg & ALL_FROZE) == ALL_FROZE)
6766 return; /* all done */
6768 /* waiting until all indicators are clear */
6769 if ((reg & ALL_FROZE) == 0)
6770 return; /* all done */
6773 if (time_after(jiffies, timeout)) {
6775 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6776 freeze ? "" : "un", reg & ALL_FROZE,
6777 freeze ? ALL_FROZE : 0ull);
6780 usleep_range(80, 120);
6785 * Do all freeze handling for the RXE block.
6787 static void rxe_freeze(struct hfi1_devdata *dd)
6790 struct hfi1_ctxtdata *rcd;
6793 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6795 /* disable all receive contexts */
6796 for (i = 0; i < dd->num_rcv_contexts; i++) {
6797 rcd = hfi1_rcd_get_by_index(dd, i);
6798 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6804 * Unfreeze handling for the RXE block - kernel contexts only.
6805 * This will also enable the port. User contexts will do unfreeze
6806 * handling on a per-context basis as they call into the driver.
6809 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6813 struct hfi1_ctxtdata *rcd;
6815 /* enable all kernel contexts */
6816 for (i = 0; i < dd->num_rcv_contexts; i++) {
6817 rcd = hfi1_rcd_get_by_index(dd, i);
6819 /* Ensure all non-user contexts (including vnic) are enabled */
6820 if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
6824 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6825 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6826 rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
6827 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6828 hfi1_rcvctrl(dd, rcvmask, rcd);
6833 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6837 * Non-interrupt SPC freeze handling.
6839 * This is a work-queue function outside of the triggering interrupt.
6841 void handle_freeze(struct work_struct *work)
6843 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6845 struct hfi1_devdata *dd = ppd->dd;
6847 /* wait for freeze indicators on all affected blocks */
6848 wait_for_freeze_status(dd, 1);
6850 /* SPC is now frozen */
6852 /* do send PIO freeze steps */
6855 /* do send DMA freeze steps */
6858 /* do send egress freeze steps - nothing to do */
6860 /* do receive freeze steps */
6864 * Unfreeze the hardware - clear the freeze, wait for each
6865 * block's frozen bit to clear, then clear the frozen flag.
6867 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6868 wait_for_freeze_status(dd, 0);
6871 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6872 wait_for_freeze_status(dd, 1);
6873 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6874 wait_for_freeze_status(dd, 0);
6877 /* do send PIO unfreeze steps for kernel contexts */
6878 pio_kernel_unfreeze(dd);
6880 /* do send DMA unfreeze steps */
6883 /* do send egress unfreeze steps - nothing to do */
6885 /* do receive unfreeze steps for kernel contexts */
6886 rxe_kernel_unfreeze(dd);
6889 * The unfreeze procedure touches global device registers when
6890 * it disables and re-enables RXE. Mark the device unfrozen
6891 * after all that is done so other parts of the driver waiting
6892 * for the device to unfreeze don't do things out of order.
6894 * The above implies that the meaning of HFI1_FROZEN flag is
6895 * "Device has gone into freeze mode and freeze mode handling
6896 * is still in progress."
6898 * The flag will be removed when freeze mode processing has
6901 dd->flags &= ~HFI1_FROZEN;
6902 wake_up(&dd->event_queue);
6904 /* no longer frozen */
6908 * Handle a link up interrupt from the 8051.
6910 * This is a work-queue function outside of the interrupt.
6912 void handle_link_up(struct work_struct *work)
6914 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6916 struct hfi1_devdata *dd = ppd->dd;
6918 set_link_state(ppd, HLS_UP_INIT);
6920 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6923 * OPA specifies that certain counters are cleared on a transition
6924 * to link up, so do that.
6926 clear_linkup_counters(dd);
6928 * And (re)set link up default values.
6930 set_linkup_defaults(ppd);
6933 * Set VL15 credits. Use cached value from verify cap interrupt.
6934 * In case of quick linkup or simulator, vl15 value will be set by
6935 * handle_linkup_change. VerifyCap interrupt handler will not be
6936 * called in those scenarios.
6938 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6939 set_up_vl15(dd, dd->vl15buf_cached);
6941 /* enforce link speed enabled */
6942 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6943 /* oops - current speed is not enabled, bounce */
6945 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6946 ppd->link_speed_active, ppd->link_speed_enabled);
6947 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6948 OPA_LINKDOWN_REASON_SPEED_POLICY);
6949 set_link_state(ppd, HLS_DN_OFFLINE);
6955 * Several pieces of LNI information were cached for SMA in ppd.
6956 * Reset these on link down
6958 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6960 ppd->neighbor_guid = 0;
6961 ppd->neighbor_port_number = 0;
6962 ppd->neighbor_type = 0;
6963 ppd->neighbor_fm_security = 0;
6966 static const char * const link_down_reason_strs[] = {
6967 [OPA_LINKDOWN_REASON_NONE] = "None",
6968 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6969 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6970 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6971 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6972 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6973 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6974 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6975 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6976 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6977 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6978 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6979 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6980 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6981 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6982 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6983 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6984 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6985 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6986 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6987 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6988 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6989 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6990 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6991 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6992 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6993 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6994 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6995 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6996 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6997 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6998 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6999 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7000 "Excessive buffer overrun",
7001 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7002 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7003 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7004 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7005 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7006 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7007 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7008 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7009 "Local media not installed",
7010 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7011 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7012 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7013 "End to end not installed",
7014 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7015 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7016 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7017 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7018 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7019 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7022 /* return the neighbor link down reason string */
7023 static const char *link_down_reason_str(u8 reason)
7025 const char *str = NULL;
7027 if (reason < ARRAY_SIZE(link_down_reason_strs))
7028 str = link_down_reason_strs[reason];
7036 * Handle a link down interrupt from the 8051.
7038 * This is a work-queue function outside of the interrupt.
7040 void handle_link_down(struct work_struct *work)
7042 u8 lcl_reason, neigh_reason = 0;
7043 u8 link_down_reason;
7044 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7047 static const char ldr_str[] = "Link down reason: ";
7049 if ((ppd->host_link_state &
7050 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7051 ppd->port_type == PORT_TYPE_FIXED)
7052 ppd->offline_disabled_reason =
7053 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7055 /* Go offline first, then deal with reading/writing through 8051 */
7056 was_up = !!(ppd->host_link_state & HLS_UP);
7057 set_link_state(ppd, HLS_DN_OFFLINE);
7058 xchg(&ppd->is_link_down_queued, 0);
7062 /* link down reason is only valid if the link was up */
7063 read_link_down_reason(ppd->dd, &link_down_reason);
7064 switch (link_down_reason) {
7065 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7066 /* the link went down, no idle message reason */
7067 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7070 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7072 * The neighbor reason is only valid if an idle message
7073 * was received for it.
7075 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7076 dd_dev_info(ppd->dd,
7077 "%sNeighbor link down message %d, %s\n",
7078 ldr_str, neigh_reason,
7079 link_down_reason_str(neigh_reason));
7081 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7082 dd_dev_info(ppd->dd,
7083 "%sHost requested link to go offline\n",
7087 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7088 ldr_str, link_down_reason);
7093 * If no reason, assume peer-initiated but missed
7094 * LinkGoingDown idle flits.
7096 if (neigh_reason == 0)
7097 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7099 /* went down while polling or going up */
7100 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7103 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7105 /* inform the SMA when the link transitions from up to down */
7106 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7107 ppd->neigh_link_down_reason.sma == 0) {
7108 ppd->local_link_down_reason.sma =
7109 ppd->local_link_down_reason.latest;
7110 ppd->neigh_link_down_reason.sma =
7111 ppd->neigh_link_down_reason.latest;
7114 reset_neighbor_info(ppd);
7116 /* disable the port */
7117 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7120 * If there is no cable attached, turn the DC off. Otherwise,
7121 * start the link bring up.
7123 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7124 dc_shutdown(ppd->dd);
7129 void handle_link_bounce(struct work_struct *work)
7131 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7135 * Only do something if the link is currently up.
7137 if (ppd->host_link_state & HLS_UP) {
7138 set_link_state(ppd, HLS_DN_OFFLINE);
7141 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7142 __func__, link_state_name(ppd->host_link_state));
7147 * Mask conversion: Capability exchange to Port LTP. The capability
7148 * exchange has an implicit 16b CRC that is mandatory.
7150 static int cap_to_port_ltp(int cap)
7152 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7154 if (cap & CAP_CRC_14B)
7155 port_ltp |= PORT_LTP_CRC_MODE_14;
7156 if (cap & CAP_CRC_48B)
7157 port_ltp |= PORT_LTP_CRC_MODE_48;
7158 if (cap & CAP_CRC_12B_16B_PER_LANE)
7159 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
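/*
 * Example (follows from the checks above): a capability mask of
 * CAP_CRC_14B | CAP_CRC_48B converts to PORT_LTP_CRC_MODE_16 |
 * PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48; the 16b mode is always
 * included because it is mandatory.
 */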
7165 * Convert an OPA Port LTP mask to capability mask
7167 int port_ltp_to_cap(int port_ltp)
7171 if (port_ltp & PORT_LTP_CRC_MODE_14)
7172 cap_mask |= CAP_CRC_14B;
7173 if (port_ltp & PORT_LTP_CRC_MODE_48)
7174 cap_mask |= CAP_CRC_48B;
7175 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7176 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7182 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7184 static int lcb_to_port_ltp(int lcb_crc)
7188 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7189 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7190 else if (lcb_crc == LCB_CRC_48B)
7191 port_ltp = PORT_LTP_CRC_MODE_48;
7192 else if (lcb_crc == LCB_CRC_14B)
7193 port_ltp = PORT_LTP_CRC_MODE_14;
7195 port_ltp = PORT_LTP_CRC_MODE_16;
7201 * Our neighbor has indicated that we are allowed to act as a fabric
7202 * manager, so place the full management partition key in pkey array
7203 * position 2 (0-based; see OPAv1, section 20.2.2.6.8). Note
7204 * that we should already have the limited management partition key in
7205 * array element 1, and also that the port is not yet up when
7206 * add_full_mgmt_pkey() is invoked.
7208 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7210 struct hfi1_devdata *dd = ppd->dd;
7212 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7213 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7214 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7215 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7216 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7217 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7218 hfi1_event_pkey_change(ppd->dd, ppd->port);
7221 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7223 if (ppd->pkeys[2] != 0) {
7225 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7226 hfi1_event_pkey_change(ppd->dd, ppd->port);
7231 * Convert the given link width to the OPA link width bitmask.
7233 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7238 * Simulator and quick linkup do not set the width.
7239 * Just set it to 4x without complaint.
7241 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7242 return OPA_LINK_WIDTH_4X;
7243 return 0; /* no lanes up */
7244 case 1: return OPA_LINK_WIDTH_1X;
7245 case 2: return OPA_LINK_WIDTH_2X;
7246 case 3: return OPA_LINK_WIDTH_3X;
7248 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7251 case 4: return OPA_LINK_WIDTH_4X;
7256 * Do a population count on the bottom nibble.
7258 static const u8 bit_counts[16] = {
7259 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7262 static inline u8 nibble_to_count(u8 nibble)
7264 return bit_counts[nibble & 0xf];
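/*
 * Example: an enable-lane nibble of 0xb (binary 1011: lanes 0, 1 and 3
 * active) gives nibble_to_count(0xb) == 3, i.e. a 3-lane width.
 */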
7268 * Read the active lane information from the 8051 registers and return
7271 * Active lane information is found in these 8051 registers:
7275 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7281 u8 tx_polarity_inversion;
7282 u8 rx_polarity_inversion;
7285 /* read the active lanes */
7286 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7287 &rx_polarity_inversion, &max_rate);
7288 read_local_lni(dd, &enable_lane_rx);
7290 /* convert to counts */
7291 tx = nibble_to_count(enable_lane_tx);
7292 rx = nibble_to_count(enable_lane_rx);
7295 * Set link_speed_active here, overriding what was set in
7296 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7297 * set the max_rate field in handle_verify_cap until v0.19.
7299 if ((dd->icode == ICODE_RTL_SILICON) &&
7300 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7301 /* max_rate: 0 = 12.5G, 1 = 25G */
7304 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7308 "%s: unexpected max rate %d, using 25Gb\n",
7309 __func__, (int)max_rate);
7312 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7318 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7319 enable_lane_tx, tx, enable_lane_rx, rx);
7320 *tx_width = link_width_to_bits(dd, tx);
7321 *rx_width = link_width_to_bits(dd, rx);
7325 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7326 * Valid after the end of VerifyCap and during LinkUp. Does not change
7327 * after link up. I.e. look elsewhere for downgrade information.
7330 * + bits [7:4] contain the number of active transmitters
7331 * + bits [3:0] contain the number of active receivers
7332 * These are numbers 1 through 4 and can be different values if the
7333 * link is asymmetric.
7335 * verify_cap_local_fm_link_width[0] retains its original value.
7337 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7341 u8 misc_bits, local_flags;
7342 u16 active_tx, active_rx;
7344 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7345 tx = widths >> 12;
7346 rx = (widths >> 8) & 0xf;
7348 *tx_width = link_width_to_bits(dd, tx);
7349 *rx_width = link_width_to_bits(dd, rx);
7351 /* print the active widths */
7352 get_link_widths(dd, &active_tx, &active_rx);
7356 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7357 * hardware information when the link first comes up.
7359 * The link width is not available until after VerifyCap.AllFramesReceived
7360 * (the trigger for handle_verify_cap), so this is outside that routine
7361 * and should be called when the 8051 signals linkup.
7363 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7365 u16 tx_width, rx_width;
7367 /* get end-of-LNI link widths */
7368 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7370 /* use tx_width as the link is supposed to be symmetric on link up */
7371 ppd->link_width_active = tx_width;
7372 /* link width downgrade active (LWD.A) starts out matching LW.A */
7373 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7374 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7375 /* per OPA spec, on link up LWD.E resets to LWD.S */
7376 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7377 /* cache the active egress rate (units [10^6 bits/sec]) */
7378 ppd->current_egress_rate = active_egress_rate(ppd);
7382 * Handle a verify capabilities interrupt from the 8051.
7384 * This is a work-queue function outside of the interrupt.
7386 void handle_verify_cap(struct work_struct *work)
7388 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7390 struct hfi1_devdata *dd = ppd->dd;
7392 u8 power_management;
7402 u16 active_tx, active_rx;
7403 u8 partner_supported_crc;
7407 set_link_state(ppd, HLS_VERIFY_CAP);
7409 lcb_shutdown(dd, 0);
7410 adjust_lcb_for_fpga_serdes(dd);
7412 read_vc_remote_phy(dd, &power_management, &continuous);
7413 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7414 &partner_supported_crc);
7415 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7416 read_remote_device_id(dd, &device_id, &device_rev);
7418 * And the 'MgmtAllowed' information, which is exchanged during
7419 * LNI, is also available at this point.
7421 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7422 /* print the active widths */
7423 get_link_widths(dd, &active_tx, &active_rx);
7425 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7426 (int)power_management, (int)continuous);
7428 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7429 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7430 (int)partner_supported_crc);
7431 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7432 (u32)remote_tx_rate, (u32)link_widths);
7433 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7434 (u32)device_id, (u32)device_rev);
7436 * The peer vAU value just read is the peer receiver value. HFI does
7437 * not support a transmit vAU of 0 (AU == 8). We advertised that
7438 * with Z=1 in the fabric capabilities sent to the peer. The peer
7439 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7440 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7441 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7442 * subject to the Z value exception.
7446 set_up_vau(dd, vau);
7449 * Set VL15 credits to 0 in global credit register. Cache remote VL15
7450 * credits value and wait for link-up interrupt to set it.
7453 dd->vl15buf_cached = vl15buf;
7455 /* set up the LCB CRC mode */
7456 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7458 /* order is important: use the lowest bit in common */
7459 if (crc_mask & CAP_CRC_14B)
7460 crc_val = LCB_CRC_14B;
7461 else if (crc_mask & CAP_CRC_48B)
7462 crc_val = LCB_CRC_48B;
7463 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7464 crc_val = LCB_CRC_12B_16B_PER_LANE;
7466 crc_val = LCB_CRC_16B;
7468 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7469 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7470 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
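/*
 * Worked example of the selection above (illustrative): if the local
 * enabled modes are 14B|16B and the peer supports 16B|48B, the common
 * mask contains only 16B; none of the 14B, 48B, or per-lane checks
 * match, so the always-supported 16B fallback is chosen.
 */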
7472 /* set (14b only) or clear sideband credit */
7473 reg = read_csr(dd, SEND_CM_CTRL);
7474 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7475 write_csr(dd, SEND_CM_CTRL,
7476 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7478 write_csr(dd, SEND_CM_CTRL,
7479 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7482 ppd->link_speed_active = 0; /* invalid value */
7483 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7484 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7485 switch (remote_tx_rate) {
7486 case 0: ppd->link_speed_active = OPA_LINK_SPEED_12_5G; break;
7489 case 1: ppd->link_speed_active = OPA_LINK_SPEED_25G; break;
7492 }
7493 } else {
7494 /* actual rate is highest bit of the ANDed rates */
7495 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7497 if (rate & 2)
7498 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7499 else if (rate & 1)
7500 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7501 }
7502 if (ppd->link_speed_active == 0) {
7503 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7504 __func__, (int)remote_tx_rate);
7505 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7509 * Cache the values of the supported, enabled, and active
7510 * LTP CRC modes to return in 'portinfo' queries. But the bit
7511 * flags that are returned in the portinfo query differ from
7512 * what's in the link_crc_mask, crc_sizes, and crc_val
7513 * variables. Convert these here.
7515 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7516 /* supported crc modes */
7517 ppd->port_ltp_crc_mode |=
7518 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7519 /* enabled crc modes */
7520 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7521 /* active crc mode */
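/*
 * Layout sketch (illustrative, derived from the shifts above) of the
 * assembled port_ltp_crc_mode value - three 4-bit fields:
 * [11:8] supported LTP CRC modes
 * [7:4] enabled LTP CRC modes
 * [3:0] active LTP CRC mode
 */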
7523 /* set up the remote credit return table */
7524 assign_remote_cm_au_table(dd, vcu);
7527 * The LCB is reset on entry to handle_verify_cap(), so this must
7528 * be applied on every link up.
7530 * Adjust LCB error kill enable to kill the link if
7531 * these RBUF errors are seen:
7532 * REPLAY_BUF_MBE_SMASK
7533 * FLIT_INPUT_BUF_MBE_SMASK
7535 if (is_ax(dd)) { /* fixed in B0 */
7536 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7537 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7538 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7539 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7542 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7543 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7545 /* give 8051 access to the LCB CSRs */
7546 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7547 set_8051_lcb_access(dd);
7549 if (ppd->mgmt_allowed)
7550 add_full_mgmt_pkey(ppd);
7552 /* tell the 8051 to go to LinkUp */
7553 set_link_state(ppd, HLS_GOING_UP);
7557 * Apply the link width downgrade enabled policy against the current active
7560 * Called when the enabled policy changes or the active link widths change.
7562 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7569 /* use the hls lock to avoid a race with actual link up */
7572 mutex_lock(&ppd->hls_lock);
7573 /* only apply if the link is up */
7574 if (ppd->host_link_state & HLS_DOWN) {
7575 /* still going up..wait and retry */
7576 if (ppd->host_link_state & HLS_GOING_UP) {
7577 if (++tries < 1000) {
7578 mutex_unlock(&ppd->hls_lock);
7579 usleep_range(100, 120); /* arbitrary */
7583 "%s: giving up waiting for link state change\n",
7589 lwde = ppd->link_width_downgrade_enabled;
7591 if (refresh_widths) {
7592 get_link_widths(ppd->dd, &tx, &rx);
7593 ppd->link_width_downgrade_tx_active = tx;
7594 ppd->link_width_downgrade_rx_active = rx;
7597 if (ppd->link_width_downgrade_tx_active == 0 ||
7598 ppd->link_width_downgrade_rx_active == 0) {
7599 /* the 8051 reported a dead link as a downgrade */
7600 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7601 } else if (lwde == 0) {
7602 /* downgrade is disabled */
7604 /* bounce if not at starting active width */
7605 if ((ppd->link_width_active !=
7606 ppd->link_width_downgrade_tx_active) ||
7607 (ppd->link_width_active !=
7608 ppd->link_width_downgrade_rx_active)) {
7610 "Link downgrade is disabled and link has downgraded, downing link\n");
7612 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7613 ppd->link_width_active,
7614 ppd->link_width_downgrade_tx_active,
7615 ppd->link_width_downgrade_rx_active);
7618 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7619 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7620 /* Tx or Rx is outside the enabled policy */
7622 "Link is outside of downgrade allowed, downing link\n");
7624 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7625 lwde, ppd->link_width_downgrade_tx_active,
7626 ppd->link_width_downgrade_rx_active);
7631 mutex_unlock(&ppd->hls_lock);
7634 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7635 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7636 set_link_state(ppd, HLS_DN_OFFLINE);
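/*
 * Minimal sketch of the policy test above (an illustration, not
 * driver code): the link may stay up only while both active widths
 * still fall within the enabled downgrade mask.
 */
static inline int widths_within_policy_example(u16 lwde, u16 tx_active,
u16 rx_active)
{
return (lwde & tx_active) && (lwde & rx_active);
}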
7642 * Handle a link downgrade interrupt from the 8051.
7644 * This is a work-queue function outside of the interrupt.
7646 void handle_link_downgrade(struct work_struct *work)
7648 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7649 link_downgrade_work);
7651 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7652 apply_link_downgrade_policy(ppd, 1);
7655 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7657 return flag_string(buf, buf_len, flags, dcc_err_flags,
7658 ARRAY_SIZE(dcc_err_flags));
7661 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7663 return flag_string(buf, buf_len, flags, lcb_err_flags,
7664 ARRAY_SIZE(lcb_err_flags));
7667 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7669 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7670 ARRAY_SIZE(dc8051_err_flags));
7673 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7675 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7676 ARRAY_SIZE(dc8051_info_err_flags));
7679 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7681 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7682 ARRAY_SIZE(dc8051_info_host_msg_flags));
7685 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7687 struct hfi1_pportdata *ppd = dd->pport;
7688 u64 info, err, host_msg;
7689 int queue_link_down = 0;
7692 /* look at the flags */
7693 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7694 /* 8051 information set by firmware */
7695 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7696 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7697 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7698 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7700 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7701 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7704 * Handle error flags.
7706 if (err & FAILED_LNI) {
7708 * LNI error indications are cleared by the 8051
7709 * only when starting polling. Only pay attention
7710 * to them when in the states that occur during
7713 if (ppd->host_link_state
7714 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7715 queue_link_down = 1;
7716 dd_dev_info(dd, "Link error: %s\n",
7717 dc8051_info_err_string(buf,
7722 err &= ~(u64)FAILED_LNI;
7724 /* unknown frames can happen during LNI, just count */
7725 if (err & UNKNOWN_FRAME) {
7726 ppd->unknown_frame_count++;
7727 err &= ~(u64)UNKNOWN_FRAME;
7730 /* report remaining errors, but do not do anything */
7731 dd_dev_err(dd, "8051 info error: %s\n",
7732 dc8051_info_err_string(buf, sizeof(buf),
7737 * Handle host message flags.
7739 if (host_msg & HOST_REQ_DONE) {
7741 * Presently, the driver does a busy wait for
7742 * host requests to complete. This is only an
7743 * informational message.
7744 * NOTE: The 8051 clears the host message
7745 * information *on the next 8051 command*.
7746 * Therefore, when linkup is achieved,
7747 * this flag will still be set.
7749 host_msg &= ~(u64)HOST_REQ_DONE;
7751 if (host_msg & BC_SMA_MSG) {
7752 queue_work(ppd->link_wq, &ppd->sma_message_work);
7753 host_msg &= ~(u64)BC_SMA_MSG;
7755 if (host_msg & LINKUP_ACHIEVED) {
7756 dd_dev_info(dd, "8051: Link up\n");
7757 queue_work(ppd->link_wq, &ppd->link_up_work);
7758 host_msg &= ~(u64)LINKUP_ACHIEVED;
7760 if (host_msg & EXT_DEVICE_CFG_REQ) {
7761 handle_8051_request(ppd);
7762 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7764 if (host_msg & VERIFY_CAP_FRAME) {
7765 queue_work(ppd->link_wq, &ppd->link_vc_work);
7766 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7768 if (host_msg & LINK_GOING_DOWN) {
7769 const char *extra = "";
7770 /* no downgrade action needed if going down */
7771 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7772 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7773 extra = " (ignoring downgrade)";
7775 dd_dev_info(dd, "8051: Link down%s\n", extra);
7776 queue_link_down = 1;
7777 host_msg &= ~(u64)LINK_GOING_DOWN;
7779 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7780 queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7781 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7784 /* report remaining messages, but do not do anything */
7785 dd_dev_info(dd, "8051 info host message: %s\n",
7786 dc8051_info_host_msg_string(buf,
7791 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7793 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7795 * Lost the 8051 heartbeat. If this happens, we
7796 * receive constant interrupts about it. Disable
7797 * the interrupt after the first.
7799 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7800 write_csr(dd, DC_DC8051_ERR_EN,
7801 read_csr(dd, DC_DC8051_ERR_EN) &
7802 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7804 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7807 /* report the error, but do not do anything */
7808 dd_dev_err(dd, "8051 error: %s\n",
7809 dc8051_err_string(buf, sizeof(buf), reg));
7812 if (queue_link_down) {
7814 * if the link is already going down or disabled, do not
7815 * queue another. If there's a link down entry already
7816 * queued, don't queue another one.
7818 if ((ppd->host_link_state &
7819 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7820 ppd->link_enabled == 0) {
7821 dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7822 __func__, ppd->host_link_state,
7825 if (xchg(&ppd->is_link_down_queued, 1) == 1)
7826 dd_dev_info(dd,
7827 "%s: link down request already queued\n",
7828 __func__);
7829 else
7830 queue_work(ppd->link_wq, &ppd->link_down_work);
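/*
 * Sketch of the queue-once guard above (illustrative): xchg()
 * atomically sets the flag and returns its previous value, so only
 * the first caller sees 0 and queues the work; the flag is cleared
 * again when the queued work runs.
 */
static inline bool queue_once_example(unsigned long *queued_flag)
{
return xchg(queued_flag, 1) == 0; /* true only for the first claimant */
}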
7835 static const char * const fm_config_txt[] = {
7837 "BadHeadDist: Distance violation between two head flits",
7839 "BadTailDist: Distance violation between two tail flits",
7841 "BadCtrlDist: Distance violation between two credit control flits",
7843 "BadCrdAck: Credits return for unsupported VL",
7845 "UnsupportedVLMarker: Received VL Marker",
7847 "BadPreempt: Exceeded the preemption nesting level",
7849 "BadControlFlit: Received unsupported control flit",
7852 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7855 static const char * const port_rcv_txt[] = {
7857 "BadPktLen: Illegal PktLen",
7859 "PktLenTooLong: Packet longer than PktLen",
7861 "PktLenTooShort: Packet shorter than PktLen",
7863 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7865 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7867 "BadL2: Illegal L2 opcode",
7869 "BadSC: Unsupported SC",
7871 "BadRC: Illegal RC",
7873 "PreemptError: Preempting with same VL",
7875 "PreemptVL15: Preempting a VL15 packet",
7878 #define OPA_LDR_FMCONFIG_OFFSET 16
7879 #define OPA_LDR_PORTRCV_OFFSET 0
7880 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7882 u64 info, hdr0, hdr1;
7885 struct hfi1_pportdata *ppd = dd->pport;
7889 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7890 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7891 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7892 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7893 /* set status bit */
7894 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7896 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7899 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7900 struct hfi1_pportdata *ppd = dd->pport;
7901 /* this counter saturates at (2^32) - 1 */
7902 if (ppd->link_downed < (u32)UINT_MAX)
7903 ppd->link_downed++;
7904 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7907 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7908 u8 reason_valid = 1;
7910 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7911 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7912 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7913 /* set status bit */
7914 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7924 extra = fm_config_txt[info];
7927 extra = fm_config_txt[info];
7928 if (ppd->port_error_action &
7929 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7932 * lcl_reason cannot be derived from info
7936 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7941 snprintf(buf, sizeof(buf), "reserved%lld", info);
7946 if (reason_valid && !do_bounce) {
7947 do_bounce = ppd->port_error_action &
7948 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7949 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7952 /* just report this */
7953 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7955 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7958 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7959 u8 reason_valid = 1;
7961 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7962 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7963 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7964 if (!(dd->err_info_rcvport.status_and_code &
7965 OPA_EI_STATUS_SMASK)) {
7966 dd->err_info_rcvport.status_and_code =
7967 info & OPA_EI_CODE_SMASK;
7968 /* set status bit */
7969 dd->err_info_rcvport.status_and_code |=
7970 OPA_EI_STATUS_SMASK;
7972 * save first 2 flits in the packet that caused
7975 dd->err_info_rcvport.packet_flit1 = hdr0;
7976 dd->err_info_rcvport.packet_flit2 = hdr1;
7989 extra = port_rcv_txt[info];
7993 snprintf(buf, sizeof(buf), "reserved%lld", info);
7998 if (reason_valid && !do_bounce) {
7999 do_bounce = ppd->port_error_action &
8000 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8001 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8004 /* just report this */
8005 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8006 " hdr0 0x%llx, hdr1 0x%llx\n",
8009 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8012 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8013 /* informative only */
8014 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8015 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8017 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8018 /* informative only */
8019 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8020 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8023 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8024 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8026 /* report any remaining errors */
8028 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8029 dcc_err_string(buf, sizeof(buf), reg));
8031 if (lcl_reason == 0)
8032 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8035 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8037 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8038 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8042 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8046 dd_dev_info(dd, "LCB Error: %s\n",
8047 lcb_err_string(buf, sizeof(buf), reg));
8051 * CCE block DC interrupt. Source is < 8.
8053 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8055 const struct err_reg_info *eri = &dc_errs[source];
8058 interrupt_clear_down(dd, 0, eri);
8059 } else if (source == 3 /* dc_lbm_int */) {
8061 * This indicates that a parity error has occurred on the
8062 * address/control lines presented to the LBM. The error
8063 * is a single pulse, there is no associated error flag,
8064 * and it is non-maskable. This is because if a parity
8065 * error occurs on the request the request is dropped.
8066 * This should never occur, but it is nice to know if it
8069 dd_dev_err(dd, "Parity error in DC LBM block\n");
8071 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8076 * TX block send credit interrupt. Source is < 160.
8078 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8080 sc_group_release_update(dd, source);
8084 * TX block SDMA interrupt. Source is < 48.
8086 * SDMA interrupts are grouped by type:
8088 * 0 - N-1 = SDma
8089 * N - 2N-1 = SDmaProgress
8090 * 2N - 3N-1 = SDmaIdle
8092 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8094 /* what interrupt */
8095 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8097 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8099 #ifdef CONFIG_SDMA_VERBOSITY
8100 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8101 slashstrip(__FILE__), __LINE__, __func__);
8102 sdma_dumpstate(&dd->per_sdma[which]);
8105 if (likely(what < 3 && which < dd->num_sdma)) {
8106 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8108 /* should not happen */
8109 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8114 * RX block receive available interrupt. Source is < 160.
8116 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8118 struct hfi1_ctxtdata *rcd;
8121 if (likely(source < dd->num_rcv_contexts)) {
8122 rcd = hfi1_rcd_get_by_index(dd, source);
8124 /* Check for non-user contexts, including vnic */
8125 if ((source < dd->first_dyn_alloc_ctxt) ||
8126 (rcd->sc && (rcd->sc->type == SC_KERNEL)))
8127 rcd->do_interrupt(rcd, 0);
8129 handle_user_interrupt(rcd);
8134 /* received an interrupt, but no rcd */
8135 err_detail = "dataless";
8137 /* received an interrupt, but are not using that context */
8138 err_detail = "out of range";
8140 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8141 err_detail, source);
8145 * RX block receive urgent interrupt. Source is < 160.
8147 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8149 struct hfi1_ctxtdata *rcd;
8152 if (likely(source < dd->num_rcv_contexts)) {
8153 rcd = hfi1_rcd_get_by_index(dd, source);
8155 /* only pay attention to user urgent interrupts */
8156 if ((source >= dd->first_dyn_alloc_ctxt) &&
8157 (!rcd->sc || (rcd->sc->type == SC_USER)))
8158 handle_user_interrupt(rcd);
8163 /* received an interrupt, but no rcd */
8164 err_detail = "dataless";
8166 /* received an interrupt, but are not using that context */
8167 err_detail = "out of range";
8169 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8170 err_detail, source);
8174 * Reserved range interrupt. Should not be called in normal operation.
8176 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8180 dd_dev_err(dd, "unexpected %s interrupt\n",
8181 is_reserved_name(name, sizeof(name), source));
8184 static const struct is_table is_table[] = {
8187 * name func interrupt func
8189 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8190 is_misc_err_name, is_misc_err_int },
8191 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8192 is_sdma_eng_err_name, is_sdma_eng_err_int },
8193 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8194 is_sendctxt_err_name, is_sendctxt_err_int },
8195 { IS_SDMA_START, IS_SDMA_END,
8196 is_sdma_eng_name, is_sdma_eng_int },
8197 { IS_VARIOUS_START, IS_VARIOUS_END,
8198 is_various_name, is_various_int },
8199 { IS_DC_START, IS_DC_END,
8200 is_dc_name, is_dc_int },
8201 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8202 is_rcv_avail_name, is_rcv_avail_int },
8203 { IS_RCVURGENT_START, IS_RCVURGENT_END,
8204 is_rcv_urgent_name, is_rcv_urgent_int },
8205 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8206 is_send_credit_name, is_send_credit_int},
8207 { IS_RESERVED_START, IS_RESERVED_END,
8208 is_reserved_name, is_reserved_int},
8212 * Interrupt source interrupt - called when the given source has an interrupt.
8213 * Source is a bit index into an array of 64-bit integers.
8215 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8217 const struct is_table *entry;
8219 /* avoids a double compare by walking the table in-order */
8220 for (entry = &is_table[0]; entry->is_name; entry++) {
8221 if (source < entry->end) {
8222 trace_hfi1_interrupt(dd, entry, source);
8223 entry->is_int(dd, source - entry->start);
8227 /* fell off the end */
8228 dd_dev_err(dd, "invalid interrupt source %u\n", source);
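/*
 * Dispatch example (illustrative): a source inside the SDMA range
 * falls past the earlier table entries and lands on the SDMA entry,
 * whose handler receives the range-relative offset.
 */
static inline void is_interrupt_example(struct hfi1_devdata *dd)
{
/* resolves to is_sdma_eng_int(dd, 2): engine 2, SDma group */
is_interrupt(dd, IS_SDMA_START + 2);
}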
8232 * General interrupt handler. This is able to correctly handle
8233 * all interrupts in case INTx is used.
8235 static irqreturn_t general_interrupt(int irq, void *data)
8237 struct hfi1_devdata *dd = data;
8238 u64 regs[CCE_NUM_INT_CSRS];
8242 this_cpu_inc(*dd->int_counter);
8244 /* phase 1: scan and clear all handled interrupts */
8245 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8246 if (dd->gi_mask[i] == 0) {
8247 regs[i] = 0; /* used later */
8250 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8252 /* only clear if anything is set */
8254 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8257 /* phase 2: call the appropriate handler */
8258 for_each_set_bit(bit, (unsigned long *)&regs[0],
8259 CCE_NUM_INT_CSRS * 64) {
8260 is_interrupt(dd, bit);
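/*
 * Sketch of the phase-1 scan-and-clear above (illustrative): read the
 * masked status, clear exactly the bits read, then dispatch. A source
 * that asserts after the read is not lost; it re-raises the interrupt
 * instead of being silently cleared.
 */
static inline u64 scan_and_clear_example(struct hfi1_devdata *dd, int csr_num)
{
u64 bits = read_csr(dd, CCE_INT_STATUS + (8 * csr_num)) &
dd->gi_mask[csr_num];

if (bits) /* only clear what will be handled */
write_csr(dd, CCE_INT_CLEAR + (8 * csr_num), bits);
return bits;
}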
8266 static irqreturn_t sdma_interrupt(int irq, void *data)
8268 struct sdma_engine *sde = data;
8269 struct hfi1_devdata *dd = sde->dd;
8272 #ifdef CONFIG_SDMA_VERBOSITY
8273 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8274 slashstrip(__FILE__), __LINE__, __func__);
8275 sdma_dumpstate(sde);
8278 this_cpu_inc(*dd->int_counter);
8280 /* This read_csr is really bad in the hot path */
8281 status = read_csr(dd,
8282 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8283 & sde->imask;
8284 if (likely(status)) {
8285 /* clear the interrupt(s) */
8287 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8290 /* handle the interrupt(s) */
8291 sdma_engine_interrupt(sde, status);
8293 dd_dev_err_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8300 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8301 * to ensure that the write completed. This does NOT guarantee that
8302 * queued DMA writes to memory from the chip are pushed.
8304 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8306 struct hfi1_devdata *dd = rcd->dd;
8307 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8309 mmiowb(); /* make sure everything before is written */
8310 write_csr(dd, addr, rcd->imask);
8311 /* force the above write on the chip and get a value back */
8312 (void)read_csr(dd, addr);
8315 /* force the receive interrupt */
8316 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8318 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8322 * Return non-zero if a packet is present.
8324 * This routine is called when rechecking for packets after the RcvAvail
8325 * interrupt has been cleared down. First, do a quick check of memory for
8326 * a packet present. If not found, use an expensive CSR read of the context
8327 * tail to determine the actual tail. The CSR read is necessary because there
8328 * is no method to push pending DMAs to memory other than an interrupt and we
8329 * are trying to determine if we need to force an interrupt.
8331 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8336 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8337 present = (rcd->seq_cnt ==
8338 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8339 else /* is RDMA rtail */
8340 present = (rcd->head != get_rcvhdrtail(rcd));
8345 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8346 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8347 return rcd->head != tail;
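/*
 * Usage sketch (illustrative) mirroring the tail of the IRQ and
 * thread handlers below: clear the interrupt first, then force a
 * fresh one if a packet slipped in during the window.
 */
static inline void clear_recheck_force_example(struct hfi1_ctxtdata *rcd)
{
clear_recv_intr(rcd);
if (check_packet_present(rcd))
force_recv_intr(rcd);
}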
8351 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8352 * This routine will try to handle packets immediately (latency), but if
8353 * it finds too many, it will invoke the thread handler (bandwidth). The
8354 * chip receive interrupt is *not* cleared down until this or the thread (if
8355 * invoked) is finished. The intent is to avoid extra interrupts while we
8356 * are processing packets anyway.
8358 static irqreturn_t receive_context_interrupt(int irq, void *data)
8360 struct hfi1_ctxtdata *rcd = data;
8361 struct hfi1_devdata *dd = rcd->dd;
8365 trace_hfi1_receive_interrupt(dd, rcd);
8366 this_cpu_inc(*dd->int_counter);
8367 aspm_ctx_disable(rcd);
8369 /* receive interrupt remains blocked while processing packets */
8370 disposition = rcd->do_interrupt(rcd, 0);
8373 * Too many packets were seen while processing packets in this
8374 * IRQ handler. Invoke the handler thread. The receive interrupt
8377 if (disposition == RCV_PKT_LIMIT)
8378 return IRQ_WAKE_THREAD;
8381 * The packet processor detected no more packets. Clear the receive
8382 * interrupt and recheck for a packet that may have arrived
8383 * after the previous check and interrupt clear. If a packet arrived,
8384 * force another interrupt.
8386 clear_recv_intr(rcd);
8387 present = check_packet_present(rcd);
8389 force_recv_intr(rcd);
8395 * Receive packet thread handler. This expects to be invoked with the
8396 * receive interrupt still blocked.
8398 static irqreturn_t receive_context_thread(int irq, void *data)
8400 struct hfi1_ctxtdata *rcd = data;
8403 /* receive interrupt is still blocked from the IRQ handler */
8404 (void)rcd->do_interrupt(rcd, 1);
8407 * The packet processor will only return if it detected no more
8408 * packets. Hold IRQs here so we can safely clear the interrupt and
8409 * recheck for a packet that may have arrived after the previous
8410 * check and the interrupt clear. If a packet arrived, force another
8413 local_irq_disable();
8414 clear_recv_intr(rcd);
8415 present = check_packet_present(rcd);
8417 force_recv_intr(rcd);
8423 /* ========================================================================= */
8425 u32 read_physical_state(struct hfi1_devdata *dd)
8429 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8430 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8431 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8434 u32 read_logical_state(struct hfi1_devdata *dd)
8438 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8439 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8440 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8443 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8447 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8448 /* clear current state, set new state */
8449 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8450 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8451 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8455 * Use the 8051 to read an LCB CSR.
8457 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8462 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8463 if (acquire_lcb_access(dd, 0) == 0) {
8464 *data = read_csr(dd, addr);
8465 release_lcb_access(dd, 0);
8471 /* register is an index of LCB registers: (offset - base) / 8 */
8472 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8473 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8474 if (ret != HCMD_SUCCESS)
8480 * Provide a cache for some of the LCB registers in case the LCB is unavailable.
8482 * (The LCB is unavailable in certain link states, for example.)
8489 static struct lcb_datum lcb_cache[] = {
8490 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8491 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8492 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8495 static void update_lcb_cache(struct hfi1_devdata *dd)
8501 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8502 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8504 /* Update if we get good data */
8505 if (likely(ret != -EBUSY))
8506 lcb_cache[i].val = val;
8510 static int read_lcb_cache(u32 off, u64 *val)
8514 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8515 if (lcb_cache[i].off == off) {
8516 *val = lcb_cache[i].val;
8521 pr_warn("%s bad offset 0x%x\n", __func__, off);
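/*
 * Usage sketch (illustrative): refresh the cache while the LCB is
 * still readable, then fall back to the cached value when a link
 * transition makes the CSR inaccessible.
 */
static inline u64 cached_replay_count_example(struct hfi1_devdata *dd)
{
u64 val = 0;

update_lcb_cache(dd); /* best-effort refresh */
read_lcb_cache(DC_LCB_ERR_INFO_RX_REPLAY_CNT, &val);
return val;
}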
8526 * Read an LCB CSR. Access may not be in host control, so check.
8527 * Return 0 on success, -EBUSY on failure.
8529 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8531 struct hfi1_pportdata *ppd = dd->pport;
8533 /* if up, go through the 8051 for the value */
8534 if (ppd->host_link_state & HLS_UP)
8535 return read_lcb_via_8051(dd, addr, data);
8536 /* if going up or down, check the cache, otherwise, no access */
8537 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8538 if (read_lcb_cache(addr, data))
8543 /* otherwise, host has access */
8544 *data = read_csr(dd, addr);
8549 * Use the 8051 to write an LCB CSR.
8551 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8556 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8557 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8558 if (acquire_lcb_access(dd, 0) == 0) {
8559 write_csr(dd, addr, data);
8560 release_lcb_access(dd, 0);
8566 /* register is an index of LCB registers: (offset - base) / 8 */
8567 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8568 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8569 if (ret != HCMD_SUCCESS)
8575 * Write an LCB CSR. Access may not be in host control, so check.
8576 * Return 0 on success, -EBUSY on failure.
8578 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8580 struct hfi1_pportdata *ppd = dd->pport;
8582 /* if up, go through the 8051 for the value */
8583 if (ppd->host_link_state & HLS_UP)
8584 return write_lcb_via_8051(dd, addr, data);
8585 /* if going up or down, no access */
8586 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8588 /* otherwise, host has access */
8589 write_csr(dd, addr, data);
8595 * < 0 = Linux error, not able to get access
8596 * > 0 = 8051 command RETURN_CODE
8598 static int do_8051_command(
8599 struct hfi1_devdata *dd,
8606 unsigned long timeout;
8608 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8610 mutex_lock(&dd->dc8051_lock);
8612 /* We can't send any commands to the 8051 if it's in reset */
8613 if (dd->dc_shutdown) {
8614 return_code = -ENODEV;
8619 * If an 8051 host command timed out previously, then the 8051 is offline.
8622 * On first timeout, attempt to reset and restart the entire DC
8623 * block (including 8051). (Is this too big of a hammer?)
8625 * If the 8051 times out a second time, the reset did not bring it
8626 * back to healthy life. In that case, fail any subsequent commands.
8628 if (dd->dc8051_timed_out) {
8629 if (dd->dc8051_timed_out > 1) {
8631 "Previous 8051 host command timed out, skipping command %u\n",
8633 return_code = -ENXIO;
8641 * If there is no timeout, then the 8051 command interface is
8642 * waiting for a command.
8646 * When writing an LCB CSR, out_data contains the full value to
8647 * be written, while in_data contains the relative LCB
8648 * address in 7:0. Do the work here, rather than the caller,
8649 * of distributing the write data to where it needs to go:
8652 * 39:00 -> in_data[47:8]
8653 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8654 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8656 if (type == HCMD_WRITE_LCB_CSR) {
8657 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8658 /* must preserve COMPLETED - it is tied to hardware */
8659 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8660 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8661 reg |= ((((*out_data) >> 40) & 0xff) <<
8662 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8663 | ((((*out_data) >> 48) & 0xffff) <<
8664 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8665 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8669 * Do two writes: the first to stabilize the type and req_data, the
8670 * second to activate.
8672 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8673 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8674 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8675 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8676 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8677 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8678 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8680 /* wait for completion, alternate: interrupt */
8681 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8683 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8684 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8687 if (time_after(jiffies, timeout)) {
8688 dd->dc8051_timed_out++;
8689 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8692 return_code = -ETIMEDOUT;
8699 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8700 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8701 if (type == HCMD_READ_LCB_CSR) {
8702 /* top 16 bits are in a different register */
8703 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8704 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8706 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8709 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8710 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8711 dd->dc8051_timed_out = 0;
8713 * Clear command for next user.
8715 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8718 mutex_unlock(&dd->dc8051_lock);
8722 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8724 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8727 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8728 u8 lane_id, u32 config_data)
8733 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8734 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8735 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8736 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8737 if (ret != HCMD_SUCCESS) {
8739 "load 8051 config: field id %d, lane %d, err %d\n",
8740 (int)field_id, (int)lane_id, ret);
8746 * Read the 8051 firmware "registers". Use the RAM directly. Always
8747 * set the result, even on error.
8748 * Return 0 on success, -errno on failure
8750 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8757 /* address start depends on the lane_id */
8759 addr = (4 * NUM_GENERAL_FIELDS)
8760 + (lane_id * 4 * NUM_LANE_FIELDS);
8763 addr += field_id * 4;
8765 /* read is in 8-byte chunks, hardware will truncate the address down */
8766 ret = read_8051_data(dd, addr, 8, &big_data);
8769 /* extract the 4 bytes we want */
8771 *result = (u32)(big_data >> 32);
8773 *result = (u32)big_data;
8776 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8777 __func__, lane_id, field_id);
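/*
 * Addressing sketch for read_8051_config() (illustrative): fields are
 * 4-byte words in 8051 RAM, but reads return aligned 8-byte chunks,
 * so bit 2 of the byte address selects which half of big_data holds
 * the wanted word.
 */
static inline u32 pick_config_word_example(u64 big_data, u32 byte_addr)
{
return (byte_addr & 0x4) ? (u32)(big_data >> 32) : (u32)big_data;
}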
8783 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8788 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8789 | power_management << POWER_MANAGEMENT_SHIFT;
8790 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8791 GENERAL_CONFIG, frame);
8794 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8795 u16 vl15buf, u8 crc_sizes)
8799 frame = (u32)vau << VAU_SHIFT
8801 | (u32)vcu << VCU_SHIFT
8802 | (u32)vl15buf << VL15BUF_SHIFT
8803 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8804 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8805 GENERAL_CONFIG, frame);
8808 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8809 u8 *flag_bits, u16 *link_widths)
8813 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8815 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8816 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8817 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8820 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8827 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8828 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8829 | (u32)link_widths << LINK_WIDTH_SHIFT;
8830 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8834 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8839 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8840 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8841 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8844 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8849 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8850 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8851 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8852 & REMOTE_DEVICE_REV_MASK;
8855 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8860 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8861 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8862 /* Clear, then set field */
8863 frame &= ~mask;
8864 frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8865 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8869 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8874 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8875 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8876 STS_FM_VERSION_MAJOR_MASK;
8877 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8878 STS_FM_VERSION_MINOR_MASK;
8880 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8881 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8882 STS_FM_VERSION_PATCH_MASK;
8885 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8890 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8891 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8892 & POWER_MANAGEMENT_MASK;
8893 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8894 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8897 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8898 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8902 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8903 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8904 *z = (frame >> Z_SHIFT) & Z_MASK;
8905 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8906 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8907 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8910 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8916 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8918 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8919 & REMOTE_TX_RATE_MASK;
8920 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8923 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8927 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8928 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8931 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8935 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8936 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8939 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8941 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8944 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8946 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8949 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8955 if (dd->pport->host_link_state & HLS_UP) {
8956 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8959 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8960 & LINK_QUALITY_MASK;
8964 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8968 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8969 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8972 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8976 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8977 *ldr = (frame & 0xff);
8980 static int read_tx_settings(struct hfi1_devdata *dd,
8982 u8 *tx_polarity_inversion,
8983 u8 *rx_polarity_inversion,
8989 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8990 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8991 & ENABLE_LANE_TX_MASK;
8992 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8993 & TX_POLARITY_INVERSION_MASK;
8994 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8995 & RX_POLARITY_INVERSION_MASK;
8996 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9000 static int write_tx_settings(struct hfi1_devdata *dd,
9002 u8 tx_polarity_inversion,
9003 u8 rx_polarity_inversion,
9008 /* no need to mask, all variable sizes match field widths */
9009 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9010 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9011 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9012 | max_rate << MAX_RATE_SHIFT;
9013 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9017 * Read an idle LCB message.
9019 * Returns 0 on success, -EINVAL on error
9021 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9025 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9026 if (ret != HCMD_SUCCESS) {
9027 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9031 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9032 /* return only the payload as we already know the type */
9033 *data_out >>= IDLE_PAYLOAD_SHIFT;
9038 * Read an idle SMA message. To be done in response to a notification from
9041 * Returns 0 on success, -EINVAL on error
9043 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9045 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9050 * Send an idle LCB message.
9052 * Returns 0 on success, -EINVAL on error
9054 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9058 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9059 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9060 if (ret != HCMD_SUCCESS) {
9061 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9069 * Send an idle SMA message.
9071 * Returns 0 on success, -EINVAL on error
9073 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9077 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9078 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9079 return send_idle_message(dd, data);
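/*
 * Framing sketch (illustrative): an idle LCB message carries its type
 * and payload in separate fields. This mirrors how send_idle_sma()
 * packs the fields and how read_idle_message() strips the type back
 * off before returning the payload.
 */
static inline u64 frame_idle_sma_example(u64 payload)
{
return ((payload & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
}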
9083 * Initialize the LCB then do a quick link up. This may or may not be
9086 * return 0 on success, -errno on error
9088 static int do_quick_linkup(struct hfi1_devdata *dd)
9092 lcb_shutdown(dd, 0);
9095 /* LCB_CFG_LOOPBACK.VAL = 2 */
9096 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9097 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9098 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9099 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9102 /* start the LCBs */
9103 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9104 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9106 /* simulator only loopback steps */
9107 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9108 /* LCB_CFG_RUN.EN = 1 */
9109 write_csr(dd, DC_LCB_CFG_RUN,
9110 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9112 ret = wait_link_transfer_active(dd, 10);
9116 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9117 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9122 * When doing quick linkup and not in loopback, both
9123 * sides must be done with LCB set-up before either
9124 * starts the quick linkup. Put a delay here so that
9125 * both sides can be started and have a chance to be
9126 * done with LCB set up before resuming.
9129 "Pausing for peer to be finished with LCB set up\n");
9131 dd_dev_err(dd, "Continuing with quick linkup\n");
9134 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9135 set_8051_lcb_access(dd);
9138 * State "quick" LinkUp request sets the physical link state to
9139 * LinkUp without a verify capability sequence.
9140 * This state is in simulator v37 and later.
9142 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9143 if (ret != HCMD_SUCCESS) {
9145 "%s: set physical link state to quick LinkUp failed with return %d\n",
9148 set_host_lcb_access(dd);
9149 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9156 return 0; /* success */
9160 * Set the SerDes to internal loopback mode.
9161 * Returns 0 on success, -errno on error.
9163 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9167 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9168 if (ret == HCMD_SUCCESS)
9171 "Set physical link state to SerDes Loopback failed with return %d\n",
9179 * Do all special steps to set up loopback.
9181 static int init_loopback(struct hfi1_devdata *dd)
9183 dd_dev_info(dd, "Entering loopback mode\n");
9185 /* all loopbacks should disable self GUID check */
9186 write_csr(dd, DC_DC8051_CFG_MODE,
9187 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9190 * The simulator has only one loopback option - LCB. Switch
9191 * to that option, which includes quick link up.
9193 * Accept all valid loopback values.
9195 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9196 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9197 loopback == LOOPBACK_CABLE)) {
9198 loopback = LOOPBACK_LCB;
9203 /* handle serdes loopback */
9204 if (loopback == LOOPBACK_SERDES) {
9205 /* internal serdes loopback needs quick linkup on RTL */
9206 if (dd->icode == ICODE_RTL_SILICON)
9208 return set_serdes_loopback_mode(dd);
9211 /* LCB loopback - handled at poll time */
9212 if (loopback == LOOPBACK_LCB) {
9213 quick_linkup = 1; /* LCB is always quick linkup */
9215 /* not supported in emulation due to emulation RTL changes */
9216 if (dd->icode == ICODE_FPGA_EMULATION) {
9218 "LCB loopback not supported in emulation\n");
9224 /* external cable loopback requires no extra steps */
9225 if (loopback == LOOPBACK_CABLE)
9228 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9233 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9234 * used in the Verify Capability link width attribute.
9236 static u16 opa_to_vc_link_widths(u16 opa_widths)
9241 static const struct link_bits {
9244 } opa_link_xlate[] = {
9245 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9246 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9247 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9248 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9251 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9252 if (opa_widths & opa_link_xlate[i].from)
9253 result |= opa_link_xlate[i].to;
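/*
 * Worked example (illustrative): an FM enabling 1X and 4X passes
 * OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X, which the table maps to VC
 * bits 0 and 3, i.e. a result of 0x9.
 */
static inline u16 opa_to_vc_example(void)
{
return opa_to_vc_link_widths(OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X);
}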
9259 * Set link attributes before moving to polling.
9261 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9263 struct hfi1_devdata *dd = ppd->dd;
9265 u8 tx_polarity_inversion;
9266 u8 rx_polarity_inversion;
9269 /* reset our fabric serdes to clear any lingering problems */
9270 fabric_serdes_reset(dd);
9272 /* set the local tx rate - need to read-modify-write */
9273 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9274 &rx_polarity_inversion, &ppd->local_tx_rate);
9276 goto set_local_link_attributes_fail;
9278 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9279 /* set the tx rate to the fastest enabled */
9280 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9281 ppd->local_tx_rate = 1;
9283 ppd->local_tx_rate = 0;
9285 /* set the tx rate to all enabled */
9286 ppd->local_tx_rate = 0;
9287 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9288 ppd->local_tx_rate |= 2;
9289 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9290 ppd->local_tx_rate |= 1;
9293 enable_lane_tx = 0xF; /* enable all four lanes */
9294 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9295 rx_polarity_inversion, ppd->local_tx_rate);
9296 if (ret != HCMD_SUCCESS)
9297 goto set_local_link_attributes_fail;
9300 * DC supports continuous updates.
9302 ret = write_vc_local_phy(dd,
9303 0 /* no power management */,
9304 1 /* continuous updates */);
9305 if (ret != HCMD_SUCCESS)
9306 goto set_local_link_attributes_fail;
9308 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9309 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9310 ppd->port_crc_mode_enabled);
9311 if (ret != HCMD_SUCCESS)
9312 goto set_local_link_attributes_fail;
9314 ret = write_vc_local_link_width(dd, 0, 0,
9315 opa_to_vc_link_widths(
9316 ppd->link_width_enabled));
9317 if (ret != HCMD_SUCCESS)
9318 goto set_local_link_attributes_fail;
9320 /* let peer know who we are */
9321 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9322 if (ret == HCMD_SUCCESS)
9325 set_local_link_attributes_fail:
9327 "Failed to set local link attributes, return 0x%x\n",
9333 * Call this to start the link.
9334 * Do not do anything if the link is disabled.
9335 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9337 int start_link(struct hfi1_pportdata *ppd)
9340 * Tune the SerDes to a ballpark setting for optimal signal and bit
9341 * error rate. Needs to be done before starting the link.
9345 if (!ppd->driver_link_ready) {
9346 dd_dev_info(ppd->dd,
9347 "%s: stopping link start because driver is not ready\n",
9353 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9354 * pkey table can be configured properly if the HFI unit is connected
9355 * to switch port with MgmtAllowed=NO
9357 clear_full_mgmt_pkey(ppd);
9359 return set_link_state(ppd, HLS_DN_POLL);
9362 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9364 struct hfi1_devdata *dd = ppd->dd;
9366 unsigned long timeout;
9369 * Some QSFP cables have a quirk that asserts the IntN line as a side
9370 * effect of power up on plug-in. We ignore this false positive
9371 * interrupt until the module has finished powering up by waiting for
9372 * a minimum timeout of the module inrush initialization time of
9373 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9374 * module have stabilized.
9379 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9381 timeout = jiffies + msecs_to_jiffies(2000);
9383 mask = read_csr(dd, dd->hfi1_id ?
9384 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9385 if (!(mask & QSFP_HFI0_INT_N))
9387 if (time_after(jiffies, timeout)) {
9388 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9396 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9398 struct hfi1_devdata *dd = ppd->dd;
9401 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9404 * Clear the status register to avoid an immediate interrupt
9405 * when we re-enable the IntN pin
9407 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9409 mask |= (u64)QSFP_HFI0_INT_N;
9411 mask &= ~(u64)QSFP_HFI0_INT_N;
9413 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9416 void reset_qsfp(struct hfi1_pportdata *ppd)
9418 struct hfi1_devdata *dd = ppd->dd;
9419 u64 mask, qsfp_mask;
9421 /* Disable INT_N from triggering QSFP interrupts */
9422 set_qsfp_int_n(ppd, 0);
9424 /* Reset the QSFP */
9425 mask = (u64)QSFP_HFI0_RESET_N;
9427 qsfp_mask = read_csr(dd,
9428 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9429 qsfp_mask &= ~mask;
9430 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9433 udelay(10);
9435 qsfp_mask |= mask;
9436 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9439 wait_for_qsfp_init(ppd);
9442 * Allow INT_N to trigger the QSFP interrupt to watch
9443 * for alarms and warnings
9445 set_qsfp_int_n(ppd, 1);
9448 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9449 u8 *qsfp_interrupt_status)
9451 struct hfi1_devdata *dd = ppd->dd;
9453 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9454 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9455 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9458 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9459 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9460 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9464 * The remaining alarms/warnings don't matter if the link is down.
9466 if (ppd->host_link_state & HLS_DOWN)
9469 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9470 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9471 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9474 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9475 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9476 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9479 /* Byte 2 is vendor specific */
9481 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9482 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9483 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9486 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9487 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9488 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9491 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9492 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9493 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9496 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9497 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9498 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9501 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9502 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9503 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9506 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9507 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9508 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9511 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9512 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9513 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9516 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9517 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9518 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9521 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9522 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9523 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9526 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9527 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9528 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9531 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9532 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9533 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9536 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9537 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9538 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9541 /* Bytes 9-10 and 11-12 are reserved */
9542 /* Bytes 13-15 are vendor specific */
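/*
 * For reference, the status buffer checked above is read starting at QSFP
 * module byte 6 (see the one_qsfp_read() call in qsfp_event() below), so
 * qsfp_interrupt_status[N] corresponds to free-side module byte 6 + N.
 * Below is a minimal table-driven sketch of the same checks; the struct
 * and array are illustrative only and are not part of the driver:
 */
struct qsfp_flag_check_example {
	u8 index;		/* offset into qsfp_interrupt_status[] */
	u8 mask;		/* alarm and warning bits for the field */
	const char *msg;	/* message to log when a bit is set */
};

static const struct qsfp_flag_check_example qsfp_checks_example[] = {
	{ 0, QSFP_HIGH_TEMP_ALARM | QSFP_HIGH_TEMP_WARNING,
	  "QSFP cable temperature too high" },
	{ 0, QSFP_LOW_TEMP_ALARM | QSFP_LOW_TEMP_WARNING,
	  "QSFP cable temperature too low" },
	{ 1, QSFP_HIGH_VCC_ALARM | QSFP_HIGH_VCC_WARNING,
	  "QSFP supply voltage too high" },
	/* ... and so on, one entry per explicit check above ... */
};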
9547 /* This routine will only be scheduled if the QSFP module present signal is asserted */
9548 void qsfp_event(struct work_struct *work)
9550 struct qsfp_data *qd;
9551 struct hfi1_pportdata *ppd;
9552 struct hfi1_devdata *dd;
9554 qd = container_of(work, struct qsfp_data, qsfp_work);
9559 if (!qsfp_mod_present(ppd))
9562 if (ppd->host_link_state == HLS_DN_DISABLE) {
9563 dd_dev_info(ppd->dd,
9564 "%s: stopping link start because link is disabled\n",
9570 * Turn DC back on after cable has been re-inserted. Up until
9571 * now, the DC has been in reset to save power.
9575 if (qd->cache_refresh_required) {
9576 set_qsfp_int_n(ppd, 0);
9578 wait_for_qsfp_init(ppd);
9581 * Allow INT_N to trigger the QSFP interrupt to watch
9582 * for alarms and warnings
9584 set_qsfp_int_n(ppd, 1);
9589 if (qd->check_interrupt_flags) {
9590 u8 qsfp_interrupt_status[16] = {0,};
9592 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9593 &qsfp_interrupt_status[0], 16) != 16) {
9595 "%s: Failed to read status of QSFP module\n",
9598 unsigned long flags;
9600 handle_qsfp_error_conditions(
9601 ppd, qsfp_interrupt_status);
9602 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9603 ppd->qsfp_info.check_interrupt_flags = 0;
9604 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9610 static void init_qsfp_int(struct hfi1_devdata *dd)
9612 struct hfi1_pportdata *ppd = dd->pport;
9613 u64 qsfp_mask, cce_int_mask;
9614 const int qsfp1_int_smask = QSFP1_INT % 64;
9615 const int qsfp2_int_smask = QSFP2_INT % 64;
9618 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9619 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9620 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9621 * the index of the appropriate CSR in the CCEIntMask CSR array
9623 cce_int_mask = read_csr(dd, CCE_INT_MASK +
9624 (8 * (QSFP1_INT / 64)));
9626 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9627 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9630 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9631 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
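/*
 * A worked example of the CCE interrupt mask indexing above; the source
 * number used here is hypothetical and only the arithmetic is being
 * illustrated. If QSFP1_INT were interrupt source 242, its mask bit would
 * live in 64-bit CSR number 242 / 64 = 3, i.e. at address
 * CCE_INT_MASK + (8 * 3), at bit position 242 % 64 = 50 within that CSR.
 * Since Qsfp1Int and Qsfp2Int are adjacent sources, both bits always land
 * in the same CSR, which is why one read/modify/write suffices.
 */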
9635 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9636 /* Clear current status to avoid spurious interrupts */
9637 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9639 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9642 set_qsfp_int_n(ppd, 0);
9644 /* Handle active low nature of INT_N and MODPRST_N pins */
9645 if (qsfp_mod_present(ppd))
9646 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9648 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9653  * Do a one-time initialization of the LCB block.
9655 static void init_lcb(struct hfi1_devdata *dd)
9657 /* simulator does not correctly handle LCB cclk loopback, skip */
9658 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9661 /* the DC has been reset earlier in the driver load */
9663 /* set LCB for cclk loopback on the port */
9664 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9665 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9666 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9667 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9668 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9669 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9670 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9674  * Perform a test read on the QSFP. Return 0 on success, -ERRNO
9675  * on failure.
9677 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9683  * Report success if not a QSFP or, if it is a QSFP, but the cable is
9684  * not present.
9686 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9689 /* read byte 2, the status byte */
9690 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9696 return 0; /* success */
9700 * Values for QSFP retry.
9702 * Give up after 10s (20 x 500ms). The overall timeout was empirically
9703 * arrived at from experience on a large cluster.
9705 #define MAX_QSFP_RETRIES 20
9706 #define QSFP_RETRY_WAIT 500 /* msec */
9709 * Try a QSFP read. If it fails, schedule a retry for later.
9710 * Called on first link activation after driver load.
9712 static void try_start_link(struct hfi1_pportdata *ppd)
9714 if (test_qsfp_read(ppd)) {
9716 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9717 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9720 dd_dev_info(ppd->dd,
9721 "QSFP not responding, waiting and retrying %d\n",
9722 (int)ppd->qsfp_retry_count);
9723 ppd->qsfp_retry_count++;
9724 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9725 msecs_to_jiffies(QSFP_RETRY_WAIT));
9728 ppd->qsfp_retry_count = 0;
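/*
 * A short worked example of the retry budget above: the delayed work is
 * rescheduled every QSFP_RETRY_WAIT (500) ms, which msecs_to_jiffies()
 * converts to timer ticks (125 jiffies at HZ=250, 500 jiffies at HZ=1000).
 * With MAX_QSFP_RETRIES set to 20 attempts, an unresponsive module is
 * abandoned after roughly 20 * 500 ms = 10 s, matching the comment above
 * the defines.
 */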
9734 * Workqueue function to start the link after a delay.
9736 void handle_start_link(struct work_struct *work)
9738 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9739 start_link_work.work);
9740 try_start_link(ppd);
9743 int bringup_serdes(struct hfi1_pportdata *ppd)
9745 struct hfi1_devdata *dd = ppd->dd;
9749 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9750 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9752 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9755 guid = dd->base_guid + ppd->port - 1;
9756 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9759 /* Set linkinit_reason on power up per OPA spec */
9760 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9762 /* one-time init of the LCB */
9766 ret = init_loopback(dd);
9772 if (ppd->port_type == PORT_TYPE_QSFP) {
9773 set_qsfp_int_n(ppd, 0);
9774 wait_for_qsfp_init(ppd);
9775 set_qsfp_int_n(ppd, 1);
9778 try_start_link(ppd);
9782 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9784 struct hfi1_devdata *dd = ppd->dd;
9787  * Shut down the link and keep it down. First clear the flag that says
9788  * the driver wants to allow the link to be up (driver_link_ready).
9789  * Then make sure the link is not automatically restarted
9790  * (link_enabled). Cancel any pending restart. And finally go offline.
9793 ppd->driver_link_ready = 0;
9794 ppd->link_enabled = 0;
9796 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9797 flush_delayed_work(&ppd->start_link_work);
9798 cancel_delayed_work_sync(&ppd->start_link_work);
9800 ppd->offline_disabled_reason =
9801 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9802 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9803 OPA_LINKDOWN_REASON_SMA_DISABLED);
9804 set_link_state(ppd, HLS_DN_OFFLINE);
9806 /* disable the port */
9807 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9810 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9812 struct hfi1_pportdata *ppd;
9815 ppd = (struct hfi1_pportdata *)(dd + 1);
9816 for (i = 0; i < dd->num_pports; i++, ppd++) {
9817 ppd->ibport_data.rvp.rc_acks = NULL;
9818 ppd->ibport_data.rvp.rc_qacks = NULL;
9819 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9820 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9821 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9822 if (!ppd->ibport_data.rvp.rc_acks ||
9823 !ppd->ibport_data.rvp.rc_delayed_comp ||
9824 !ppd->ibport_data.rvp.rc_qacks)
9832 * index is the index into the receive array
9834 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9835 u32 type, unsigned long pa, u16 order)
9839 if (!(dd->flags & HFI1_PRESENT))
9842 if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9845 } else if (type > PT_INVALID) {
9847 "unexpected receive array type %u for index %u, not handled\n",
9851 trace_hfi1_put_tid(dd, index, type, pa, order);
9853 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9854 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9855 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9856 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9857 << RCV_ARRAY_RT_ADDR_SHIFT;
9858 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9859 writeq(reg, dd->rcvarray_wc + (index * 8));
9861 if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9863 * Eager entries are written and flushed
9865 * Expected entries are flushed every 4 writes
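/*
 * A worked example of the RcvArray encoding above; the physical address is
 * hypothetical and only the field packing is being illustrated. For a
 * 4 KB-aligned buffer at pa = 0x12345000, the address field receives
 * pa >> RT_ADDR_SHIFT = 0x12345, the order value is placed in the
 * buffer-size field, and the write-enable bit is set, all packed into one
 * 64-bit register written with writeq() above.
 */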
9872 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9874 struct hfi1_devdata *dd = rcd->dd;
9877 /* this could be optimized */
9878 for (i = rcd->eager_base; i < rcd->eager_base +
9879 rcd->egrbufs.alloced; i++)
9880 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9882 for (i = rcd->expected_base;
9883 i < rcd->expected_base + rcd->expected_count; i++)
9884 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9887 static const char * const ib_cfg_name_strings[] = {
9888 "HFI1_IB_CFG_LIDLMC",
9889 "HFI1_IB_CFG_LWID_DG_ENB",
9890 "HFI1_IB_CFG_LWID_ENB",
9892 "HFI1_IB_CFG_SPD_ENB",
9894 "HFI1_IB_CFG_RXPOL_ENB",
9895 "HFI1_IB_CFG_LREV_ENB",
9896 "HFI1_IB_CFG_LINKLATENCY",
9897 "HFI1_IB_CFG_HRTBT",
9898 "HFI1_IB_CFG_OP_VLS",
9899 "HFI1_IB_CFG_VL_HIGH_CAP",
9900 "HFI1_IB_CFG_VL_LOW_CAP",
9901 "HFI1_IB_CFG_OVERRUN_THRESH",
9902 "HFI1_IB_CFG_PHYERR_THRESH",
9903 "HFI1_IB_CFG_LINKDEFAULT",
9904 "HFI1_IB_CFG_PKEYS",
9906 "HFI1_IB_CFG_LSTATE",
9907 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9908 "HFI1_IB_CFG_PMA_TICKS",
9912 static const char *ib_cfg_name(int which)
9914 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9916 return ib_cfg_name_strings[which];
9919 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9921 struct hfi1_devdata *dd = ppd->dd;
9925 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9926 val = ppd->link_width_enabled;
9928 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9929 val = ppd->link_width_active;
9931 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9932 val = ppd->link_speed_enabled;
9934 case HFI1_IB_CFG_SPD: /* current Link speed */
9935 val = ppd->link_speed_active;
9938 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9939 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9940 case HFI1_IB_CFG_LINKLATENCY:
9943 case HFI1_IB_CFG_OP_VLS:
9944 val = ppd->vls_operational;
9946 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9947 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9949 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9950 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9952 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9953 val = ppd->overrun_threshold;
9955 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9956 val = ppd->phy_error_threshold;
9958 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9959 val = dd->link_default;
9962 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9963 case HFI1_IB_CFG_PMA_TICKS:
9966 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9969 "%s: which %s: not implemented\n",
9971 ib_cfg_name(which));
9979 * The largest MAD packet size.
9981 #define MAX_MAD_PACKET 2048
9984 * Return the maximum header bytes that can go on the _wire_
9985 * for this device. This count includes the ICRC which is
9986  * not part of the packet held in memory but it is appended
9987  * by the HW.
9988 * This is dependent on the device's receive header entry size.
9989 * HFI allows this to be set per-receive context, but the
9990 * driver presently enforces a global value.
9992 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9995 * The maximum non-payload (MTU) bytes in LRH.PktLen are
9996 * the Receive Header Entry Size minus the PBC (or RHF) size
9997 * plus one DW for the ICRC appended by HW.
9999 * dd->rcd[0].rcvhdrqentsize is in DW.
10000  * We use rcd[0] as all contexts will have the same value. Also,
10001 * the first kernel context would have been allocated by now so
10002 * we are guaranteed a valid value.
10004 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
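/*
 * A worked example of the computation above, assuming for illustration a
 * receive header queue entry size of 32 DWs: the maximum wire header is
 * (32 - 2 + 1) << 2 = 124 bytes, i.e. 30 DWs of header after the 2-DW
 * PBC/RHF overhead is subtracted, plus 1 DW for the appended ICRC,
 * converted from DWs to bytes by the shift.
 */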
10009 * @ppd - per port data
10011 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
10012 * registers compare against LRH.PktLen, so use the max bytes included
10015  * This routine changes all VL values except VL15, which it maintains at
10016  * the same value.
10018 static void set_send_length(struct hfi1_pportdata *ppd)
10020 struct hfi1_devdata *dd = ppd->dd;
10021 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10022 u32 maxvlmtu = dd->vld[15].mtu;
10023 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10024 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10025 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10029 for (i = 0; i < ppd->vls_supported; i++) {
10030 if (dd->vld[i].mtu > maxvlmtu)
10031 maxvlmtu = dd->vld[i].mtu;
10033 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10034 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10035 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10037 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10038 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10039 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10041 write_csr(dd, SEND_LEN_CHECK0, len1);
10042 write_csr(dd, SEND_LEN_CHECK1, len2);
10043 /* adjust kernel credit return thresholds based on new MTUs */
10044 /* all kernel receive contexts have the same hdrqentsize */
10045 for (i = 0; i < ppd->vls_supported; i++) {
10046 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10047 sc_mtu_to_threshold(dd->vld[i].sc,
10049 dd->rcd[0]->rcvhdrqentsize));
10050 for (j = 0; j < INIT_SC_PER_VL; j++)
10051 sc_set_cr_threshold(
10052 pio_select_send_context_vl(dd, j, i),
10055 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10056 sc_mtu_to_threshold(dd->vld[15].sc,
10058 dd->rcd[0]->rcvhdrqentsize));
10059 sc_set_cr_threshold(dd->vld[15].sc, thres);
10061 /* Adjust maximum MTU for the port in DC */
10062 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10063 (ilog2(maxvlmtu >> 8) + 1);
10064 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10065 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10066 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10067 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10068 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
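/*
 * Worked examples of the MTU cap encoding above: a maximum VL MTU of 2048
 * bytes encodes as ilog2(2048 >> 8) + 1 = ilog2(8) + 1 = 4, and an
 * 8192-byte MTU as ilog2(32) + 1 = 6, while the non-power-of-two
 * 10240-byte MTU cannot use this formula and is special-cased to
 * DCC_CFG_PORT_MTU_CAP_10240.
 */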
10071 static void set_lidlmc(struct hfi1_pportdata *ppd)
10075 struct hfi1_devdata *dd = ppd->dd;
10076 u32 mask = ~((1U << ppd->lmc) - 1);
10077 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10081  * Program 0 in the CSR if the port LID is extended. This prevents
10082  * 9B packets from being sent out for large LIDs.
10084 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10085 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10086 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10087 c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10088 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10089 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10090 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10091 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10094 * Iterate over all the send contexts and set their SLID check
10096 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10097 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10098 (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10099 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10101 for (i = 0; i < dd->chip_send_contexts; i++) {
10102 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10104 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10107 /* Now we have to do the same thing for the sdma engines */
10108 sdma_update_lmc(dd, mask, lid);
10111 static const char *state_completed_string(u32 completed)
10113 static const char * const state_completed[] = {
10119 if (completed < ARRAY_SIZE(state_completed))
10120 return state_completed[completed];
10125 static const char all_lanes_dead_timeout_expired[] =
10126 "All lanes were inactive – was the interconnect media removed?";
10127 static const char tx_out_of_policy[] =
10128 "Passing lanes on local port do not meet the local link width policy";
10129 static const char no_state_complete[] =
10130 "State timeout occurred before link partner completed the state";
10131 static const char * const state_complete_reasons[] = {
10132 [0x00] = "Reason unknown",
10133 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10134 [0x02] = "Link partner reported failure",
10135 [0x10] = "Unable to achieve frame sync on any lane",
10137 "Unable to find a common bit rate with the link partner",
10139 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10141 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10142 [0x14] = no_state_complete,
10144 "State timeout occurred before link partner identified equalization presets",
10146 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10147 [0x17] = tx_out_of_policy,
10148 [0x20] = all_lanes_dead_timeout_expired,
10150 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10151 [0x22] = no_state_complete,
10153 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10154 [0x24] = tx_out_of_policy,
10155 [0x30] = all_lanes_dead_timeout_expired,
10157 "State timeout occurred waiting for host to process received frames",
10158 [0x32] = no_state_complete,
10160 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10161 [0x34] = tx_out_of_policy,
10162 [0x35] = "Negotiated link width is mutually exclusive",
10164 "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10165 [0x37] = "Unable to resolve secure data exchange",
10168 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10171 const char *str = NULL;
10173 if (code < ARRAY_SIZE(state_complete_reasons))
10174 str = state_complete_reasons[code];
10181 /* describe the given last state complete frame */
10182 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10183 const char *prefix)
10185 struct hfi1_devdata *dd = ppd->dd;
10193  *		[ 0: 0] - success
10194  *		[ 3: 1] - state
10195  *		[ 7: 4] - next state timeout
10196  *		[15: 8] - reason code
10197  *		[31:16] - lanes
10199 success = frame & 0x1;
10200 state = (frame >> 1) & 0x7;
10201 reason = (frame >> 8) & 0xff;
10202 lanes = (frame >> 16) & 0xffff;
10204 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10206 	dd_dev_err(dd, "   last reported state: %s (0x%x)\n",
10207 state_completed_string(state), state);
10208 dd_dev_err(dd, " state successfully completed: %s\n",
10209 success ? "yes" : "no");
10210 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10211 reason, state_complete_reason_code_string(ppd, reason));
10212 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
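/*
 * An example decode, with a hypothetical frame value chosen for
 * illustration: frame = 0x000f1104 yields success = 0x000f1104 & 0x1 = 0
 * (the state failed), state = (0x000f1104 >> 1) & 0x7 = 2, reason =
 * (0x000f1104 >> 8) & 0xff = 0x11 ("Unable to find a common bit rate with
 * the link partner"), and a passing lane mask of
 * (0x000f1104 >> 16) & 0xffff = 0x000f, i.e. lanes 0-3.
 */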
10216 * Read the last state complete frames and explain them. This routine
10217 * expects to be called if the link went down during link negotiation
10218 * and initialization (LNI). That is, anywhere between polling and link up.
10220 static void check_lni_states(struct hfi1_pportdata *ppd)
10222 u32 last_local_state;
10223 u32 last_remote_state;
10225 read_last_local_state(ppd->dd, &last_local_state);
10226 read_last_remote_state(ppd->dd, &last_remote_state);
10229 * Don't report anything if there is nothing to report. A value of
10230 * 0 means the link was taken down while polling and there was no
10231 * training in-process.
10233 if (last_local_state == 0 && last_remote_state == 0)
10236 decode_state_complete(ppd, last_local_state, "transmitted");
10237 decode_state_complete(ppd, last_remote_state, "received");
10240 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10241 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10244 unsigned long timeout;
10246 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10247 timeout = jiffies + msecs_to_jiffies(wait_ms);
10249 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10252 if (time_after(jiffies, timeout)) {
10254 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10262 /* called when the logical link state is not down as it should be */
10263 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10265 struct hfi1_devdata *dd = ppd->dd;
10268 * Bring link up in LCB loopback
10270 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10271 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10272 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10274 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10275 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10276 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10277 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10279 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10280 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10282 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10283 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10285 wait_link_transfer_active(dd, 100);
10288 * Bring the link down again.
10290 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10291 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10292 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10294 /* adjust ppd->statusp, if needed */
10295 update_statusp(ppd, IB_PORT_DOWN);
10297 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10301 * Helper for set_link_state(). Do not call except from that routine.
10302 * Expects ppd->hls_mutex to be held.
10304 * @rem_reason value to be sent to the neighbor
10306 * LinkDownReasons only set if transition succeeds.
10308 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10310 struct hfi1_devdata *dd = ppd->dd;
10311 u32 previous_state;
10314 update_lcb_cache(dd);
10316 previous_state = ppd->host_link_state;
10317 ppd->host_link_state = HLS_GOING_OFFLINE;
10319 /* start offline transition */
10320 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10322 if (ret != HCMD_SUCCESS) {
10324 "Failed to transition to Offline link state, return %d\n",
10328 if (ppd->offline_disabled_reason ==
10329 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10330 ppd->offline_disabled_reason =
10331 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10334 * Wait for offline transition. It can take a while for
10335 * the link to go down.
10337 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
10342 * Now in charge of LCB - must be after the physical state is
10343 * offline.quiet and before host_link_state is changed.
10345 set_host_lcb_access(dd);
10346 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10348 /* make sure the logical state is also down */
10349 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10351 force_logical_link_state_down(ppd);
10353 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10355 if (ppd->port_type == PORT_TYPE_QSFP &&
10356 ppd->qsfp_info.limiting_active &&
10357 qsfp_mod_present(ppd)) {
10360 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10362 set_qsfp_tx(ppd, 0);
10363 release_chip_resource(dd, qsfp_resource(dd));
10365 /* not fatal, but should warn */
10367 "Unable to acquire lock to turn off QSFP TX\n");
10372 * The LNI has a mandatory wait time after the physical state
10373 * moves to Offline.Quiet. The wait time may be different
10374 * depending on how the link went down. The 8051 firmware
10375 * will observe the needed wait time and only move to ready
10376 * when that is completed. The largest of the quiet timeouts
10377 * is 6s, so wait that long and then at least 0.5s more for
10378 * other transitions, and another 0.5s for a buffer.
10380 ret = wait_fm_ready(dd, 7000);
10383 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10384 /* state is really offline, so make it so */
10385 ppd->host_link_state = HLS_DN_OFFLINE;
10390  * The state is now offline and the 8051 is ready to accept host
10391  * requests:
10392 * - change our state
10393 * - notify others if we were previously in a linkup state
10395 ppd->host_link_state = HLS_DN_OFFLINE;
10396 if (previous_state & HLS_UP) {
10397 /* went down while link was up */
10398 handle_linkup_change(dd, 0);
10399 } else if (previous_state
10400 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10401 /* went down while attempting link up */
10402 check_lni_states(ppd);
10405 /* the active link width (downgrade) is 0 on link down */
10406 ppd->link_width_active = 0;
10407 ppd->link_width_downgrade_tx_active = 0;
10408 ppd->link_width_downgrade_rx_active = 0;
10409 ppd->current_egress_rate = 0;
10413 /* return the link state name */
10414 static const char *link_state_name(u32 state)
10417 int n = ilog2(state);
10418 static const char * const names[] = {
10419 [__HLS_UP_INIT_BP] = "INIT",
10420 [__HLS_UP_ARMED_BP] = "ARMED",
10421 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10422 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10423 [__HLS_DN_POLL_BP] = "POLL",
10424 [__HLS_DN_DISABLE_BP] = "DISABLE",
10425 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10426 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10427 [__HLS_GOING_UP_BP] = "GOING_UP",
10428 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10429 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10432 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10433 return name ? name : "unknown";
10436 /* return the link state reason name */
10437 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10439 if (state == HLS_UP_INIT) {
10440 switch (ppd->linkinit_reason) {
10441 case OPA_LINKINIT_REASON_LINKUP:
10443 case OPA_LINKINIT_REASON_FLAPPING:
10444 return "(FLAPPING)";
10445 case OPA_LINKINIT_OUTSIDE_POLICY:
10446 return "(OUTSIDE_POLICY)";
10447 case OPA_LINKINIT_QUARANTINED:
10448 return "(QUARANTINED)";
10449 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10450 return "(INSUFIC_CAPABILITY)";
10459 * driver_pstate - convert the driver's notion of a port's
10460 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10461 * Return -1 (converted to a u32) to indicate error.
10463 u32 driver_pstate(struct hfi1_pportdata *ppd)
10465 switch (ppd->host_link_state) {
10468 case HLS_UP_ACTIVE:
10469 return IB_PORTPHYSSTATE_LINKUP;
10471 return IB_PORTPHYSSTATE_POLLING;
10472 case HLS_DN_DISABLE:
10473 return IB_PORTPHYSSTATE_DISABLED;
10474 case HLS_DN_OFFLINE:
10475 return OPA_PORTPHYSSTATE_OFFLINE;
10476 case HLS_VERIFY_CAP:
10477 return IB_PORTPHYSSTATE_POLLING;
10479 return IB_PORTPHYSSTATE_POLLING;
10480 case HLS_GOING_OFFLINE:
10481 return OPA_PORTPHYSSTATE_OFFLINE;
10482 case HLS_LINK_COOLDOWN:
10483 return OPA_PORTPHYSSTATE_OFFLINE;
10484 case HLS_DN_DOWNDEF:
10486 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10487 ppd->host_link_state);
10493 * driver_lstate - convert the driver's notion of a port's
10494 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10495 * (converted to a u32) to indicate error.
10497 u32 driver_lstate(struct hfi1_pportdata *ppd)
10499 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10500 return IB_PORT_DOWN;
10502 switch (ppd->host_link_state & HLS_UP) {
10504 return IB_PORT_INIT;
10506 return IB_PORT_ARMED;
10507 case HLS_UP_ACTIVE:
10508 return IB_PORT_ACTIVE;
10510 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10511 ppd->host_link_state);
10516 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10517 u8 neigh_reason, u8 rem_reason)
10519 if (ppd->local_link_down_reason.latest == 0 &&
10520 ppd->neigh_link_down_reason.latest == 0) {
10521 ppd->local_link_down_reason.latest = lcl_reason;
10522 ppd->neigh_link_down_reason.latest = neigh_reason;
10523 ppd->remote_link_down_reason = rem_reason;
10528 * Verify if BCT for data VLs is non-zero.
10530 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10532 return !!ppd->actual_vls_operational;
10536 * Change the physical and/or logical link state.
10538 * Do not call this routine while inside an interrupt. It contains
10539 * calls to routines that can take multiple seconds to finish.
10541 * Returns 0 on success, -errno on failure.
10543 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10545 struct hfi1_devdata *dd = ppd->dd;
10546 struct ib_event event = {.device = NULL};
10548 int orig_new_state, poll_bounce;
10550 mutex_lock(&ppd->hls_lock);
10552 orig_new_state = state;
10553 if (state == HLS_DN_DOWNDEF)
10554 state = dd->link_default;
10556 /* interpret poll -> poll as a link bounce */
10557 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10558 state == HLS_DN_POLL;
10560 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10561 link_state_name(ppd->host_link_state),
10562 link_state_name(orig_new_state),
10563 poll_bounce ? "(bounce) " : "",
10564 link_state_reason_name(ppd, state));
10567 * If we're going to a (HLS_*) link state that implies the logical
10568 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10569 * reset is_sm_config_started to 0.
10571 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10572 ppd->is_sm_config_started = 0;
10575  * Do nothing if the states match. Let a poll to poll link bounce
10576  * go through.
10578 if (ppd->host_link_state == state && !poll_bounce)
10583 if (ppd->host_link_state == HLS_DN_POLL &&
10584 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10586 * Quick link up jumps from polling to here.
10588 * Whether in normal or loopback mode, the
10589 * simulator jumps from polling to link up.
10590 * Accept that here.
10593 } else if (ppd->host_link_state != HLS_GOING_UP) {
10598 * Wait for Link_Up physical state.
10599  * Physical and Logical states should already have
10600  * transitioned to LinkUp and LinkInit respectively.
10602 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10605 "%s: physical state did not change to LINK-UP\n",
10610 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10613 "%s: logical state did not change to INIT\n",
10618 /* clear old transient LINKINIT_REASON code */
10619 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10620 ppd->linkinit_reason =
10621 OPA_LINKINIT_REASON_LINKUP;
10623 /* enable the port */
10624 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10626 handle_linkup_change(dd, 1);
10627 ppd->host_link_state = HLS_UP_INIT;
10630 if (ppd->host_link_state != HLS_UP_INIT)
10633 if (!data_vls_operational(ppd)) {
10635 "%s: data VLs not operational\n", __func__);
10640 set_logical_state(dd, LSTATE_ARMED);
10641 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10644 "%s: logical state did not change to ARMED\n",
10648 ppd->host_link_state = HLS_UP_ARMED;
10650 * The simulator does not currently implement SMA messages,
10651  * so neighbor_normal is not set. Set it here when we first
10652  * move to Armed.
10654 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10655 ppd->neighbor_normal = 1;
10657 case HLS_UP_ACTIVE:
10658 if (ppd->host_link_state != HLS_UP_ARMED)
10661 set_logical_state(dd, LSTATE_ACTIVE);
10662 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10665 "%s: logical state did not change to ACTIVE\n",
10668 /* tell all engines to go running */
10669 sdma_all_running(dd);
10670 ppd->host_link_state = HLS_UP_ACTIVE;
10672 	/* Signal the IB layer that the port has gone active */
10673 event.device = &dd->verbs_dev.rdi.ibdev;
10674 event.element.port_num = ppd->port;
10675 event.event = IB_EVENT_PORT_ACTIVE;
10679 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10680 ppd->host_link_state == HLS_DN_OFFLINE) &&
10683 /* Hand LED control to the DC */
10684 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10686 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10687 u8 tmp = ppd->link_enabled;
10689 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10691 ppd->link_enabled = tmp;
10694 ppd->remote_link_down_reason = 0;
10696 if (ppd->driver_link_ready)
10697 ppd->link_enabled = 1;
10700 set_all_slowpath(ppd->dd);
10701 ret = set_local_link_attributes(ppd);
10705 ppd->port_error_action = 0;
10706 ppd->host_link_state = HLS_DN_POLL;
10708 if (quick_linkup) {
10709 /* quick linkup does not go into polling */
10710 ret = do_quick_linkup(dd);
10712 ret1 = set_physical_link_state(dd, PLS_POLLING);
10713 if (ret1 != HCMD_SUCCESS) {
10715 "Failed to transition to Polling link state, return 0x%x\n",
10720 ppd->offline_disabled_reason =
10721 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10723 * If an error occurred above, go back to offline. The
10724 * caller may reschedule another attempt.
10727 goto_offline(ppd, 0);
10729 log_physical_state(ppd, PLS_POLLING);
10731 case HLS_DN_DISABLE:
10732 /* link is disabled */
10733 ppd->link_enabled = 0;
10735 /* allow any state to transition to disabled */
10737 /* must transition to offline first */
10738 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10739 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10742 ppd->remote_link_down_reason = 0;
10745 if (!dd->dc_shutdown) {
10746 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10747 if (ret1 != HCMD_SUCCESS) {
10749 "Failed to transition to Disabled link state, return 0x%x\n",
10754 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10757 "%s: physical state did not change to DISABLED\n",
10763 ppd->host_link_state = HLS_DN_DISABLE;
10765 case HLS_DN_OFFLINE:
10766 if (ppd->host_link_state == HLS_DN_DISABLE)
10769 /* allow any state to transition to offline */
10770 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10772 ppd->remote_link_down_reason = 0;
10774 case HLS_VERIFY_CAP:
10775 if (ppd->host_link_state != HLS_DN_POLL)
10777 ppd->host_link_state = HLS_VERIFY_CAP;
10778 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10781 if (ppd->host_link_state != HLS_VERIFY_CAP)
10784 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10785 if (ret1 != HCMD_SUCCESS) {
10787 "Failed to transition to link up state, return 0x%x\n",
10792 ppd->host_link_state = HLS_GOING_UP;
10795 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10796 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10798 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10807 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10808 __func__, link_state_name(ppd->host_link_state),
10809 link_state_name(state));
10813 mutex_unlock(&ppd->hls_lock);
10816 ib_dispatch_event(&event);
10821 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10827 case HFI1_IB_CFG_LIDLMC:
10830 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10832 * The VL Arbitrator high limit is sent in units of 4k
10833 * bytes, while HFI stores it in units of 64 bytes.
10836 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10837 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10838 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10840 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10841 /* HFI only supports POLL as the default link down state */
10842 if (val != HLS_DN_POLL)
10845 case HFI1_IB_CFG_OP_VLS:
10846 if (ppd->vls_operational != val) {
10847 ppd->vls_operational = val;
10853 * For link width, link width downgrade, and speed enable, always AND
10854 * the setting with what is actually supported. This has two benefits.
10855 * First, enabled can't have unsupported values, no matter what the
10856 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10857 * "fill in with your supported value" have all the bits in the
10858 * field set, so simply ANDing with supported has the desired result.
10860 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10861 ppd->link_width_enabled = val & ppd->link_width_supported;
10863 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10864 ppd->link_width_downgrade_enabled =
10865 val & ppd->link_width_downgrade_supported;
10867 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10868 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10870 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10872 * HFI does not follow IB specs, save this value
10873 * so we can report it, if asked.
10875 ppd->overrun_threshold = val;
10877 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10879 * HFI does not follow IB specs, save this value
10880 * so we can report it, if asked.
10882 ppd->phy_error_threshold = val;
10885 case HFI1_IB_CFG_MTU:
10886 set_send_length(ppd);
10889 case HFI1_IB_CFG_PKEYS:
10890 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10891 set_partition_keys(ppd);
10895 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10896 dd_dev_info(ppd->dd,
10897 "%s: which %s, val 0x%x: not implemented\n",
10898 __func__, ib_cfg_name(which), val);
10904 /* begin functions related to vl arbitration table caching */
10905 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10909 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10910 VL_ARB_LOW_PRIO_TABLE_SIZE);
10911 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10912 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10915 * Note that we always return values directly from the
10916 * 'vl_arb_cache' (and do no CSR reads) in response to a
10917 * 'Get(VLArbTable)'. This is obviously correct after a
10918 * 'Set(VLArbTable)', since the cache will then be up to
10919 * date. But it's also correct prior to any 'Set(VLArbTable)'
10920  * since then both the cache and the relevant h/w registers
10921  * will be zeroed.
10924 for (i = 0; i < MAX_PRIO_TABLE; i++)
10925 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10929 * vl_arb_lock_cache
10931  * All other vl_arb_* functions should be called only after locking
10932  * the cache.
10934 static inline struct vl_arb_cache *
10935 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10937 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10939 spin_lock(&ppd->vl_arb_cache[idx].lock);
10940 return &ppd->vl_arb_cache[idx];
10943 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10945 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10948 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10949 struct ib_vl_weight_elem *vl)
10951 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10954 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10955 struct ib_vl_weight_elem *vl)
10957 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10960 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10961 struct ib_vl_weight_elem *vl)
10963 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10966 /* end functions related to vl arbitration table caching */
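/*
 * A minimal usage sketch of the caching helpers above, mirroring how
 * fm_get_table() below services a Get(VLArbTable) from the cache without
 * any CSR reads (the local variables are shown for illustration only):
 *
 *	struct vl_arb_cache *vlc;
 *	struct ib_vl_weight_elem tbl[VL_ARB_TABLE_SIZE];
 *
 *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, tbl);
 *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 */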
10968 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10969 u32 size, struct ib_vl_weight_elem *vl)
10971 struct hfi1_devdata *dd = ppd->dd;
10973 unsigned int i, is_up = 0;
10974 int drain, ret = 0;
10976 mutex_lock(&ppd->hls_lock);
10978 if (ppd->host_link_state & HLS_UP)
10981 drain = !is_ax(dd) && is_up;
10985 * Before adjusting VL arbitration weights, empty per-VL
10986 * FIFOs, otherwise a packet whose VL weight is being
10987  * set to 0 could get stuck in a FIFO with no chance to
10988  * egress.
10990 ret = stop_drain_data_vls(dd);
10995 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11000 for (i = 0; i < size; i++, vl++) {
11002 * NOTE: The low priority shift and mask are used here, but
11003 * they are the same for both the low and high registers.
11005 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11006 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11007 | (((u64)vl->weight
11008 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11009 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11010 write_csr(dd, target + (i * 8), reg);
11012 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11015 open_fill_data_vls(dd); /* reopen all VLs */
11018 mutex_unlock(&ppd->hls_lock);
11024 * Read one credit merge VL register.
11026 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11027 struct vl_limit *vll)
11029 u64 reg = read_csr(dd, csr);
11031 vll->dedicated = cpu_to_be16(
11032 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11033 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11034 vll->shared = cpu_to_be16(
11035 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11036 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11040 * Read the current credit merge limits.
11042 static int get_buffer_control(struct hfi1_devdata *dd,
11043 struct buffer_control *bc, u16 *overall_limit)
11048 /* not all entries are filled in */
11049 memset(bc, 0, sizeof(*bc));
11051 /* OPA and HFI have a 1-1 mapping */
11052 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11053 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11055 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11056 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11058 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11059 bc->overall_shared_limit = cpu_to_be16(
11060 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11061 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11063 *overall_limit = (reg
11064 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11065 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11066 return sizeof(struct buffer_control);
11069 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11074 /* each register contains 16 SC->VLnt mappings, 4 bits each */
11075 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11076 for (i = 0; i < sizeof(u64); i++) {
11077 		u8 byte = *(((u8 *)&reg) + i);
11079 dp->vlnt[2 * i] = byte & 0xf;
11080 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11083 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11084 for (i = 0; i < sizeof(u64); i++) {
11085 		u8 byte = *(((u8 *)&reg) + i);
11087 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11088 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11090 return sizeof(struct sc2vlnt);
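/*
 * A worked example of the nibble unpacking above; the register value is
 * hypothetical. If the low byte of DCC_CFG_SC_VL_TABLE_15_0 read back as
 * 0x21, then dp->vlnt[0] = 0x21 & 0xf = 1 and
 * dp->vlnt[1] = (0x21 & 0xf0) >> 4 = 2, i.e. SC0 maps to VLnt 1 and SC1
 * maps to VLnt 2.
 */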
11093 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11094 struct ib_vl_weight_elem *vl)
11098 for (i = 0; i < nelems; i++, vl++) {
11104 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11106 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11108 0, dp->vlnt[0] & 0xf,
11109 1, dp->vlnt[1] & 0xf,
11110 2, dp->vlnt[2] & 0xf,
11111 3, dp->vlnt[3] & 0xf,
11112 4, dp->vlnt[4] & 0xf,
11113 5, dp->vlnt[5] & 0xf,
11114 6, dp->vlnt[6] & 0xf,
11115 7, dp->vlnt[7] & 0xf,
11116 8, dp->vlnt[8] & 0xf,
11117 9, dp->vlnt[9] & 0xf,
11118 10, dp->vlnt[10] & 0xf,
11119 11, dp->vlnt[11] & 0xf,
11120 12, dp->vlnt[12] & 0xf,
11121 13, dp->vlnt[13] & 0xf,
11122 14, dp->vlnt[14] & 0xf,
11123 15, dp->vlnt[15] & 0xf));
11124 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11125 DC_SC_VL_VAL(31_16,
11126 16, dp->vlnt[16] & 0xf,
11127 17, dp->vlnt[17] & 0xf,
11128 18, dp->vlnt[18] & 0xf,
11129 19, dp->vlnt[19] & 0xf,
11130 20, dp->vlnt[20] & 0xf,
11131 21, dp->vlnt[21] & 0xf,
11132 22, dp->vlnt[22] & 0xf,
11133 23, dp->vlnt[23] & 0xf,
11134 24, dp->vlnt[24] & 0xf,
11135 25, dp->vlnt[25] & 0xf,
11136 26, dp->vlnt[26] & 0xf,
11137 27, dp->vlnt[27] & 0xf,
11138 28, dp->vlnt[28] & 0xf,
11139 29, dp->vlnt[29] & 0xf,
11140 30, dp->vlnt[30] & 0xf,
11141 31, dp->vlnt[31] & 0xf));
11144 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11148 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11149 what, (int)limit, idx);
11152 /* change only the shared limit portion of SendCmGlobalCredit */
11153 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11157 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11158 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11159 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11160 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11163 /* change only the total credit limit portion of SendCmGlobalCredit */
11164 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11168 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11169 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11170 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11171 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11174 /* set the given per-VL shared limit */
11175 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11180 if (vl < TXE_NUM_DATA_VL)
11181 addr = SEND_CM_CREDIT_VL + (8 * vl);
11183 addr = SEND_CM_CREDIT_VL15;
11185 reg = read_csr(dd, addr);
11186 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11187 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11188 write_csr(dd, addr, reg);
11191 /* set the given per-VL dedicated limit */
11192 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11197 if (vl < TXE_NUM_DATA_VL)
11198 addr = SEND_CM_CREDIT_VL + (8 * vl);
11200 addr = SEND_CM_CREDIT_VL15;
11202 reg = read_csr(dd, addr);
11203 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11204 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11205 write_csr(dd, addr, reg);
11208 /* spin until the given per-VL status mask bits clear */
11209 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11212 unsigned long timeout;
11215 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11217 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11220 return; /* success */
11221 if (time_after(jiffies, timeout))
11222 break; /* timed out */
11227 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11228 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11230 * If this occurs, it is likely there was a credit loss on the link.
11231 * The only recovery from that is a link bounce.
11234 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
11238 * The number of credits on the VLs may be changed while everything
11239 * is "live", but the following algorithm must be followed due to
11240 * how the hardware is actually implemented. In particular,
11241 * Return_Credit_Status[] is the only correct status check.
11243 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11244 * set Global_Shared_Credit_Limit = 0
11246 * mask0 = all VLs that are changing either dedicated or shared limits
11247 * set Shared_Limit[mask0] = 0
11248 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11249 * if (changing any dedicated limit)
11250 * mask1 = all VLs that are lowering dedicated limits
11251 * lower Dedicated_Limit[mask1]
11252 * spin until Return_Credit_Status[mask1] == 0
11253 * raise Dedicated_Limits
11254 * raise Shared_Limits
11255 * raise Global_Shared_Credit_Limit
11257 * lower = if the new limit is lower, set the limit to the new value
11258 * raise = if the new limit is higher than the current value (may be changed
11259 * earlier in the algorithm), set the new limit to the new value
11261 int set_buffer_control(struct hfi1_pportdata *ppd,
11262 struct buffer_control *new_bc)
11264 struct hfi1_devdata *dd = ppd->dd;
11265 u64 changing_mask, ld_mask, stat_mask;
11267 int i, use_all_mask;
11268 int this_shared_changing;
11269 int vl_count = 0, ret;
11271 * A0: add the variable any_shared_limit_changing below and in the
11272 * algorithm above. If removing A0 support, it can be removed.
11274 int any_shared_limit_changing;
11275 struct buffer_control cur_bc;
11276 u8 changing[OPA_MAX_VLS];
11277 u8 lowering_dedicated[OPA_MAX_VLS];
11280 const u64 all_mask =
11281 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11282 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11283 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11284 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11285 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11286 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11287 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11288 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11289 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11291 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11292 #define NUM_USABLE_VLS 16 /* look at VL15 and less */
11294 /* find the new total credits, do sanity check on unused VLs */
11295 for (i = 0; i < OPA_MAX_VLS; i++) {
11297 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11300 nonzero_msg(dd, i, "dedicated",
11301 be16_to_cpu(new_bc->vl[i].dedicated));
11302 nonzero_msg(dd, i, "shared",
11303 be16_to_cpu(new_bc->vl[i].shared));
11304 new_bc->vl[i].dedicated = 0;
11305 new_bc->vl[i].shared = 0;
11307 new_total += be16_to_cpu(new_bc->overall_shared_limit);
11309 /* fetch the current values */
11310 get_buffer_control(dd, &cur_bc, &cur_total);
11313 * Create the masks we will use.
11315 memset(changing, 0, sizeof(changing));
11316 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11318  * NOTE: Assumes that the individual VL bits are adjacent and in
11319  * increasing order.
11322 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11326 any_shared_limit_changing = 0;
11327 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11330 this_shared_changing = new_bc->vl[i].shared
11331 != cur_bc.vl[i].shared;
11332 if (this_shared_changing)
11333 any_shared_limit_changing = 1;
11334 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11335 this_shared_changing) {
11337 changing_mask |= stat_mask;
11340 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11341 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11342 lowering_dedicated[i] = 1;
11343 ld_mask |= stat_mask;
11347 /* bracket the credit change with a total adjustment */
11348 if (new_total > cur_total)
11349 set_global_limit(dd, new_total);
11352 * Start the credit change algorithm.
11355 if ((be16_to_cpu(new_bc->overall_shared_limit) <
11356 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11357 (is_ax(dd) && any_shared_limit_changing)) {
11358 set_global_shared(dd, 0);
11359 cur_bc.overall_shared_limit = 0;
11363 for (i = 0; i < NUM_USABLE_VLS; i++) {
11368 set_vl_shared(dd, i, 0);
11369 cur_bc.vl[i].shared = 0;
11373 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11376 if (change_count > 0) {
11377 for (i = 0; i < NUM_USABLE_VLS; i++) {
11381 if (lowering_dedicated[i]) {
11382 set_vl_dedicated(dd, i,
11383 be16_to_cpu(new_bc->
11385 cur_bc.vl[i].dedicated =
11386 new_bc->vl[i].dedicated;
11390 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11392 /* now raise all dedicated that are going up */
11393 for (i = 0; i < NUM_USABLE_VLS; i++) {
11397 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11398 be16_to_cpu(cur_bc.vl[i].dedicated))
11399 set_vl_dedicated(dd, i,
11400 be16_to_cpu(new_bc->
11405 /* next raise all shared that are going up */
11406 for (i = 0; i < NUM_USABLE_VLS; i++) {
11410 if (be16_to_cpu(new_bc->vl[i].shared) >
11411 be16_to_cpu(cur_bc.vl[i].shared))
11412 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11415 /* finally raise the global shared */
11416 if (be16_to_cpu(new_bc->overall_shared_limit) >
11417 be16_to_cpu(cur_bc.overall_shared_limit))
11418 set_global_shared(dd,
11419 be16_to_cpu(new_bc->overall_shared_limit));
11421 /* bracket the credit change with a total adjustment */
11422 if (new_total < cur_total)
11423 set_global_limit(dd, new_total);
11426 * Determine the actual number of operational VLS using the number of
11427 * dedicated and shared credits for each VL.
11429 if (change_count > 0) {
11430 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11431 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11432 be16_to_cpu(new_bc->vl[i].shared) > 0)
11434 ppd->actual_vls_operational = vl_count;
11435 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11436 ppd->actual_vls_operational :
11437 ppd->vls_operational,
11440 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11441 ppd->actual_vls_operational :
11442 ppd->vls_operational, NULL);
11450 * Read the given fabric manager table. Return the size of the
11451 * table (in bytes) on success, and a negative error code on
11454 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11458 struct vl_arb_cache *vlc;
11461 case FM_TBL_VL_HIGH_ARB:
11464 * OPA specifies 128 elements (of 2 bytes each), though
11465 * HFI supports only 16 elements in h/w.
11467 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11468 vl_arb_get_cache(vlc, t);
11469 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11471 case FM_TBL_VL_LOW_ARB:
11474 * OPA specifies 128 elements (of 2 bytes each), though
11475 * HFI supports only 16 elements in h/w.
11477 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11478 vl_arb_get_cache(vlc, t);
11479 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11481 case FM_TBL_BUFFER_CONTROL:
11482 size = get_buffer_control(ppd->dd, t, NULL);
11484 case FM_TBL_SC2VLNT:
11485 size = get_sc2vlnt(ppd->dd, t);
11487 case FM_TBL_VL_PREEMPT_ELEMS:
11489 /* OPA specifies 128 elements, of 2 bytes each */
11490 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11492 case FM_TBL_VL_PREEMPT_MATRIX:
11495 * OPA specifies that this is the same size as the VL
11496 * arbitration tables (i.e., 256 bytes).
11506 * Write the given fabric manager table.
11508 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11511 struct vl_arb_cache *vlc;
11514 case FM_TBL_VL_HIGH_ARB:
11515 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11516 if (vl_arb_match_cache(vlc, t)) {
11517 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11520 vl_arb_set_cache(vlc, t);
11521 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11522 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11523 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11525 case FM_TBL_VL_LOW_ARB:
11526 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11527 if (vl_arb_match_cache(vlc, t)) {
11528 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11531 vl_arb_set_cache(vlc, t);
11532 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11533 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11534 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11536 case FM_TBL_BUFFER_CONTROL:
11537 ret = set_buffer_control(ppd, t);
11539 case FM_TBL_SC2VLNT:
11540 set_sc2vlnt(ppd->dd, t);
11549 * Disable all data VLs.
11551 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11553 static int disable_data_vls(struct hfi1_devdata *dd)
11558 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11564 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11565 * Just re-enables all data VLs (the "fill" part happens
11566 * automatically - the name was chosen for symmetry with
11567 * stop_drain_data_vls()).
11569 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11571 int open_fill_data_vls(struct hfi1_devdata *dd)
11576 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11582  * drain_data_vls() - assumes that disable_data_vls() has been called;
11583  * waits for the occupancy (of the per-VL FIFOs) of all contexts, and of
11584  * the SDMA engines, to drop to 0.
11586 static void drain_data_vls(struct hfi1_devdata *dd)
11590 pause_for_credit_return(dd);
11594 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11596 * Use open_fill_data_vls() to resume using data VLs. This pair is
11597 * meant to be used like this:
11599 * stop_drain_data_vls(dd);
11600 * // do things with per-VL resources
11601 * open_fill_data_vls(dd);
11603 int stop_drain_data_vls(struct hfi1_devdata *dd)
11607 ret = disable_data_vls(dd);
11609 drain_data_vls(dd);
11615 * Convert a nanosecond time to a cclock count. No matter how slow
11616 * the cclock, a non-zero ns will always have a non-zero result.
11618 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11622 if (dd->icode == ICODE_FPGA_EMULATION)
11623 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11624 else /* simulation pretends to be ASIC */
11625 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11626 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
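/*
 * A worked example of the conversion above, assuming for illustration a
 * cclock period of 2000 ps: 824 ns becomes (824 * 1000) / 2000 = 412
 * cclocks, while 1 ns would truncate to 0 and is therefore bumped up to 1
 * by the check above.
 */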
11632  * Convert a cclock count to nanoseconds. No matter how slow
11633 * the cclock, a non-zero cclocks will always have a non-zero result.
11635 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11639 if (dd->icode == ICODE_FPGA_EMULATION)
11640 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11641 else /* simulation pretends to be ASIC */
11642 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11643 if (cclocks && !ns)
11649 * Dynamically adjust the receive interrupt timeout for a context based on
11650 * incoming packet rate.
11652 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11654 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11656 struct hfi1_devdata *dd = rcd->dd;
11657 u32 timeout = rcd->rcvavail_timeout;
11660  * This algorithm doubles or halves the timeout depending on whether
11661  * the number of packets received in this interrupt was less than, or
11662  * greater than or equal to, the interrupt count.
11664  * The calculations below do not allow a steady state to be achieved.
11665  * Only at the endpoints is it possible to have an unchanging
11666  * timeout.
11668 if (npkts < rcv_intr_count) {
11670 * Not enough packets arrived before the timeout, adjust
11671 * timeout downward.
11673 if (timeout < 2) /* already at minimum? */
11678 * More than enough packets arrived before the timeout, adjust
11681 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11683 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11686 rcd->rcvavail_timeout = timeout;
11688 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11689 * been verified to be in range
11691 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11693 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
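/*
 * Illustration (hypothetical numbers): with rcv_intr_count = 16 and a
 * current timeout of 840, an interrupt that handled only 4 packets halves
 * the timeout to 420, while one that handled 32 packets doubles it toward
 * the rcv_intr_timeout_csr ceiling. Successive interrupts walk the timeout
 * between 1 and the CSR maximum, never settling in between.
 */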
11696 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11697 u32 intr_adjust, u32 npkts)
11699 struct hfi1_devdata *dd = rcd->dd;
11701 u32 ctxt = rcd->ctxt;
11704 * Need to write timeout register before updating RcvHdrHead to ensure
11705 * that a new value is used when the HW decides to restart counting.
11708 adjust_rcv_timeout(rcd, npkts);
11710 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11711 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11712 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11715 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11716 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11717 << RCV_HDR_HEAD_HEAD_SHIFT);
11718 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11722 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11726 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11727 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11729 if (rcd->rcvhdrtail_kvaddr)
11730 tail = get_rcvhdrtail(rcd);
11732 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11734 return head == tail;
11738 * Context Control and Receive Array encoding for buffer size:
11739 * 0x0 invalid
11740 * 0x1 4 KB
11741 * 0x2 8 KB
11742 * 0x3 16 KB
11743 * 0x4 32 KB
11744 * 0x5 64 KB
11745 * 0x6 128 KB
11746 * 0x7 256 KB
11747 * 0x8 512 KB (Receive Array only)
11748 * 0x9 1 MB (Receive Array only)
11749 * 0xa 2 MB (Receive Array only)
11751 * 0xB-0xF - reserved (Receive Array only)
11754 * This routine assumes that the value has already been sanity checked.
11756 static u32 encoded_size(u32 size)
11759 case 4 * 1024: return 0x1;
11760 case 8 * 1024: return 0x2;
11761 case 16 * 1024: return 0x3;
11762 case 32 * 1024: return 0x4;
11763 case 64 * 1024: return 0x5;
11764 case 128 * 1024: return 0x6;
11765 case 256 * 1024: return 0x7;
11766 case 512 * 1024: return 0x8;
11767 case 1 * 1024 * 1024: return 0x9;
11768 case 2 * 1024 * 1024: return 0xa;
11770 return 0x1; /* if invalid, go with the minimum size */
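/*
 * Example: a context whose eager buffers are 64 KB yields
 * encoded_size(64 * 1024) == 0x5, which hfi1_rcvctrl() below shifts
 * into RcvCtxtCtrl.EgrBufSize.
 */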
11773 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11774 struct hfi1_ctxtdata *rcd)
11777 int did_enable = 0;
11785 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11787 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11788 /* if the context is already enabled, don't do the extra steps */
11789 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11790 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11791 /* reset the tail and hdr addresses, and sequence count */
11792 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11794 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11795 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11796 rcd->rcvhdrqtailaddr_dma);
11799 /* reset the cached receive header queue head value */
11803 * Zero the receive header queue so we don't get false
11804 * positives when checking the sequence number. The
11805 * sequence numbers could land exactly on the same spot,
11806 * e.g. on an rcd restart before the receive header queue wrapped.
11808 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11810 /* starting timeout */
11811 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11813 /* enable the context */
11814 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11816 /* clear the egr buffer size field first */
11817 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11818 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11819 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11820 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11822 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11823 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11826 /* zero RcvEgrIndexHead */
11827 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11829 /* set eager count and base index */
11830 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11831 & RCV_EGR_CTRL_EGR_CNT_MASK)
11832 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11833 (((rcd->eager_base >> RCV_SHIFT)
11834 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11835 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11836 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11839 * Set TID (expected) count and base index.
11840 * rcd->expected_count is set to individual RcvArray entries,
11841 * not pairs, and the CSR takes a pair-count in groups of
11842 * four, so divide by 8.
11844 reg = (((rcd->expected_count >> RCV_SHIFT)
11845 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11846 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11847 (((rcd->expected_base >> RCV_SHIFT)
11848 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11849 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11850 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
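/*
 * Worked example (hypothetical count): expected_count = 2048 RcvArray
 * entries -> 1024 pairs -> 2048 / 8 = 256 written to TidPairCnt,
 * i.e. 256 groups of four pairs.
 */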
11851 if (ctxt == HFI1_CTRL_CTXT)
11852 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11854 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11855 write_csr(dd, RCV_VL15, 0);
11857 * When a receive context is being disabled, turn on the tail
11858 * update with a dummy tail address, and then disable
11861 if (dd->rcvhdrtail_dummy_dma) {
11862 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11863 dd->rcvhdrtail_dummy_dma);
11864 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11865 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11868 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11870 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11871 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11872 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11873 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11874 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
11875 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11876 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11877 /* See comment on RcvCtxtCtrl.TailUpd above */
11878 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11879 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11881 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11882 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11883 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11884 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11885 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11887 * In one-packet-per-eager mode, the size comes from
11888 * the RcvArray entry.
11890 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11891 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11893 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11894 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11895 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11896 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11897 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11898 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11899 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11900 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11901 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11902 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11903 rcd->rcvctrl = rcvctrl;
11904 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11905 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11907 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11909 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11910 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11912 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11914 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11915 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11916 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11917 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11918 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11919 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11920 ctxt, reg, reg == 0 ? "not" : "still");
11926 * The interrupt timeout and count must be set after
11927 * the context is enabled to take effect.
11929 /* set interrupt timeout */
11930 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11931 (u64)rcd->rcvavail_timeout <<
11932 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11934 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11935 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11936 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11939 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11941 * If the context has been disabled and the Tail Update has
11942 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
11943 * so it never holds an invalid address.
11945 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11946 dd->rcvhdrtail_dummy_dma);
11949 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11955 ret = dd->cntrnameslen;
11956 *namep = dd->cntrnames;
11958 const struct cntr_entry *entry;
11961 ret = (dd->ndevcntrs) * sizeof(u64);
11963 /* Get the start of the block of counters */
11964 *cntrp = dd->cntrs;
11967 * Now go and fill in each counter in the block.
11969 for (i = 0; i < DEV_CNTR_LAST; i++) {
11970 entry = &dev_cntrs[i];
11971 hfi1_cdbg(CNTR, "reading %s", entry->name);
11972 if (entry->flags & CNTR_DISABLED) {
11974 hfi1_cdbg(CNTR, "\tDisabled\n");
11976 if (entry->flags & CNTR_VL) {
11977 hfi1_cdbg(CNTR, "\tPer VL\n");
11978 for (j = 0; j < C_VL_COUNT; j++) {
11979 val = entry->rw_cntr(entry,
11985 "\t\tRead 0x%llx for %d\n",
11987 dd->cntrs[entry->offset + j] =
11990 } else if (entry->flags & CNTR_SDMA) {
11992 "\t Per SDMA Engine\n");
11993 for (j = 0; j < dd->chip_sdma_engines;
11996 entry->rw_cntr(entry, dd, j,
11999 "\t\tRead 0x%llx for %d\n",
12001 dd->cntrs[entry->offset + j] =
12005 val = entry->rw_cntr(entry, dd,
12008 dd->cntrs[entry->offset] = val;
12009 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12018 * Used by sysfs to create files for hfi stats to read
12020 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12026 ret = ppd->dd->portcntrnameslen;
12027 *namep = ppd->dd->portcntrnames;
12029 const struct cntr_entry *entry;
12032 ret = ppd->dd->nportcntrs * sizeof(u64);
12033 *cntrp = ppd->cntrs;
12035 for (i = 0; i < PORT_CNTR_LAST; i++) {
12036 entry = &port_cntrs[i];
12037 hfi1_cdbg(CNTR, "reading %s", entry->name);
12038 if (entry->flags & CNTR_DISABLED) {
12040 hfi1_cdbg(CNTR, "\tDisabled\n");
12044 if (entry->flags & CNTR_VL) {
12045 hfi1_cdbg(CNTR, "\tPer VL");
12046 for (j = 0; j < C_VL_COUNT; j++) {
12047 val = entry->rw_cntr(entry, ppd, j,
12052 "\t\tRead 0x%llx for %d",
12054 ppd->cntrs[entry->offset + j] = val;
12057 val = entry->rw_cntr(entry, ppd,
12061 ppd->cntrs[entry->offset] = val;
12062 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12069 static void free_cntrs(struct hfi1_devdata *dd)
12071 struct hfi1_pportdata *ppd;
12074 if (dd->synth_stats_timer.data)
12075 del_timer_sync(&dd->synth_stats_timer);
12076 dd->synth_stats_timer.data = 0;
12077 ppd = (struct hfi1_pportdata *)(dd + 1);
12078 for (i = 0; i < dd->num_pports; i++, ppd++) {
12080 kfree(ppd->scntrs);
12081 free_percpu(ppd->ibport_data.rvp.rc_acks);
12082 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12083 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12085 ppd->scntrs = NULL;
12086 ppd->ibport_data.rvp.rc_acks = NULL;
12087 ppd->ibport_data.rvp.rc_qacks = NULL;
12088 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12090 kfree(dd->portcntrnames);
12091 dd->portcntrnames = NULL;
12096 kfree(dd->cntrnames);
12097 dd->cntrnames = NULL;
12098 if (dd->update_cntr_wq) {
12099 destroy_workqueue(dd->update_cntr_wq);
12100 dd->update_cntr_wq = NULL;
12104 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12105 u64 *psval, void *context, int vl)
12110 if (entry->flags & CNTR_DISABLED) {
12111 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12115 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12117 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12119 /* If it's a synthetic counter, there is more work we need to do */
12120 if (entry->flags & CNTR_SYNTH) {
12121 if (sval == CNTR_MAX) {
12122 /* No need to read an already saturated counter */
12126 if (entry->flags & CNTR_32BIT) {
12127 /* 32bit counters can wrap multiple times */
12128 u64 upper = sval >> 32;
12129 u64 lower = (sval << 32) >> 32;
12131 if (lower > val) { /* hw wrapped */
12132 if (upper == CNTR_32BIT_MAX)
12138 if (val != CNTR_MAX)
12139 val = (upper << 32) | val;
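/*
 * Worked illustration: sval = 0x1FFFFFFF0 gives upper = 0x1 and
 * lower = 0xFFFFFFF0. A hardware read of val = 0x10 means the 32-bit
 * counter wrapped (lower > val), so upper is bumped to 0x2 and the
 * reconstructed value becomes (0x2 << 32) | 0x10.
 */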
12142 /* If we rolled we are saturated */
12143 if ((val < sval) || (val > CNTR_MAX))
12150 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12155 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12156 struct cntr_entry *entry,
12157 u64 *psval, void *context, int vl, u64 data)
12161 if (entry->flags & CNTR_DISABLED) {
12162 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12166 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12168 if (entry->flags & CNTR_SYNTH) {
12170 if (entry->flags & CNTR_32BIT) {
12171 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12172 (data << 32) >> 32);
12173 val = data; /* return the full 64bit value */
12175 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12179 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12184 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12189 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12191 struct cntr_entry *entry;
12194 entry = &dev_cntrs[index];
12195 sval = dd->scntrs + entry->offset;
12197 if (vl != CNTR_INVALID_VL)
12200 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12203 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12205 struct cntr_entry *entry;
12208 entry = &dev_cntrs[index];
12209 sval = dd->scntrs + entry->offset;
12211 if (vl != CNTR_INVALID_VL)
12214 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12217 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12219 struct cntr_entry *entry;
12222 entry = &port_cntrs[index];
12223 sval = ppd->scntrs + entry->offset;
12225 if (vl != CNTR_INVALID_VL)
12228 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12229 (index <= C_RCV_HDR_OVF_LAST)) {
12230 /* We do not want to bother for disabled contexts */
12234 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12237 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12239 struct cntr_entry *entry;
12242 entry = &port_cntrs[index];
12243 sval = ppd->scntrs + entry->offset;
12245 if (vl != CNTR_INVALID_VL)
12248 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12249 (index <= C_RCV_HDR_OVF_LAST)) {
12250 /* We do not want to bother for disabled contexts */
12254 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12257 static void do_update_synth_timer(struct work_struct *work)
12264 struct hfi1_pportdata *ppd;
12265 struct cntr_entry *entry;
12266 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12270 * Rather than keep beating on the CSRs, pick a minimal set that we can
12271 * check to watch for potential rollover. We can do this by looking at
12272 * the number of flits sent/received. If the total flits exceeds 32 bits,
12273 * then we have to iterate over all the counters and update.
12275 entry = &dev_cntrs[C_DC_RCV_FLITS];
12276 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12278 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12279 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12283 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12284 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12286 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12288 * May not be strictly necessary to update but it won't hurt and
12289 * simplifies the logic here.
12292 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12295 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12297 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12298 total_flits, (u64)CNTR_32BIT_MAX);
12299 if (total_flits >= CNTR_32BIT_MAX) {
12300 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12307 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12308 for (i = 0; i < DEV_CNTR_LAST; i++) {
12309 entry = &dev_cntrs[i];
12310 if (entry->flags & CNTR_VL) {
12311 for (vl = 0; vl < C_VL_COUNT; vl++)
12312 read_dev_cntr(dd, i, vl);
12314 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12317 ppd = (struct hfi1_pportdata *)(dd + 1);
12318 for (i = 0; i < dd->num_pports; i++, ppd++) {
12319 for (j = 0; j < PORT_CNTR_LAST; j++) {
12320 entry = &port_cntrs[j];
12321 if (entry->flags & CNTR_VL) {
12322 for (vl = 0; vl < C_VL_COUNT; vl++)
12323 read_port_cntr(ppd, j, vl);
12325 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12331 * We want the value in the register. The goal is to keep track
12332 * of the number of "ticks" not the counter value. In other
12333 * words if the register rolls we want to notice it and go ahead
12334 * and force an update.
12336 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12337 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12340 entry = &dev_cntrs[C_DC_RCV_FLITS];
12341 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12344 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12345 dd->unit, dd->last_tx, dd->last_rx);
12348 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12352 static void update_synth_timer(unsigned long opaque)
12354 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12356 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12357 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12360 #define C_MAX_NAME 16 /* 15 chars + one for \0 */
12361 static int init_cntrs(struct hfi1_devdata *dd)
12363 int i, rcv_ctxts, j;
12366 char name[C_MAX_NAME];
12367 struct hfi1_pportdata *ppd;
12368 const char *bit_type_32 = ",32";
12369 const int bit_type_32_sz = strlen(bit_type_32);
12371 /* set up the stats timer; the add_timer is done at the end */
12372 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12373 (unsigned long)dd);
12375 /***********************/
12376 /* per device counters */
12377 /***********************/
12379 /* size names and determine how many we have */
12383 for (i = 0; i < DEV_CNTR_LAST; i++) {
12384 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12385 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12389 if (dev_cntrs[i].flags & CNTR_VL) {
12390 dev_cntrs[i].offset = dd->ndevcntrs;
12391 for (j = 0; j < C_VL_COUNT; j++) {
12392 snprintf(name, C_MAX_NAME, "%s%d",
12393 dev_cntrs[i].name, vl_from_idx(j));
12394 sz += strlen(name);
12395 /* Add ",32" for 32-bit counters */
12396 if (dev_cntrs[i].flags & CNTR_32BIT)
12397 sz += bit_type_32_sz;
12401 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12402 dev_cntrs[i].offset = dd->ndevcntrs;
12403 for (j = 0; j < dd->chip_sdma_engines; j++) {
12404 snprintf(name, C_MAX_NAME, "%s%d",
12405 dev_cntrs[i].name, j);
12406 sz += strlen(name);
12407 /* Add ",32" for 32-bit counters */
12408 if (dev_cntrs[i].flags & CNTR_32BIT)
12409 sz += bit_type_32_sz;
12414 /* +1 for newline. */
12415 sz += strlen(dev_cntrs[i].name) + 1;
12416 /* Add ",32" for 32-bit counters */
12417 if (dev_cntrs[i].flags & CNTR_32BIT)
12418 sz += bit_type_32_sz;
12419 dev_cntrs[i].offset = dd->ndevcntrs;
12424 /* allocate space for the counter values */
12425 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12429 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12433 /* allocate space for the counter names */
12434 dd->cntrnameslen = sz;
12435 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12436 if (!dd->cntrnames)
12439 /* fill in the names */
12440 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12441 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12443 } else if (dev_cntrs[i].flags & CNTR_VL) {
12444 for (j = 0; j < C_VL_COUNT; j++) {
12445 snprintf(name, C_MAX_NAME, "%s%d",
12448 memcpy(p, name, strlen(name));
12451 /* Counter is 32 bits */
12452 if (dev_cntrs[i].flags & CNTR_32BIT) {
12453 memcpy(p, bit_type_32, bit_type_32_sz);
12454 p += bit_type_32_sz;
12459 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12460 for (j = 0; j < dd->chip_sdma_engines; j++) {
12461 snprintf(name, C_MAX_NAME, "%s%d",
12462 dev_cntrs[i].name, j);
12463 memcpy(p, name, strlen(name));
12466 /* Counter is 32 bits */
12467 if (dev_cntrs[i].flags & CNTR_32BIT) {
12468 memcpy(p, bit_type_32, bit_type_32_sz);
12469 p += bit_type_32_sz;
12475 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12476 p += strlen(dev_cntrs[i].name);
12478 /* Counter is 32 bits */
12479 if (dev_cntrs[i].flags & CNTR_32BIT) {
12480 memcpy(p, bit_type_32, bit_type_32_sz);
12481 p += bit_type_32_sz;
12488 /*********************/
12489 /* per port counters */
12490 /*********************/
12493 * Go through the counters for the overflows and disable the ones we
12494 * don't need. This varies based on platform so we need to do it
12495 * dynamically here.
12497 rcv_ctxts = dd->num_rcv_contexts;
12498 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12499 i <= C_RCV_HDR_OVF_LAST; i++) {
12500 port_cntrs[i].flags |= CNTR_DISABLED;
12503 /* size port counter names and determine how many we have */
12505 dd->nportcntrs = 0;
12506 for (i = 0; i < PORT_CNTR_LAST; i++) {
12507 if (port_cntrs[i].flags & CNTR_DISABLED) {
12508 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12512 if (port_cntrs[i].flags & CNTR_VL) {
12513 port_cntrs[i].offset = dd->nportcntrs;
12514 for (j = 0; j < C_VL_COUNT; j++) {
12515 snprintf(name, C_MAX_NAME, "%s%d",
12516 port_cntrs[i].name, vl_from_idx(j));
12517 sz += strlen(name);
12518 /* Add ",32" for 32-bit counters */
12519 if (port_cntrs[i].flags & CNTR_32BIT)
12520 sz += bit_type_32_sz;
12525 /* +1 for newline */
12526 sz += strlen(port_cntrs[i].name) + 1;
12527 /* Add ",32" for 32-bit counters */
12528 if (port_cntrs[i].flags & CNTR_32BIT)
12529 sz += bit_type_32_sz;
12530 port_cntrs[i].offset = dd->nportcntrs;
12535 /* allocate space for the counter names */
12536 dd->portcntrnameslen = sz;
12537 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12538 if (!dd->portcntrnames)
12541 /* fill in port cntr names */
12542 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12543 if (port_cntrs[i].flags & CNTR_DISABLED)
12546 if (port_cntrs[i].flags & CNTR_VL) {
12547 for (j = 0; j < C_VL_COUNT; j++) {
12548 snprintf(name, C_MAX_NAME, "%s%d",
12549 port_cntrs[i].name, vl_from_idx(j));
12550 memcpy(p, name, strlen(name));
12553 /* Counter is 32 bits */
12554 if (port_cntrs[i].flags & CNTR_32BIT) {
12555 memcpy(p, bit_type_32, bit_type_32_sz);
12556 p += bit_type_32_sz;
12562 memcpy(p, port_cntrs[i].name,
12563 strlen(port_cntrs[i].name));
12564 p += strlen(port_cntrs[i].name);
12566 /* Counter is 32 bits */
12567 if (port_cntrs[i].flags & CNTR_32BIT) {
12568 memcpy(p, bit_type_32, bit_type_32_sz);
12569 p += bit_type_32_sz;
12576 /* allocate per port storage for counter values */
12577 ppd = (struct hfi1_pportdata *)(dd + 1);
12578 for (i = 0; i < dd->num_pports; i++, ppd++) {
12579 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12583 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12588 /* CPU counters need to be allocated and zeroed */
12589 if (init_cpu_counters(dd))
12592 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12593 WQ_MEM_RECLAIM, dd->unit);
12594 if (!dd->update_cntr_wq)
12597 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12599 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12606 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12608 switch (chip_lstate) {
12611 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12615 return IB_PORT_DOWN;
12617 return IB_PORT_INIT;
12619 return IB_PORT_ARMED;
12620 case LSTATE_ACTIVE:
12621 return IB_PORT_ACTIVE;
12625 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12627 /* look at the HFI meta-states only */
12628 switch (chip_pstate & 0xf0) {
12630 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12634 return IB_PORTPHYSSTATE_DISABLED;
12636 return OPA_PORTPHYSSTATE_OFFLINE;
12638 return IB_PORTPHYSSTATE_POLLING;
12639 case PLS_CONFIGPHY:
12640 return IB_PORTPHYSSTATE_TRAINING;
12642 return IB_PORTPHYSSTATE_LINKUP;
12644 return IB_PORTPHYSSTATE_PHY_TEST;
12648 /* return the OPA port logical state name */
12649 const char *opa_lstate_name(u32 lstate)
12651 static const char * const port_logical_names[] = {
12657 "PORT_ACTIVE_DEFER",
12659 if (lstate < ARRAY_SIZE(port_logical_names))
12660 return port_logical_names[lstate];
12664 /* return the OPA port physical state name */
12665 const char *opa_pstate_name(u32 pstate)
12667 static const char * const port_physical_names[] = {
12674 "PHYS_LINK_ERR_RECOVER",
12681 if (pstate < ARRAY_SIZE(port_physical_names))
12682 return port_physical_names[pstate];
12686 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12689 * Set port status flags in the page mapped into userspace
12690 * memory. Do it here to ensure a reliable state - this is
12691 * the only function called by all state handling code.
12692 * Always set the flags because the cached value
12693 * might have been changed explicitly outside of this function.
12696 if (ppd->statusp) {
12700 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12701 HFI1_STATUS_IB_READY);
12703 case IB_PORT_ARMED:
12704 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12706 case IB_PORT_ACTIVE:
12707 *ppd->statusp |= HFI1_STATUS_IB_READY;
12714 * wait_logical_linkstate - wait for an IB link state change to occur
12715 * @ppd: port device
12716 * @state: the state to wait for
12717 * @msecs: the number of milliseconds to wait
12719 * Wait up to msecs milliseconds for IB link state change to occur.
12720 * For now, take the easy polling route.
12721 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12723 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12726 unsigned long timeout;
12729 timeout = jiffies + msecs_to_jiffies(msecs);
12731 new_state = chip_to_opa_lstate(ppd->dd,
12732 read_logical_state(ppd->dd));
12733 if (new_state == state)
12735 if (time_after(jiffies, timeout)) {
12736 dd_dev_err(ppd->dd,
12737 "timeout waiting for link state 0x%x\n",
12744 update_statusp(ppd, state);
12745 dd_dev_info(ppd->dd,
12746 "logical state changed to %s (0x%x)\n",
12747 opa_lstate_name(state),
12752 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12754 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12756 dd_dev_info(ppd->dd,
12757 "physical state changed to %s (0x%x), phy 0x%x\n",
12758 opa_pstate_name(ib_pstate), ib_pstate, state);
12762 * Read the physical hardware link state and check whether it matches
12763 * the host driver's anticipated state.
12765 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12767 u32 read_state = read_physical_state(ppd->dd);
12769 if (read_state == state) {
12770 log_state_transition(ppd, state);
12772 dd_dev_err(ppd->dd,
12773 "anticipated phy link state 0x%x, read 0x%x\n",
12774 state, read_state);
12779 * wait_physical_linkstate - wait for a physical link state change to occur
12780 * @ppd: port device
12781 * @state: the state to wait for
12782 * @msecs: the number of milliseconds to wait
12784 * Wait up to msecs milliseconds for physical link state change to occur.
12785 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12787 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12791 unsigned long timeout;
12793 timeout = jiffies + msecs_to_jiffies(msecs);
12795 read_state = read_physical_state(ppd->dd);
12796 if (read_state == state)
12798 if (time_after(jiffies, timeout)) {
12799 dd_dev_err(ppd->dd,
12800 "timeout waiting for phy link state 0x%x\n",
12804 usleep_range(1950, 2050); /* sleep 2ms-ish */
12807 log_state_transition(ppd, state);
12811 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12812 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12814 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12815 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12817 void hfi1_init_ctxt(struct send_context *sc)
12820 struct hfi1_devdata *dd = sc->dd;
12822 u8 set = (sc->type == SC_USER ?
12823 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12824 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12825 reg = read_kctxt_csr(dd, sc->hw_context,
12826 SEND_CTXT_CHECK_ENABLE);
12828 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12830 SET_STATIC_RATE_CONTROL_SMASK(reg);
12831 write_kctxt_csr(dd, sc->hw_context,
12832 SEND_CTXT_CHECK_ENABLE, reg);
12836 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12841 if (dd->icode != ICODE_RTL_SILICON) {
12842 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12843 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12847 reg = read_csr(dd, ASIC_STS_THERM);
12848 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12849 ASIC_STS_THERM_CURR_TEMP_MASK);
12850 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12851 ASIC_STS_THERM_LO_TEMP_MASK);
12852 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12853 ASIC_STS_THERM_HI_TEMP_MASK);
12854 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12855 ASIC_STS_THERM_CRIT_TEMP_MASK);
12856 /* triggers is a 3-bit value - 1 bit per trigger. */
12857 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12862 /* ========================================================================= */
12865 * Enable/disable the chip's delivery of interrupts.
12867 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12872 * In HFI, the mask needs to be 1 to allow interrupts.
12875 /* enable all interrupts */
12876 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12877 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12881 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12882 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12887 * Clear all interrupt sources on the chip.
12889 static void clear_all_interrupts(struct hfi1_devdata *dd)
12893 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12894 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12896 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12897 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12898 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12899 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12900 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12901 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12902 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12903 for (i = 0; i < dd->chip_send_contexts; i++)
12904 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12905 for (i = 0; i < dd->chip_sdma_engines; i++)
12906 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12908 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12909 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12910 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12913 /* Move to pcie.c? */
12914 static void disable_intx(struct pci_dev *pdev)
12919 static void clean_up_interrupts(struct hfi1_devdata *dd)
12923 /* remove irqs - must happen before disabling/turning off */
12924 if (dd->num_msix_entries) {
12926 struct hfi1_msix_entry *me = dd->msix_entries;
12928 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12929 if (!me->arg) /* => no irq, no affinity */
12931 hfi1_put_irq_affinity(dd, me);
12932 pci_free_irq(dd->pcidev, i, me->arg);
12935 /* clean structures */
12936 kfree(dd->msix_entries);
12937 dd->msix_entries = NULL;
12938 dd->num_msix_entries = 0;
12941 if (dd->requested_intx_irq) {
12942 pci_free_irq(dd->pcidev, 0, dd);
12943 dd->requested_intx_irq = 0;
12945 disable_intx(dd->pcidev);
12948 pci_free_irq_vectors(dd->pcidev);
12952 * Remap the interrupt source from the general handler to the given MSI-X
12955 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12960 /* clear from the handled mask of the general interrupt */
12963 if (likely(m < CCE_NUM_INT_CSRS)) {
12964 dd->gi_mask[m] &= ~((u64)1 << n);
12966 dd_dev_err(dd, "remap interrupt err\n");
12970 /* direct the chip source to the given MSI-X interrupt */
12973 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12974 reg &= ~((u64)0xff << (8 * n));
12975 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12976 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
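/*
 * Each CCE_INT_MAP CSR holds eight 8-bit entries, one per interrupt
 * source. Illustration (byte-granular m/n): isrc = 21 selects map CSR
 * index 2 (21 / 8) and byte lane 5 (21 % 8), so the MSI-X vector number
 * lands in bits [47:40] of that CSR.
 */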
12979 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12980 int engine, int msix_intr)
12983 * SDMA engine interrupt sources are grouped by type, rather than
12984 * by engine. Per-engine interrupts are as follows:
12989 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12991 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12993 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
12997 static int request_intx_irq(struct hfi1_devdata *dd)
13001 ret = pci_request_irq(dd->pcidev, 0, general_interrupt, NULL, dd,
13002 DRIVER_NAME "_%d", dd->unit);
13004 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
13007 dd->requested_intx_irq = 1;
13011 static int request_msix_irqs(struct hfi1_devdata *dd)
13013 int first_general, last_general;
13014 int first_sdma, last_sdma;
13015 int first_rx, last_rx;
13018 /* calculate the ranges we are going to use */
13020 last_general = first_general + 1;
13021 first_sdma = last_general;
13022 last_sdma = first_sdma + dd->num_sdma;
13023 first_rx = last_sdma;
13024 last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
13026 /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
13027 dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
13030 * Sanity check - the code expects all SDMA chip source
13031 * interrupts to be in the same CSR, starting at bit 0. Verify
13032 * that this is true by checking the bit location of the start.
13034 BUILD_BUG_ON(IS_SDMA_START % 64);
13036 for (i = 0; i < dd->num_msix_entries; i++) {
13037 struct hfi1_msix_entry *me = &dd->msix_entries[i];
13038 const char *err_info;
13039 irq_handler_t handler;
13040 irq_handler_t thread = NULL;
13043 struct hfi1_ctxtdata *rcd = NULL;
13044 struct sdma_engine *sde = NULL;
13045 char name[MAX_NAME_SIZE];
13047 /* obtain the arguments to pci_request_irq */
13048 if (first_general <= i && i < last_general) {
13049 idx = i - first_general;
13050 handler = general_interrupt;
13052 snprintf(name, sizeof(name),
13053 DRIVER_NAME "_%d", dd->unit);
13054 err_info = "general";
13055 me->type = IRQ_GENERAL;
13056 } else if (first_sdma <= i && i < last_sdma) {
13057 idx = i - first_sdma;
13058 sde = &dd->per_sdma[idx];
13059 handler = sdma_interrupt;
13061 snprintf(name, sizeof(name),
13062 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
13064 remap_sdma_interrupts(dd, idx, i);
13065 me->type = IRQ_SDMA;
13066 } else if (first_rx <= i && i < last_rx) {
13067 idx = i - first_rx;
13068 rcd = hfi1_rcd_get_by_index(dd, idx);
13071 * Set the interrupt register and mask for this
13072 * context's interrupt.
13074 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13075 rcd->imask = ((u64)1) <<
13076 ((IS_RCVAVAIL_START + idx) % 64);
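/*
 * Illustration: if IS_RCVAVAIL_START were, say, bit 192, then kernel
 * context 3's source bit is 195, giving rcd->ireg = 195 / 64 = 3 and
 * rcd->imask = 1ull << (195 % 64) = 1ull << 3, so the interrupt code
 * can test this context's RcvAvail bit directly.
 */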
13077 handler = receive_context_interrupt;
13078 thread = receive_context_thread;
13080 snprintf(name, sizeof(name),
13081 DRIVER_NAME "_%d kctxt%d",
13083 err_info = "receive context";
13084 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
13085 me->type = IRQ_RCVCTXT;
13086 rcd->msix_intr = i;
13090 /* not in our expected range - complain, then
* ignore it */
13094 "Unexpected extra MSI-X interrupt %d\n", i);
13097 /* no argument, no interrupt */
13100 /* make sure the name is terminated */
13101 name[sizeof(name) - 1] = 0;
13102 me->irq = pci_irq_vector(dd->pcidev, i);
13103 ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
13107 "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13108 err_info, me->irq, idx, ret);
13112 * assign arg after pci_request_irq call, so it will be
* cleaned up
13117 ret = hfi1_get_irq_affinity(dd, me);
13119 dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
13125 void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13129 if (!dd->num_msix_entries) {
13130 synchronize_irq(pci_irq_vector(dd->pcidev, 0));
13134 for (i = 0; i < dd->vnic.num_ctxt; i++) {
13135 struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13136 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13138 synchronize_irq(me->irq);
13142 void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13144 struct hfi1_devdata *dd = rcd->dd;
13145 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13147 if (!me->arg) /* => no irq, no affinity */
13150 hfi1_put_irq_affinity(dd, me);
13151 pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
13156 void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13158 struct hfi1_devdata *dd = rcd->dd;
13159 struct hfi1_msix_entry *me;
13160 int idx = rcd->ctxt;
13164 rcd->msix_intr = dd->vnic.msix_idx++;
13165 me = &dd->msix_entries[rcd->msix_intr];
13168 * Set the interrupt register and mask for this
13169 * context's interrupt.
13171 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13172 rcd->imask = ((u64)1) <<
13173 ((IS_RCVAVAIL_START + idx) % 64);
13174 me->type = IRQ_RCVCTXT;
13175 me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13176 remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13178 ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
13179 receive_context_interrupt,
13180 receive_context_thread, arg,
13181 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13183 dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13184 me->irq, idx, ret);
13188 * assign arg after pci_request_irq call, so it will be
* cleaned up
13193 ret = hfi1_get_irq_affinity(dd, me);
13196 "unable to pin IRQ %d\n", ret);
13197 pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
13202 * Set the general handler to accept all interrupts, remap all
13203 * chip interrupts back to MSI-X 0.
13205 static void reset_interrupts(struct hfi1_devdata *dd)
13209 /* all interrupts handled by the general handler */
13210 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13211 dd->gi_mask[i] = ~(u64)0;
13213 /* all chip interrupts map to MSI-X 0 */
13214 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13215 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13218 static int set_up_interrupts(struct hfi1_devdata *dd)
13222 int single_interrupt = 0; /* we expect to have all the interrupts */
13226 * 1 general, "slow path" interrupt (includes the SDMA engines
13227 * slow source, SDMACleanupDone)
13228 * N interrupts - one per used SDMA engine
13229 * M interrupts - one per kernel receive context
13231 total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
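/*
 * Example count (hypothetical configuration): 16 SDMA engines and
 * 8 kernel receive queues, with HFI1_NUM_VNIC_CTXT assumed to be 8,
 * would request 1 + 16 + 8 + 8 = 33 MSI-X vectors.
 */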
13233 /* ask for MSI-X interrupts */
13234 request = request_msix(dd, total);
13238 } else if (request == 0) {
13240 /* dd->num_msix_entries already zero */
13241 single_interrupt = 1;
13242 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
13243 } else if (request < total) {
13244 /* using MSI-X, with reduced interrupts */
13245 dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
13250 dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13252 if (!dd->msix_entries) {
13257 dd->num_msix_entries = total;
13258 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13261 /* mask all interrupts */
13262 set_intr_state(dd, 0);
13263 /* clear all pending interrupts */
13264 clear_all_interrupts(dd);
13266 /* reset general handler mask, chip MSI-X mappings */
13267 reset_interrupts(dd);
13269 if (single_interrupt)
13270 ret = request_intx_irq(dd);
13272 ret = request_msix_irqs(dd);
13279 clean_up_interrupts(dd);
13284 * Set up context values in dd. Sets:
13286 * num_rcv_contexts - number of contexts being used
13287 * n_krcv_queues - number of kernel contexts
13288 * first_dyn_alloc_ctxt - first dynamically allocated context
13289 * in array of contexts
13290 * freectxts - number of free user contexts
13291 * num_send_contexts - number of PIO send contexts being used
13293 static int set_up_context_variables(struct hfi1_devdata *dd)
13295 unsigned long num_kernel_contexts;
13296 int total_contexts;
13300 int user_rmt_reduced;
13303 * Kernel receive contexts:
13304 * - Context 0 - control context (VL15/multicast/error)
13305 * - Context 1 - first kernel context
13306 * - Context 2 - second kernel context
13311 * n_krcvqs is the sum of module parameter kernel receive
13312 * contexts, krcvqs[]. It does not include the control
13313 * context, so add that.
13315 num_kernel_contexts = n_krcvqs + 1;
13317 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13319 * Every kernel receive context needs an ACK send context.
13320 * One send context is allocated for each VL{0-7} and VL15.
13322 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13324 "Reducing # kernel rcv contexts to: %d, from %lu\n",
13325 (int)(dd->chip_send_contexts - num_vls - 1),
13326 num_kernel_contexts);
13327 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13331 * - default to 1 user context per real (non-HT) CPU core if
13332 * num_user_contexts is negative
13334 if (num_user_contexts < 0)
13335 num_user_contexts =
13336 cpumask_weight(&node_affinity.real_cpu_mask);
13338 total_contexts = num_kernel_contexts + num_user_contexts;
13341 * Adjust the counts given a global max.
13343 if (total_contexts > dd->chip_rcv_contexts) {
13345 "Reducing # user receive contexts to: %d, from %d\n",
13346 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
13347 (int)num_user_contexts);
13348 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
13350 total_contexts = num_kernel_contexts + num_user_contexts;
13353 /* each user context requires an entry in the RMT */
13354 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13355 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13356 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13358 "RMT size is reducing the number of user receive contexts from %d to %d\n",
13359 (int)num_user_contexts,
13362 num_user_contexts = user_rmt_reduced;
13363 total_contexts = num_kernel_contexts + num_user_contexts;
13366 /* Accommodate VNIC contexts */
13367 if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
13368 total_contexts += HFI1_NUM_VNIC_CTXT;
13370 /* the first N are kernel contexts, the rest are user/vnic contexts */
13371 dd->num_rcv_contexts = total_contexts;
13372 dd->n_krcv_queues = num_kernel_contexts;
13373 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13374 dd->num_user_contexts = num_user_contexts;
13375 dd->freectxts = num_user_contexts;
13377 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
13378 (int)dd->chip_rcv_contexts,
13379 (int)dd->num_rcv_contexts,
13380 (int)dd->n_krcv_queues,
13381 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
13384 * Receive array allocation:
13385 * All RcvArray entries are divided into groups of 8. This
13386 * is required by the hardware and will speed up writes to
13387 * consecutive entries by using write-combining of the entire cacheline.
13390 * The number of groups is evenly divided among all contexts;
13391 * any leftover groups are given to the first N user contexts.
13394 dd->rcv_entries.group_size = RCV_INCREMENT;
13395 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13396 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13397 dd->rcv_entries.nctxt_extra = ngroups -
13398 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13399 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13400 dd->rcv_entries.ngroups,
13401 dd->rcv_entries.nctxt_extra);
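/*
 * Worked example (hypothetical sizes): 32768 RcvArray entries /
 * group_size 8 = 4096 groups; with 160 receive contexts, each gets
 * ngroups = 4096 / 160 = 25, leaving nctxt_extra = 4096 - 160 * 25 = 96
 * groups for the first user contexts.
 */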
13402 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13403 MAX_EAGER_ENTRIES * 2) {
13404 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13405 dd->rcv_entries.group_size;
13407 "RcvArray group count too high, change to %u\n",
13408 dd->rcv_entries.ngroups);
13409 dd->rcv_entries.nctxt_extra = 0;
13412 * PIO send contexts
13414 ret = init_sc_pools_and_sizes(dd);
13415 if (ret >= 0) { /* success */
13416 dd->num_send_contexts = ret;
13419 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13420 dd->chip_send_contexts,
13421 dd->num_send_contexts,
13422 dd->sc_sizes[SC_KERNEL].count,
13423 dd->sc_sizes[SC_ACK].count,
13424 dd->sc_sizes[SC_USER].count,
13425 dd->sc_sizes[SC_VL15].count);
13426 ret = 0; /* success */
13433 * Set the device/port partition key table. The MAD code
13434 * will ensure that, at least, the partial management
13435 * partition key is present in the table.
13437 static void set_partition_keys(struct hfi1_pportdata *ppd)
13439 struct hfi1_devdata *dd = ppd->dd;
13443 dd_dev_info(dd, "Setting partition keys\n");
13444 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13445 reg |= (ppd->pkeys[i] &
13446 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13448 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13449 /* Each register holds 4 PKey values. */
13450 if ((i % 4) == 3) {
13451 write_csr(dd, RCV_PARTITION_KEY +
13452 ((i - 3) * 2), reg);
13457 /* Always enable HW pkeys check when pkeys table is set */
13458 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
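/*
 * Packing example: with 16-bit PKeys, i = 3 completes the first
 * register (pkeys[0..3] at byte offset 0) and i = 7 the second
 * (pkeys[4..7] at byte offset (7 - 3) * 2 = 8).
 */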
13462 * These CSRs and memories are uninitialized on reset and must be
13463 * written before reading to set the ECC/parity bits.
13465 * NOTE: All user context CSRs that are not mmapped write-only
13466 * (e.g. the TID flows) must be initialized even if the driver never
13467 * reads them.
13469 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13474 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13475 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13477 /* SendCtxtCreditReturnAddr */
13478 for (i = 0; i < dd->chip_send_contexts; i++)
13479 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13481 /* PIO Send buffers */
13482 /* SDMA Send buffers */
13484 * These are not normally read, and (presently) have no method
13485 * to be read, so are not pre-initialized
13489 /* RcvHdrTailAddr */
13490 /* RcvTidFlowTable */
13491 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13492 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13493 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13494 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13495 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13499 for (i = 0; i < dd->chip_rcv_array_count; i++)
13500 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13502 /* RcvQPMapTable */
13503 for (i = 0; i < 32; i++)
13504 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13508 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13510 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13513 unsigned long timeout;
13516 /* is the condition present? */
13517 reg = read_csr(dd, CCE_STATUS);
13518 if ((reg & status_bits) == 0)
13521 /* clear the condition */
13522 write_csr(dd, CCE_CTRL, ctrl_bits);
13524 /* wait for the condition to clear */
13525 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13527 reg = read_csr(dd, CCE_STATUS);
13528 if ((reg & status_bits) == 0)
13530 if (time_after(jiffies, timeout)) {
13532 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13533 status_bits, reg & status_bits);
13540 /* set CCE CSRs to chip reset defaults */
13541 static void reset_cce_csrs(struct hfi1_devdata *dd)
13545 /* CCE_REVISION read-only */
13546 /* CCE_REVISION2 read-only */
13547 /* CCE_CTRL - bits clear automatically */
13548 /* CCE_STATUS read-only, use CceCtrl to clear */
13549 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13550 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13551 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13552 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13553 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13554 /* CCE_ERR_STATUS read-only */
13555 write_csr(dd, CCE_ERR_MASK, 0);
13556 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13557 /* CCE_ERR_FORCE leave alone */
13558 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13559 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13560 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13561 /* CCE_PCIE_CTRL leave alone */
13562 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13563 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13564 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13565 CCE_MSIX_TABLE_UPPER_RESETCSR);
13567 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13568 /* CCE_MSIX_PBA read-only */
13569 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13570 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13572 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13573 write_csr(dd, CCE_INT_MAP, 0);
13574 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13575 /* CCE_INT_STATUS read-only */
13576 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13577 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13578 /* CCE_INT_FORCE leave alone */
13579 /* CCE_INT_BLOCKED read-only */
13581 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13582 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13585 /* set MISC CSRs to chip reset defaults */
13586 static void reset_misc_csrs(struct hfi1_devdata *dd)
13590 for (i = 0; i < 32; i++) {
13591 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13592 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13593 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13596 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13597 * only be written in 128-byte chunks
13599 /* init RSA engine to clear lingering errors */
13600 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13601 write_csr(dd, MISC_CFG_RSA_MU, 0);
13602 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13603 /* MISC_STS_8051_DIGEST read-only */
13604 /* MISC_STS_SBM_DIGEST read-only */
13605 /* MISC_STS_PCIE_DIGEST read-only */
13606 /* MISC_STS_FAB_DIGEST read-only */
13607 /* MISC_ERR_STATUS read-only */
13608 write_csr(dd, MISC_ERR_MASK, 0);
13609 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13610 /* MISC_ERR_FORCE leave alone */
13613 /* set TXE CSRs to chip reset defaults */
13614 static void reset_txe_csrs(struct hfi1_devdata *dd)
13621 write_csr(dd, SEND_CTRL, 0);
13622 __cm_reset(dd, 0); /* reset CM internal state */
13623 /* SEND_CONTEXTS read-only */
13624 /* SEND_DMA_ENGINES read-only */
13625 /* SEND_PIO_MEM_SIZE read-only */
13626 /* SEND_DMA_MEM_SIZE read-only */
13627 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13628 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13629 /* SEND_PIO_ERR_STATUS read-only */
13630 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13631 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13632 /* SEND_PIO_ERR_FORCE leave alone */
13633 /* SEND_DMA_ERR_STATUS read-only */
13634 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13635 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13636 /* SEND_DMA_ERR_FORCE leave alone */
13637 /* SEND_EGRESS_ERR_STATUS read-only */
13638 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13639 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13640 /* SEND_EGRESS_ERR_FORCE leave alone */
13641 write_csr(dd, SEND_BTH_QP, 0);
13642 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13643 write_csr(dd, SEND_SC2VLT0, 0);
13644 write_csr(dd, SEND_SC2VLT1, 0);
13645 write_csr(dd, SEND_SC2VLT2, 0);
13646 write_csr(dd, SEND_SC2VLT3, 0);
13647 write_csr(dd, SEND_LEN_CHECK0, 0);
13648 write_csr(dd, SEND_LEN_CHECK1, 0);
13649 /* SEND_ERR_STATUS read-only */
13650 write_csr(dd, SEND_ERR_MASK, 0);
13651 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13652 /* SEND_ERR_FORCE read-only */
13653 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13654 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13655 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13656 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13657 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13658 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13659 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13660 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13661 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13662 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13663 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13664 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13665 /* SEND_CM_CREDIT_USED_STATUS read-only */
13666 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13667 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13668 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13669 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13670 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13671 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13672 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13673 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13674 /* SEND_CM_CREDIT_USED_VL read-only */
13675 /* SEND_CM_CREDIT_USED_VL15 read-only */
13676 /* SEND_EGRESS_CTXT_STATUS read-only */
13677 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13678 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13679 /* SEND_EGRESS_ERR_INFO read-only */
13680 /* SEND_EGRESS_ERR_SOURCE read-only */
13683 * TXE Per-Context CSRs
13685 for (i = 0; i < dd->chip_send_contexts; i++) {
13686 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13687 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13688 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13689 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13690 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13691 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13692 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13693 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13694 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13695 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13696 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13697 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13701 * TXE Per-SDMA CSRs
13703 for (i = 0; i < dd->chip_sdma_engines; i++) {
13704 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13705 /* SEND_DMA_STATUS read-only */
13706 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13707 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13708 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13709 /* SEND_DMA_HEAD read-only */
13710 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13711 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13712 /* SEND_DMA_IDLE_CNT read-only */
13713 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13714 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13715 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13716 /* SEND_DMA_ENG_ERR_STATUS read-only */
13717 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13718 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13719 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13720 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13721 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13722 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13723 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13724 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13725 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13726 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13732 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13734 static void init_rbufs(struct hfi1_devdata *dd)
13740 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13745 reg = read_csr(dd, RCV_STATUS);
13746 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13747 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13750 * Give up after 1ms - maximum wait time.
13752 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
13753 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13754 * 136 KB / (66% * 250MB/s) = 844us
13756 if (count++ > 500) {
13758 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13762 udelay(2); /* do not busy-wait the CSR */
13765 /* start the init - expect RcvCtrl to be 0 */
13766 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13769 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13770 * period after the write before RcvStatus.RxRbufInitDone is valid.
13771 * The delay in the first run through the loop below is sufficient and
13772 * required before the first read of RcvStatus.RxRbufInitDone.
13774 read_csr(dd, RCV_CTRL);
13776 /* wait for the init to finish */
13779 /* delay is required first time through - see above */
13780 udelay(2); /* do not busy-wait the CSR */
13781 reg = read_csr(dd, RCV_STATUS);
13782 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13785 /* give up after 100us - slowest possible at 33MHz is 73us */
13786 if (count++ > 50) {
13788 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13795 /* set RXE CSRs to chip reset defaults */
13796 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13803 write_csr(dd, RCV_CTRL, 0);
13805 /* RCV_STATUS read-only */
13806 /* RCV_CONTEXTS read-only */
13807 /* RCV_ARRAY_CNT read-only */
13808 /* RCV_BUF_SIZE read-only */
13809 write_csr(dd, RCV_BTH_QP, 0);
13810 write_csr(dd, RCV_MULTICAST, 0);
13811 write_csr(dd, RCV_BYPASS, 0);
13812 write_csr(dd, RCV_VL15, 0);
13813 /* this is a clear-down */
13814 write_csr(dd, RCV_ERR_INFO,
13815 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13816 /* RCV_ERR_STATUS read-only */
13817 write_csr(dd, RCV_ERR_MASK, 0);
13818 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13819 /* RCV_ERR_FORCE leave alone */
13820 for (i = 0; i < 32; i++)
13821 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13822 for (i = 0; i < 4; i++)
13823 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13824 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13825 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13826 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13827 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13828 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13829 clear_rsm_rule(dd, i);
13830 for (i = 0; i < 32; i++)
13831 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13834 * RXE Kernel and User Per-Context CSRs
13836 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13838 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13839 /* RCV_CTXT_STATUS read-only */
13840 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13841 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13842 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13843 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13844 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13845 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13846 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13847 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13848 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13849 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13852 /* RCV_HDR_TAIL read-only */
13853 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13854 /* RCV_EGR_INDEX_TAIL read-only */
13855 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13856 /* RCV_EGR_OFFSET_TAIL read-only */
13857 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13858 write_uctxt_csr(dd, i,
13859 RCV_TID_FLOW_TABLE + (8 * j), 0);
13865 * Set sc2vl tables.
13867 * They power on to zeros, so to avoid send context errors
13868 * they need to be set:
13870 * SC 0-7 -> VL 0-7 (respectively)
13875 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13878 /* init per architecture spec, constrained by hardware capability */
13880 /* HFI maps sent packets */
13881 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13887 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13893 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13899 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13906 /* DC maps received packets */
13907 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13909 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13910 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13911 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13913 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13914 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13916 /* initialize the cached sc2vl values consistently with h/w */
13917 for (i = 0; i < 32; i++) {
13918 if (i < 8 || i == 15)
13919 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13921 *((u8 *)(dd->sc2vl) + i) = 0;
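/*
 * The resulting cached table, matching the CSR programming above:
 *
 *	SC:  0  1  2  3  4  5  6  7  8 .. 14  15  16 .. 31
 *	VL:  0  1  2  3  4  5  6  7  0 ..  0  15   0 ..  0
 */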
13926 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13927 * depend on the chip going through a power-on reset - a driver may be loaded
13928 * and unloaded many times.
13930 * Do not write any CSR values to the chip in this routine - there may be
13931 * a reset following the (possible) FLR in this routine.
13934 static int init_chip(struct hfi1_devdata *dd)
13940 * Put the HFI CSRs in a known state.
13941 * Combine this with a DC reset.
13943 * Stop the device from doing anything while we do a
13944 * reset. We know there are no other active users of
13945 * the device since we are now in charge. Turn off
13946 * all outbound and inbound traffic and make sure
13947 * the device does not generate any interrupts.
13950 /* disable send contexts and SDMA engines */
13951 write_csr(dd, SEND_CTRL, 0);
13952 for (i = 0; i < dd->chip_send_contexts; i++)
13953 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13954 for (i = 0; i < dd->chip_sdma_engines; i++)
13955 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13956 /* disable port (turn off RXE inbound traffic) and contexts */
13957 write_csr(dd, RCV_CTRL, 0);
13958 for (i = 0; i < dd->chip_rcv_contexts; i++)
13959 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13960 /* mask all interrupt sources */
13961 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13962 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13965 * DC Reset: do a full DC reset before the register clear.
13966 * A recommended length of time to hold is one CSR read,
13967 * so reread the CceDcCtrl. Then, hold the DC in reset
13968 * across the clear.
13970 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13971 (void)read_csr(dd, CCE_DC_CTRL);
13975 * An FLR will reset the SPC core and part of the PCIe.
13976 * The parts that need to be restored have already been
13979 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13981 /* do the FLR, the DC reset will remain */
13982 pcie_flr(dd->pcidev);
13984 /* restore command and BARs */
13985 ret = restore_pci_variables(dd);
13987 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13993 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13994 pcie_flr(dd->pcidev);
13995 ret = restore_pci_variables(dd);
13997 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14003 dd_dev_info(dd, "Resetting CSRs with writes\n");
14004 reset_cce_csrs(dd);
14005 reset_txe_csrs(dd);
14006 reset_rxe_csrs(dd);
14007 reset_misc_csrs(dd);
14009 /* clear the DC reset */
14010 write_csr(dd, CCE_DC_CTRL, 0);
14012 /* Set the LED off */
14016 * Clear the QSFP reset.
14017 * An FLR enforces a 0 on all out pins. The driver does not touch
14018 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
14019 * anything plugged in held constantly in reset, if it pays attention to RESET_N.
14021 * Prime examples of this are optical cables. Set all pins high.
14022 * I2CCLK and I2CDAT will change per direction, and INT_N and
14023 * MODPRS_N are input only and their value is ignored.
14025 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14026 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14027 init_chip_resources(dd);
14031 static void init_early_variables(struct hfi1_devdata *dd)
14035 /* assign link credit variables */
14037 dd->link_credits = CM_GLOBAL_CREDITS;
14039 dd->link_credits--;
14040 dd->vcu = cu_to_vcu(hfi1_cu);
14041 /* enough room for 8 MAD packets plus header - 17K */
14042 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14043 if (dd->vl15_init > dd->link_credits)
14044 dd->vl15_init = dd->link_credits;
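/*
 * Worked example: 8 * (2048 + 128) = 17408 bytes. Assuming 64-byte
 * allocation units (vau_to_au() returning 64), vl15_init is
 * 17408 / 64 = 272 credits before the clamp above.
 */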
14046 write_uninitialized_csrs_and_memories(dd);
14048 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14049 for (i = 0; i < dd->num_pports; i++) {
14050 struct hfi1_pportdata *ppd = &dd->pport[i];
14052 set_partition_keys(ppd);
14054 init_sc2vl_tables(dd);
14057 static void init_kdeth_qp(struct hfi1_devdata *dd)
14059 /* user changed the KDETH_QP */
14060 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14061 /* out of range or illegal value */
14062 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14065 if (kdeth_qp == 0) /* not set, or failed range check */
14066 kdeth_qp = DEFAULT_KDETH_QP;
14068 write_csr(dd, SEND_BTH_QP,
14069 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14070 SEND_BTH_QP_KDETH_QP_SHIFT);
14072 write_csr(dd, RCV_BTH_QP,
14073 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14074 RCV_BTH_QP_KDETH_QP_SHIFT);
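/*
 * The prefix occupies the upper byte of the 24-bit QP number, so e.g.
 * a prefix of 0x80 makes QPNs of the form 0x80xxxx KDETH QPs on both
 * the send and receive sides.
 */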
14079 * @dd - device data
14080 * @first_ctxt - first context
14081 * @last_ctxt - last context
14083 * This routine sets the qpn mapping table that
14084 * is indexed by qpn[8:1].
14086 * The routine will round robin the 256 settings
14087 * from first_ctxt to last_ctxt.
14089 * The first/last looks ahead to having specialized
14090 * receive contexts for mgmt and bypass. Normal
14091 * verbs traffic is assumed to be on a range
14092 * of receive contexts.
14094 static void init_qpmap_table(struct hfi1_devdata *dd,
14099 u64 regno = RCV_QP_MAP_TABLE;
14101 u64 ctxt = first_ctxt;
14103 for (i = 0; i < 256; i++) {
14104 reg |= ctxt << (8 * (i % 8));
14106 if (ctxt > last_ctxt)
14109 write_csr(dd, regno, reg);
14115 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14116 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
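/*
 * Layout example: the table holds 256 one-byte entries, eight per
 * 64-bit register, so entry i lands in byte (i % 8) of the register at
 * RCV_QP_MAP_TABLE + 8 * (i / 8). With first_ctxt = 0 and last_ctxt = 2
 * the entries cycle 0, 1, 2, 0, 1, 2, ...
 */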
14119 struct rsm_map_table {
14120 u64 map[NUM_MAP_REGS];
14124 struct rsm_rule_data {
14140 * Return an initialized RMT map table for users to fill in. OK if it
14141 * returns NULL, indicating no table.
14143 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14145 struct rsm_map_table *rmt;
14146 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is the default on A0 h/w */
14148 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14150 memset(rmt->map, rxcontext, sizeof(rmt->map));
14158 * Write the final RMT map table to the chip; the caller frees the table. OK if
14161 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14162 struct rsm_map_table *rmt)
14167 /* write table to chip */
14168 for (i = 0; i < NUM_MAP_REGS; i++)
14169 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14172 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14177 * Add a receive side mapping rule.
14179 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14180 struct rsm_rule_data *rrd)
14182 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14183 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14184 1ull << rule_index | /* enable bit */
14185 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14186 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14187 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14188 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14189 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14190 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14191 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14192 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14193 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14194 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14195 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14196 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14197 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14201 * Clear a receive side mapping rule.
14203 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14205 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14206 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14207 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14210 /* return the number of RSM map table entries that will be used for QOS */
14211 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14218 /* is QOS active at all? */
14219 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14224 /* determine bits for qpn */
14225 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14226 if (krcvqs[i] > max_by_vl)
14227 max_by_vl = krcvqs[i];
14228 if (max_by_vl > 32)
14230 m = ilog2(__roundup_pow_of_two(max_by_vl));
14232 /* determine bits for vl */
14233 n = ilog2(__roundup_pow_of_two(num_vls));
14235 /* reject if too much is used */
14244 return 1 << (m + n);
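/*
 * Worked example: with num_vls = 8 and krcvqs[] = { 2, 2, ..., 2 },
 * max_by_vl = 2 gives m = 1, n = ilog2(8) = 3, and the function
 * returns 1 << (1 + 3) = 16 map table entries for QOS.
 */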
14255 * init_qos - init RX qos
14256 * @dd - device data
14257 * @rmt - RSM map table
14259 * This routine initializes Rule 0 and the RSM map table to implement
14260 * quality of service (qos).
14262 * If all of the limit tests succeed, qos is applied based on the array
14263 * interpretation of krcvqs where entry 0 is VL0.
14265 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14266 * feed both the RSM map table and the single rule.
14268 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14270 struct rsm_rule_data rrd;
14271 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14272 unsigned int rmt_entries;
14277 rmt_entries = qos_rmt_entries(dd, &m, &n);
14278 if (rmt_entries == 0)
14280 qpns_per_vl = 1 << m;
14282 /* enough room in the map table? */
14283 rmt_entries = 1 << (m + n);
14284 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14287 /* add qos entries to the RSM map table */
14288 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14291 for (qpn = 0, tctxt = ctxt;
14292 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14293 unsigned idx, regoff, regidx;
14295 /* generate the index the hardware will produce */
14296 idx = rmt->used + ((qpn << n) ^ i);
14297 regoff = (idx % 8) * 8;
14299 /* replace default with context number */
14300 reg = rmt->map[regidx];
14301 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14303 reg |= (u64)(tctxt++) << regoff;
14304 rmt->map[regidx] = reg;
14305 if (tctxt == ctxt + krcvqs[i])
14311 rrd.offset = rmt->used;
14313 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14314 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14315 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14316 rrd.index1_width = n;
14317 rrd.index2_off = QPN_SELECT_OFFSET;
14318 rrd.index2_width = m + n;
14319 rrd.mask1 = LRH_BTH_MASK;
14320 rrd.value1 = LRH_BTH_VALUE;
14321 rrd.mask2 = LRH_SC_MASK;
14322 rrd.value2 = LRH_SC_VALUE;
14325 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14327 /* mark RSM map entries as used */
14328 rmt->used += rmt_entries;
14329 /* map everything else to the mcast/err/vl15 context */
14330 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14331 dd->qos_shift = n + 1;
14335 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
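/*
 * Worked example of the QOS map index above, idx = used + ((qpn << n) ^ i):
 * with n = 1 (two VLs) and qpns_per_vl = 2, VL 0 takes offsets 0 and 2
 * while VL 1 takes offsets 1 and 3 - the VL number interleaves into the
 * low bits of the QOS region of the table.
 */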
14338 static void init_user_fecn_handling(struct hfi1_devdata *dd,
14339 struct rsm_map_table *rmt)
14341 struct rsm_rule_data rrd;
14343 int i, idx, regoff, regidx;
14346 /* there needs to be enough room in the map table */
14347 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14348 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14353 * RSM will extract the destination context as an index into the
14354 * map table. The destination contexts are a sequential block
14355 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14356 * Map entries are accessed as offset + extracted value. Adjust
14357 * the added offset so this sequence can be placed anywhere in
14358 * the table - as long as the entries themselves do not wrap.
14359 * There are only enough bits in offset for the table size, so
14360 * start with that to allow for a "negative" offset.
14362 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14363 (int)dd->first_dyn_alloc_ctxt);
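/*
 * Worked example, assuming a 256-entry table: with rmt->used = 20 and
 * first_dyn_alloc_ctxt = 8, offset = (u8)(256 + 20 - 8) = 12. For the
 * first user context RSM computes map index 12 + 8 = 20, exactly where
 * the loop below places its identity entry.
 */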
14365 for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14366 i < dd->num_rcv_contexts; i++, idx++) {
14367 /* replace with identity mapping */
14368 regoff = (idx % 8) * 8;
14370 reg = rmt->map[regidx];
14371 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14372 reg |= (u64)i << regoff;
14373 rmt->map[regidx] = reg;
14377 * For RSM intercept of Expected FECN packets:
14378 * o packet type 0 - expected
14379 * o match on F (bit 95), using select/match 1, and
14380 * o match on SH (bit 133), using select/match 2.
14382 * Use index 1 to extract the 8-bit receive context from DestQP
14383 * (start at bit 64). Use that as the RSM map table index.
14385 rrd.offset = offset;
14387 rrd.field1_off = 95;
14388 rrd.field2_off = 133;
14389 rrd.index1_off = 64;
14390 rrd.index1_width = 8;
14391 rrd.index2_off = 0;
14392 rrd.index2_width = 0;
14399 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14401 rmt->used += dd->num_user_contexts;
14404 /* Initialize RSM for VNIC */
14405 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14411 struct rsm_rule_data rrd;
14413 if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14414 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14415 dd->vnic.rmt_start);
14419 dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14420 dd->vnic.rmt_start,
14421 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14423 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14424 regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14425 reg = read_csr(dd, regoff);
14426 for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14427 /* Update map register with vnic context */
14428 j = (dd->vnic.rmt_start + i) % 8;
14429 reg &= ~(0xffllu << (j * 8));
14430 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14431 /* Wrap up vnic ctx index */
14432 ctx_id %= dd->vnic.num_ctxt;
14433 /* Write back map register */
14434 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14435 dev_dbg(&(dd)->pcidev->dev,
14436 "Vnic rsm map reg[%d] =0x%llx\n",
14437 regoff - RCV_RSM_MAP_TABLE, reg);
14439 write_csr(dd, regoff, reg);
14441 if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14442 reg = read_csr(dd, regoff);
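/*
 * Packing example: one map register holds eight one-byte context
 * entries, so with rmt_start = 21 the first three VNIC entries fill
 * bytes 5..7 of map register 2; that register is written back at
 * j == 7 and the remaining entries continue in register 3.
 */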
14446 /* Add rule for vnic */
14447 rrd.offset = dd->vnic.rmt_start;
14449 /* Match 16B packets */
14450 rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14451 rrd.mask1 = L2_TYPE_MASK;
14452 rrd.value1 = L2_16B_VALUE;
14453 /* Match ETH L4 packets */
14454 rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14455 rrd.mask2 = L4_16B_TYPE_MASK;
14456 rrd.value2 = L4_16B_ETH_VALUE;
14457 /* Calc context from veswid and entropy */
14458 rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14459 rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14460 rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14461 rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14462 add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14464 /* Enable RSM if not already enabled */
14465 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14468 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14470 clear_rsm_rule(dd, RSM_INS_VNIC);
14472 /* Disable RSM if used only by vnic */
14473 if (dd->vnic.rmt_start == 0)
14474 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14477 static void init_rxe(struct hfi1_devdata *dd)
14479 struct rsm_map_table *rmt;
14482 /* enable all receive errors */
14483 write_csr(dd, RCV_ERR_MASK, ~0ull);
14485 rmt = alloc_rsm_map_table(dd);
14486 /* set up QOS, including the QPN map table */
14488 init_user_fecn_handling(dd, rmt);
14489 complete_rsm_map_table(dd, rmt);
14490 /* record number of used rsm map entries for vnic */
14491 dd->vnic.rmt_start = rmt->used;
14495 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14496 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14497 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14498 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14499 * Max_Payload_Size set to its minimum of 128.
14501 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14502 * (64 bytes). Max_Payload_Size is possibly modified upward in
14503 * tune_pcie_caps() which is called after this routine.
14506 /* Have 16 bytes (4DW) of bypass header available in header queue */
14507 val = read_csr(dd, RCV_BYPASS);
14508 val |= (4ull << 16);
14509 write_csr(dd, RCV_BYPASS, val);
14512 static void init_other(struct hfi1_devdata *dd)
14514 /* enable all CCE errors */
14515 write_csr(dd, CCE_ERR_MASK, ~0ull);
14516 /* enable *some* Misc errors */
14517 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14518 /* enable all DC errors, except LCB */
14519 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14520 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14524 * Fill out the given AU table using the given CU. A CU is defined in terms
14525 * of AUs. The table is an encoding: given the index, how many AUs does that
14528 * NOTE: Assumes that the register layout is the same for the
14529 * local and remote tables.
14531 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14532 u32 csr0to3, u32 csr4to7)
14534 write_csr(dd, csr0to3,
14535 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14536 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14538 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14540 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14541 write_csr(dd, csr4to7,
14543 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14545 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14547 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14549 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
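/*
 * Assuming the usual multipliers (2, 4, ..., 64 times the CU for
 * entries 2-7), a CU of 1 encodes the table { 0, 1, 2, 4, 8, 16, 32,
 * 64 } AUs for indices 0-7; entries 2 and up scale linearly with CU.
 */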
14552 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14554 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14555 SEND_CM_LOCAL_AU_TABLE4_TO7);
14558 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14560 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14561 SEND_CM_REMOTE_AU_TABLE4_TO7);
14564 static void init_txe(struct hfi1_devdata *dd)
14568 /* enable all PIO, SDMA, general, and Egress errors */
14569 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14570 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14571 write_csr(dd, SEND_ERR_MASK, ~0ull);
14572 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14574 /* enable all per-context and per-SDMA engine errors */
14575 for (i = 0; i < dd->chip_send_contexts; i++)
14576 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14577 for (i = 0; i < dd->chip_sdma_engines; i++)
14578 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14580 /* set the local CU to AU mapping */
14581 assign_local_cm_au_table(dd, dd->vcu);
14584 * Set reasonable default for Credit Return Timer
14585 * Don't set on Simulator - causes it to choke.
14587 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14588 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14591 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14597 if (!rcd || !rcd->sc)
14600 hw_ctxt = rcd->sc->hw_context;
14601 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14602 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14603 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14604 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14605 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14606 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14607 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14609 * Enable send-side J_KEY integrity check, unless this is A0 h/w
14612 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14613 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14614 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14617 /* Enable J_KEY check on receive context. */
14618 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14619 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14620 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14621 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14626 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14631 if (!rcd || !rcd->sc)
14634 hw_ctxt = rcd->sc->hw_context;
14635 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14637 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14638 * This check would not have been enabled for A0 h/w, see
14642 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14643 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14644 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14646 /* Turn off the J_KEY on the receive side */
14647 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14652 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14658 if (!rcd || !rcd->sc)
14661 hw_ctxt = rcd->sc->hw_context;
14662 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14663 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14664 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14665 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14666 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14667 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14668 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14673 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14678 if (!ctxt || !ctxt->sc)
14681 hw_ctxt = ctxt->sc->hw_context;
14682 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14683 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14684 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14685 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14691 * Start doing the clean up of the chip. Our clean up happens in multiple
14692 * stages and this is just the first.
14694 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14699 clean_up_interrupts(dd);
14700 finish_chip_resources(dd);
14703 #define HFI_BASE_GUID(dev) \
14704 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14707 * Information can be shared between the two HFIs on the same ASIC
14708 * in the same OS. This function finds the peer device and sets
14709 * up a shared structure.
14711 static int init_asic_data(struct hfi1_devdata *dd)
14713 unsigned long flags;
14714 struct hfi1_devdata *tmp, *peer = NULL;
14715 struct hfi1_asic_data *asic_data;
14718 /* pre-allocate the asic structure in case we are the first device */
14719 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14723 spin_lock_irqsave(&hfi1_devs_lock, flags);
14724 /* Find our peer device */
14725 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14726 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14727 dd->unit != tmp->unit) {
14734 /* use already allocated structure */
14735 dd->asic_data = peer->asic_data;
14738 dd->asic_data = asic_data;
14739 mutex_init(&dd->asic_data->asic_resource_mutex);
14741 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14742 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14744 /* first one through - set up i2c devices */
14746 ret = set_up_i2c(dd, dd->asic_data);
14752 * Set dd->boardname. Use a generic name if a name is not returned from
14753 * EFI variable space.
14755 * Return 0 on success, -ENOMEM if space could not be allocated.
14757 static int obtain_boardname(struct hfi1_devdata *dd)
14759 /* generic board description */
14760 const char generic[] =
14761 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14762 unsigned long size;
14765 ret = read_hfi1_efi_var(dd, "description", &size,
14766 (void **)&dd->boardname);
14768 dd_dev_info(dd, "Board description not found\n");
14769 /* use generic description */
14770 dd->boardname = kstrdup(generic, GFP_KERNEL);
14771 if (!dd->boardname)
14778 * Check the interrupt registers to make sure that they are mapped correctly.
14779 * It is intended to help the user identify any mismapping by the VMM when
14780 * the driver is running in a VM. This function should only be called before
14781 * interrupts are set up properly.
14783 * Return 0 on success, -EINVAL on failure.
14785 static int check_int_registers(struct hfi1_devdata *dd)
14788 u64 all_bits = ~(u64)0;
14791 /* Clear CceIntMask[0] to avoid raising any interrupts */
14792 mask = read_csr(dd, CCE_INT_MASK);
14793 write_csr(dd, CCE_INT_MASK, 0ull);
14794 reg = read_csr(dd, CCE_INT_MASK);
14798 /* Clear all interrupt status bits */
14799 write_csr(dd, CCE_INT_CLEAR, all_bits);
14800 reg = read_csr(dd, CCE_INT_STATUS);
14804 /* Set all interrupt status bits */
14805 write_csr(dd, CCE_INT_FORCE, all_bits);
14806 reg = read_csr(dd, CCE_INT_STATUS);
14807 if (reg != all_bits)
14810 /* Restore the interrupt mask */
14811 write_csr(dd, CCE_INT_CLEAR, all_bits);
14812 write_csr(dd, CCE_INT_MASK, mask);
14816 write_csr(dd, CCE_INT_MASK, mask);
14817 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14822 * Allocate and initialize the device structure for the hfi.
14823 * @pdev: the pci_dev for the hfi1_ib device
14824 * @ent: pci_device_id struct for this dev
14826 * Also allocates, initializes, and returns the devdata struct for this
14829 * This is global, and is called directly at init to set up the
14830 * chip-specific function pointers for later use.
14832 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14833 const struct pci_device_id *ent)
14835 struct hfi1_devdata *dd;
14836 struct hfi1_pportdata *ppd;
14839 static const char * const inames[] = { /* implementation names */
14841 "RTL VCS simulation",
14842 "RTL FPGA emulation",
14843 "Functional simulator"
14845 struct pci_dev *parent = pdev->bus->self;
14847 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14848 sizeof(struct hfi1_pportdata));
14852 for (i = 0; i < dd->num_pports; i++, ppd++) {
14854 /* init common fields */
14855 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14856 /* DC supports 4 link widths */
14857 ppd->link_width_supported =
14858 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14859 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14860 ppd->link_width_downgrade_supported =
14861 ppd->link_width_supported;
14862 /* start out enabling only 4X */
14863 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14864 ppd->link_width_downgrade_enabled =
14865 ppd->link_width_downgrade_supported;
14866 /* link width active is 0 when link is down */
14867 /* link width downgrade active is 0 when link is down */
14869 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14870 num_vls > HFI1_MAX_VLS_SUPPORTED) {
14871 hfi1_early_err(&pdev->dev,
14872 "Invalid num_vls %u, using %u VLs\n",
14873 num_vls, HFI1_MAX_VLS_SUPPORTED);
14874 num_vls = HFI1_MAX_VLS_SUPPORTED;
14876 ppd->vls_supported = num_vls;
14877 ppd->vls_operational = ppd->vls_supported;
14878 /* Set the default MTU. */
14879 for (vl = 0; vl < num_vls; vl++)
14880 dd->vld[vl].mtu = hfi1_max_mtu;
14881 dd->vld[15].mtu = MAX_MAD_PACKET;
14883 * Set the initial values to reasonable defaults; they will be set
14884 * for real when the link comes up.
14886 ppd->overrun_threshold = 0x4;
14887 ppd->phy_error_threshold = 0xf;
14888 ppd->port_crc_mode_enabled = link_crc_mask;
14889 /* initialize supported LTP CRC mode */
14890 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14891 /* initialize enabled LTP CRC mode */
14892 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14893 /* start in offline */
14894 ppd->host_link_state = HLS_DN_OFFLINE;
14895 init_vl_arb_caches(ppd);
14898 dd->link_default = HLS_DN_POLL;
14901 * Do remaining PCIe setup and save PCIe values in dd.
14902 * Any error printing is already done by the init code.
14903 * On return, we have the chip mapped.
14905 ret = hfi1_pcie_ddinit(dd, pdev);
14909 /* Save PCI space registers to rewrite after device reset */
14910 ret = save_pci_variables(dd);
14914 /* verify that reads actually work, save revision for reset check */
14915 dd->revision = read_csr(dd, CCE_REVISION);
14916 if (dd->revision == ~(u64)0) {
14917 dd_dev_err(dd, "cannot read chip CSRs\n");
14921 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14922 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14923 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14924 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14927 * Check interrupt registers mapping if the driver has no access to
14928 * the upstream component. In this case, it is likely that the driver
14929 * is running in a VM.
14932 ret = check_int_registers(dd);
14938 * obtain the hardware ID - NOT related to unit, which is a
14939 * software enumeration
14941 reg = read_csr(dd, CCE_REVISION2);
14942 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14943 & CCE_REVISION2_HFI_ID_MASK;
14944 /* the variable size will remove unwanted bits */
14945 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14946 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14947 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14948 dd->icode < ARRAY_SIZE(inames) ?
14949 inames[dd->icode] : "unknown", (int)dd->irev);
14951 /* speeds the hardware can support */
14952 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14953 /* speeds allowed to run at */
14954 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14955 /* give a reasonable active value, will be set on link up */
14956 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14958 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14959 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14960 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14961 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14962 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14963 /* fix up link widths for emulation _p */
14965 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14966 ppd->link_width_supported =
14967 ppd->link_width_enabled =
14968 ppd->link_width_downgrade_supported =
14969 ppd->link_width_downgrade_enabled =
14972 /* ensure num_vls isn't larger than the number of sdma engines */
14973 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14974 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14975 num_vls, dd->chip_sdma_engines);
14976 num_vls = dd->chip_sdma_engines;
14977 ppd->vls_supported = dd->chip_sdma_engines;
14978 ppd->vls_operational = ppd->vls_supported;
14982 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14983 * Limit the max if larger than the field holds. If timeout is
14984 * non-zero, then the calculated field will be at least 1.
14986 * Must be after icode is set up - the cclock rate depends
14987 * on knowing the hardware being used.
14989 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14990 if (dd->rcv_intr_timeout_csr >
14991 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14992 dd->rcv_intr_timeout_csr =
14993 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14994 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14995 dd->rcv_intr_timeout_csr = 1;
14997 /* needs to be done before we look for the peer device */
15000 /* set up shared ASIC data with peer device */
15001 ret = init_asic_data(dd);
15005 /* obtain chip sizes, reset chip CSRs */
15006 ret = init_chip(dd);
15010 /* read in the PCIe link speed information */
15011 ret = pcie_speeds(dd);
15015 /* call before get_platform_config(), after init_chip_resources() */
15016 ret = eprom_init(dd);
15018 goto bail_free_rcverr;
15020 /* Needs to be called before hfi1_firmware_init */
15021 get_platform_config(dd);
15023 /* read in firmware */
15024 ret = hfi1_firmware_init(dd);
15029 * In general, the PCIe Gen3 transition must occur after the
15030 * chip has been idled (so it won't initiate any PCIe transactions
15031 * e.g. an interrupt) and before the driver changes any registers
15032 * (the transition will reset the registers).
15034 * In particular, place this call after:
15035 * - init_chip() - the chip will not initiate any PCIe transactions
15036 * - pcie_speeds() - reads the current link speed
15037 * - hfi1_firmware_init() - the needed firmware is ready to be
15040 ret = do_pcie_gen3_transition(dd);
15044 /* start setting dd values and adjusting CSRs */
15045 init_early_variables(dd);
15047 parse_platform_config(dd);
15049 ret = obtain_boardname(dd);
15053 snprintf(dd->boardversion, BOARD_VERS_MAX,
15054 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15055 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15058 (dd->revision >> CCE_REVISION_SW_SHIFT)
15059 & CCE_REVISION_SW_MASK);
15061 ret = set_up_context_variables(dd);
15065 /* set initial RXE CSRs */
15067 /* set initial TXE CSRs */
15069 /* set initial non-RXE, non-TXE CSRs */
15071 /* set up KDETH QP prefix in both RX and TX CSRs */
15074 ret = hfi1_dev_affinity_init(dd);
15078 /* send contexts must be set up before receive contexts */
15079 ret = init_send_contexts(dd);
15083 ret = hfi1_create_kctxts(dd);
15088 * Initialize aspm, to be done after gen3 transition and setting up
15089 * contexts and before enabling interrupts
15093 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
15095 * rcd[0] is guaranteed to be valid by this point. Also, all
15096 * contexts are using the same value, as per the module parameter.
15098 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
15100 ret = init_pervl_scs(dd);
15105 for (i = 0; i < dd->num_pports; ++i) {
15106 ret = sdma_init(dd, i);
15111 /* use contexts created by hfi1_create_kctxts */
15112 ret = set_up_interrupts(dd);
15116 /* set up LCB access - must be after set_up_interrupts() */
15117 init_lcb_access(dd);
15120 * Serial number is created from the base guid:
15121 * [27:24] = base guid [38:35]
15122 * [23: 0] = base guid [23: 0]
15124 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15125 (dd->base_guid & 0xFFFFFF) |
15126 ((dd->base_guid >> 11) & 0xF000000));
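/*
 * Worked example of the packing above: a base guid with bits [38:35]
 * equal to 0x1 and bits [23:0] equal to 0xabcdef yields the serial
 * "0x01abcdef" - the >> 11 moves guid bit 35 down to serial bit 24.
 */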
15128 dd->oui1 = dd->base_guid >> 56 & 0xFF;
15129 dd->oui2 = dd->base_guid >> 48 & 0xFF;
15130 dd->oui3 = dd->base_guid >> 40 & 0xFF;
15132 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15134 goto bail_clear_intr;
15138 ret = init_cntrs(dd);
15140 goto bail_clear_intr;
15142 ret = init_rcverr(dd);
15144 goto bail_free_cntrs;
15146 init_completion(&dd->user_comp);
15148 /* The user refcount starts with one to indicate an active device */
15149 atomic_set(&dd->user_refcount, 1);
15158 clean_up_interrupts(dd);
15160 hfi1_pcie_ddcleanup(dd);
15162 hfi1_free_devdata(dd);
15168 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15172 u32 current_egress_rate = ppd->current_egress_rate;
15173 /* rates here are in units of 10^6 bits/sec */
15175 if (desired_egress_rate == -1)
15176 return 0; /* shouldn't happen */
15178 if (desired_egress_rate >= current_egress_rate)
15179 return 0; /* we can't help go faster, only slower */
15181 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15182 egress_cycles(dw_len * 4, current_egress_rate);
15184 return (u16)delta_cycles;
15188 * create_pbc - build a pbc for transmission
15189 * @flags: special case flags or-ed in built pbc
15190 * @srate_mbs: static rate in Mbit/s
15192 * @dw_len: dword length (header words + data words + pbc words)
15194 * Create a PBC with the given flags, rate, VL, and length.
15196 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15197 * for verbs, which does not use this PSM feature. The lone other caller
15198 * is for the diagnostic interface which calls this if the user does not
15199 * supply their own PBC.
15201 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15204 u64 pbc, delay = 0;
15206 if (unlikely(srate_mbs))
15207 delay = delay_cycles(ppd, srate_mbs, dw_len);
15210 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15211 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15212 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15213 | (dw_len & PBC_LENGTH_DWS_MASK)
15214 << PBC_LENGTH_DWS_SHIFT;
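/*
 * Typical usage (illustrative sketch, not a quote of any caller):
 * verbs builds a PBC with no special flags and no static rate pacing,
 * e.g.
 *
 *	pbc = create_pbc(ppd, 0, 0, vl, hdr_dwords + data_dwords + 2);
 *
 * where the trailing 2 dwords account for the 8-byte PBC itself.
 */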
15219 #define SBUS_THERMAL 0x4f
15220 #define SBUS_THERM_MONITOR_MODE 0x1
15222 #define THERM_FAILURE(dev, ret, reason) \
15224 "Thermal sensor initialization failed: %s (%d)\n", \
15228 * Initialize the thermal sensor.
15230 * After initialization, enable polling of thermal sensor through
15231 * SBus interface. For this to work, the SBus Master
15232 * firmware has to be loaded, because the HW polling
15233 * logic uses SBus interrupts, which are not supported by the
15234 * default firmware. Otherwise, no data will be returned through
15235 * the ASIC_STS_THERM CSR.
15237 static int thermal_init(struct hfi1_devdata *dd)
15241 if (dd->icode != ICODE_RTL_SILICON ||
15242 check_chip_resource(dd, CR_THERM_INIT, NULL))
15245 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15247 THERM_FAILURE(dd, ret, "Acquire SBus");
15251 dd_dev_info(dd, "Initializing thermal sensor\n");
15252 /* Disable polling of thermal readings */
15253 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15255 /* Thermal Sensor Initialization */
15256 /* Step 1: Reset the Thermal SBus Receiver */
15257 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15258 RESET_SBUS_RECEIVER, 0);
15260 THERM_FAILURE(dd, ret, "Bus Reset");
15263 /* Step 2: Set Reset bit in Thermal block */
15264 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15265 WRITE_SBUS_RECEIVER, 0x1);
15267 THERM_FAILURE(dd, ret, "Therm Block Reset");
15270 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
15271 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15272 WRITE_SBUS_RECEIVER, 0x32);
15274 THERM_FAILURE(dd, ret, "Write Clock Div");
15277 /* Step 4: Select temperature mode */
15278 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15279 WRITE_SBUS_RECEIVER,
15280 SBUS_THERM_MONITOR_MODE);
15282 THERM_FAILURE(dd, ret, "Write Mode Sel");
15285 /* Step 5: De-assert block reset and start conversion */
15286 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15287 WRITE_SBUS_RECEIVER, 0x2);
15289 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15292 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
15295 /* Enable polling of thermal readings */
15296 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15298 /* Set initialized flag */
15299 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15301 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15304 release_chip_resource(dd, CR_SBUS);
15308 static void handle_temp_err(struct hfi1_devdata *dd)
15310 struct hfi1_pportdata *ppd = &dd->pport[0];
15312 * Thermal Critical Interrupt
15313 * Put the device into forced freeze mode, take link down to
15314 * offline, and put DC into reset.
15317 "Critical temperature reached! Forcing device into freeze mode!\n");
15318 dd->flags |= HFI1_FORCED_FREEZE;
15319 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15321 * Shut DC down as much and as quickly as possible.
15323 * Step 1: Take the link down to OFFLINE. This will cause the
15324 * 8051 to put the Serdes in reset. However, we don't want to
15325 * go through the entire link state machine since we want to
15326 * shutdown ASAP. Furthermore, this is not a graceful shutdown
15327 * but rather an attempt to save the chip.
15328 * Code below is almost the same as quiet_serdes() but avoids
15329 * all the extra work and the sleeps.
15331 ppd->driver_link_ready = 0;
15332 ppd->link_enabled = 0;
15333 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15336 * Step 2: Shutdown LCB and 8051
15337 * After shutdown, do not restore DC_CFG_RESET value.