/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1
/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS	0
#define RSM_INS_FECN	1
#define RSM_INS_VNIC	2
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT	39

/* extract the emulation revision */
#define emulator_rev(dd)	((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd)	((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd)	((((dd)->irev) & 0xf) == 4)
/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE		2ull
#define QW_SHIFT		6ull
/* QPN[7..1] */
#define QPN_WIDTH		7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW		0ull
#define LRH_BTH_BIT_OFFSET	48ull
#define LRH_BTH_OFFSET(off)	((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET	LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK		3ull
#define LRH_BTH_VALUE		2ull
/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW		0ull
#define LRH_SC_BIT_OFFSET	56ull
#define LRH_SC_OFFSET(off)	((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET	LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK		128ull
#define LRH_SC_VALUE		0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET	((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET	((1ull << QW_SHIFT) | (1ull))
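
/*
 * Editorial worked example of the offset encoding used above: each RSM
 * match/select offset packs a quad-word index in the bits above QW_SHIFT
 * and a bit offset within that quad word below it, so
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48  (QW 0, bit 48)
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65  (QW 1, bit 1)
 */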

/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW		0ull
#define L2_TYPE_BIT_OFFSET	61ull
#define L2_TYPE_OFFSET(off)	((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET	L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK		3ull
#define L2_16B_VALUE		2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW		1ull
#define L4_TYPE_BIT_OFFSET	0ull
#define L4_TYPE_OFFSET(off)	((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET	L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK	0xFFull
#define L4_16B_ETH_VALUE	0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET	((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET		((1 << QW_SHIFT) | (32ull))

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
(	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)

#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
(	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT)   \
)
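
/*
 * Illustrative use of the table builders above (hypothetical values,
 * shown for clarity): one 64-bit CSR write covers eight SC entries,
 * e.g. mapping SC0..SC7 straight through to VL0..VL7 on table 0:
 *
 *	write_csr(dd, SEND_SC2VLT0,
 *		  SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3,
 *			    4, 4, 5, 5, 6, 6, 7, 7));
 */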

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
			| CCE_STATUS_RXE_FROZE_SMASK \
			| CCE_STATUS_TXE_FROZE_SMASK \
			| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
			| CCE_STATUS_TXE_PAUSED_SMASK \
			| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF

static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK)
};

#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);

/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries. Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
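
/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *	  handle_cce_err, "CceErr" },
 * pairing the CceErrStatus/Clear/Mask CSRs with their handler.
 */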

/*
 * Table of the "misc" grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};

/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};

/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
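
/*
 * (Editorial note) For the power-of-2 MTU sizes the encoding is simply
 * ilog2(mtu) - 7: 256 -> 1, 2048 -> 4, 8192 -> 6; only 10240 needs the
 * constant above.
 */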

/*
 * Table of the DC grouping of error interrupts. Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};

struct cntr_entry {
	/* counter name */
	char *name;

	/* csr to read for name (if applicable) */
	u64 csr;

	/* offset into dd or ppd to store the counter's value */
	int offset;

	/* flags */
	u8 flags;

	/* accessor for stat element, context either dd or ppd */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159

#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)

u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT) {
		return readq((void __iomem *)dd->kregbase + offset);
	}
	return -1;
}

void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT)
		writeq(value, (void __iomem *)dd->kregbase + offset);
}

void __iomem *get_csr_addr(
	struct hfi1_devdata *dd,
	u32 offset)
{
	return (void __iomem *)dd->kregbase + offset;
}
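
/*
 * Editorial sketch (not the driver's actual routine): how an err_reg_info
 * entry from the tables above is typically consumed. Read the second-tier
 * status CSR, acknowledge the bits via the clear CSR, then let the entry's
 * handler decode them against its flag_table.
 */
static inline void example_err_clear_down(struct hfi1_devdata *dd, u32 source,
					  const struct err_reg_info *eri)
{
	u64 reg = read_csr(dd, eri->status);	/* which error bits fired */

	write_csr(dd, eri->clear, reg);		/* acknowledge them */
	if (reg && eri->handler)
		eri->handler(dd, source, reg);	/* decode and report */
}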

static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}

static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}

static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}

static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}

static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}

static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}

/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}
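
/*
 * Illustrative semantics, not driver code: CNTR_MODE_R returns the
 * current value of *cntr and CNTR_MODE_W stores "data" into it, so a
 * caller clears a software counter with:
 *
 *	read_write_sw(dd, &ppd->link_downed, CNTR_MODE_W, 0);
 */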

static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}

static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}

static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
				      void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}

u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
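
/*
 * Illustrative only: the hot path bumps these counters with
 * this_cpu_inc(*cntr); get_all_cpu_total() then folds every possible
 * CPU's slot into one value without atomics, so the sum is a snapshot
 * that may lag updates still in flight on other CPUs.
 */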

static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
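
/*
 * Zeroing here is virtual: instead of clearing every CPU's slot, a
 * write of 0 snapshots the running total into *z_val and later reads
 * report the delta. Illustrative sequence, not driver code:
 *
 *	read_write_cpu(dd, &z, cntr, CNTR_INVALID_VL, CNTR_MODE_W, 0);
 *	delta = read_write_cpu(dd, &z, cntr, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 */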

static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}

/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}

static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[11];
}

static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[10];
}

static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[9];
}

static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[8];
}

static u64 access_misc_efuse_read_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[7];
}

static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[6];
}

static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[5];
}

static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[4];
}

static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[3];
}

static u64 access_misc_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[2];
}

static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[1];
}

static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[0];
}
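
/*
 * The accessors above map one-to-one onto MISC_ERR_STATUS bit
 * positions: the error-interrupt handler bumps misc_err_status_cnt[bit]
 * when it sees that bit set, and access_misc_*_err_cnt simply report
 * the totals (index 12 down to index 0). The CceErrStatus,
 * RcvErrStatus, and Send*ErrStatus groups below follow the same
 * pattern.
 */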

/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}

/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}

static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[39];
}

static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[38];
}

static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[37];
}

static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[36];
}

static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[35];
}

static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[34];
}

static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[33];
}

static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl, int mode,
						u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[32];
}

static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[31];
}

static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[30];
}

static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[29];
}

static u64 access_pcic_transmit_back_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[28];
}

static u64 access_pcic_transmit_front_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[27];
}

static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[26];
}

static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[25];
}

static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[24];
}

static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[23];
}

static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[22];
}

static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[21];
}

static u64 access_pcic_n_post_dat_q_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[20];
}

static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[19];
}

static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[18];
}

static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[17];
}

static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[16];
}

static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[15];
}

static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[14];
}

static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[13];
}

static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[12];
}

static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[11];
}

static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[10];
}

static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[9];
}

static u64 access_cce_cli2_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[8];
}

static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[7];
}

static u64 access_cce_cli0_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[6];
}

static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[5];
}

static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[4];
}

static u64 access_cce_trgt_async_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[3];
}

static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[2];
}

static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[1];
}

static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}

static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[62];
}

static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[61];
}

static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[60];
}

static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[59];
}

static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[58];
}

static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[57];
}

static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[56];
}

static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[55];
}

static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[54];
}

static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[53];
}

static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[52];
}

static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[51];
}

static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[50];
}

static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[49];
}

static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[48];
}

static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[47];
}

static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[46];
}

static u64 access_rx_hq_intr_csr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[45];
}

static u64 access_rx_lookup_csr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[44];
}

static u64 access_rx_lookup_rcv_array_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[43];
}

static u64 access_rx_lookup_rcv_array_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[42];
}

static u64 access_rx_lookup_des_part2_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[41];
}

static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[40];
}

static u64 access_rx_lookup_des_part1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[39];
}

static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[38];
}

static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[37];
}

static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[36];
}

static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[35];
}

static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[34];
}

static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[33];
}

static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[32];
}

static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[31];
}

static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[30];
}

static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[29];
}

static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[28];
}

static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[27];
}

static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[26];
}

static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[25];
}

static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[24];
}

static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[23];
}

static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[22];
}

static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[21];
}

static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[20];
}

static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[19];
}

static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[18];
}

static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[17];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[16];
}

static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[15];
}

static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[14];
}

static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[13];
}

static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[12];
}

static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[11];
}

static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[10];
}

static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[9];
}

static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[8];
}

static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[7];
}

static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[6];
}

static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[5];
}

static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[4];
}

static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[3];
}

static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[2];
}

static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[1];
}

static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}

static u64 access_pio_pcc_sop_head_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[34];
}

static u64 access_pio_last_returned_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[33];
}

static u64 access_pio_current_free_cnt_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[32];
}

static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[31];
}

static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[30];
}

static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[29];
}

static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[28];
}

static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[27];
}

static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[26];
}

static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[25];
}

static u64 access_pio_block_qw_count_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[24];
}

static u64 access_pio_write_qw_valid_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[23];
}

static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[22];
}

static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[21];
}

static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[20];
}

static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[19];
}

static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[18];
}

static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[17];
}

static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[16];
}

static u64 access_pio_credit_ret_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[15];
}

static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[14];
}

static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[13];
}

static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[12];
}

static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[11];
}

static u64 access_pio_sm_pkt_reset_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[10];
}

static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[9];
}

static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[8];
}

static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[7];
}

static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[6];
}

static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[5];
}

static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[4];
}

static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[3];
}

static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
					 void *context, int vl, int mode,
					 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[2];
}

static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[1];
}

static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}

static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[2];
}

static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[1];
}

static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[63];
}

static u64 access_tx_read_sdma_memory_csr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[62];
}

static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[61];
}

static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[60];
}

static u64 access_tx_read_sdma_memory_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[59];
}

static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[58];
}

static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[57];
}

static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[56];
}

static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[55];
}

static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[54];
}

static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[53];
}

static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[52];
}

static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[51];
}

static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[50];
}

static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[49];
}

static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[48];
}

static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[47];
}

static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[46];
}

static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[45];
}

static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[44];
}

static u64 access_tx_read_sdma_memory_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[43];
}

static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[42];
}

static u64 access_tx_credit_return_partiy_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[41];
}

static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[40];
}

static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[39];
}

static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[38];
}

static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[37];
}

static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[36];
}

static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[35];
}

static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[34];
}

static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[33];
}

static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[32];
}

static u64 access_tx_sdma15_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[31];
}

static u64 access_tx_sdma14_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[30];
}

static u64 access_tx_sdma13_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[29];
}

static u64 access_tx_sdma12_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[28];
}

static u64 access_tx_sdma11_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[27];
}

static u64 access_tx_sdma10_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[26];
}

static u64 access_tx_sdma9_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[25];
}

static u64 access_tx_sdma8_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[24];
}

static u64 access_tx_sdma7_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[23];
}

static u64 access_tx_sdma6_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl,
					       int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				 void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		/*
		 * On a read, fold the software-counted bypass packet errors
		 * into the CSR value, saturating at CNTR_MAX.
		 */
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}
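/*
 * Worked example of the saturating read above (illustrative values only,
 * assuming CNTR_MAX is the all-ones 64-bit value): with a CSR value of
 * CNTR_MAX - 2 and sw_rcv_bypass_packet_errors = 5, a plain add would wrap
 * to 2, so the counter instead pins at CNTR_MAX to signal saturation.
 */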
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,		      \
				void *context, int vl, int mode, u64 data)    \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,	      \
			      ppd->ibport_data.rvp.cntr, vl,		      \
			      mode, data);				      \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
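/*
 * For illustration (not part of the driver source): def_access_sw_cpu(rc_acks)
 * stamps out a function equivalent to
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 *
 * i.e. one per-CPU counter accessor per name, pairing the zero-point (z_)
 * value with the live per-CPU counter.
 */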
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry,		      \
			     void *context, int vl, int mode, u64 data)	      \
{									      \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;	      \
									      \
	if (vl != CNTR_INVALID_VL)					      \
		return 0;						      \
									      \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,	      \
			     mode, data);				      \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
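/*
 * For illustration (not part of the driver source):
 * def_access_ibp_counter(rc_resends) expands to access_ibp_rc_resends(),
 * which rejects per-VL queries (these IB port counters are port-wide, so
 * vl must be CNTR_INVALID_VL) and then forwards to read_write_sw() on
 * ppd->ibport_data.rvp.n_rc_resends.
 */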
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			    access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			    access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			    access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
			    SEND_DMA_DESC_FETCHED_CNT, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
			    0, CNTR_NORMAL,
			    access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
			    CNTR_NORMAL,
			    access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
			    CNTR_NORMAL,
			    access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
			    0, CNTR_NORMAL,
			    access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoDbgParityError", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoRxdmaParityError", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cli1_async_fifo_rxdma_parity_err_cnt
			    ),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufFlInitWrAddrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
			    CNTR_NORMAL,
			    access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufCsrQNumOfPktParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufCsrQTlPtrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufCsrQHeadBufNumParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
			    "RxRbufLookupDesRegUncCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
			    0, 0, CNTR_NORMAL,
			    access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
			    0, CNTR_NORMAL,
			    access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
			    CNTR_NORMAL,
			    access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
			    CNTR_NORMAL,
			    access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
			    "PioSbrdctrlCrrelFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
			    0, CNTR_NORMAL,
			    access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
			    0, CNTR_NORMAL,
			    access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
			    0, CNTR_NORMAL,
			    access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFO_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_VL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
			    "TxSbrdCtlStateMachineParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
			    CNTR_NORMAL,
			    access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
			    CNTR_NORMAL,
			    access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
			    CNTR_NORMAL,
			    access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
			    "EgressFifoUnderrunOrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
			    CNTR_NORMAL,
			    access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
			    0, 0, CNTR_NORMAL,
			    access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
			    0, 0, CNTR_NORMAL,
			    access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
			    0, 0, CNTR_NORMAL,
			    access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_wrong_dw_err_cnt),
};
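/*
 * For illustration (not part of the driver source): a read of one
 * dev_cntrs[] entry flows through its accessor in CNTR_MODE_R, e.g.
 *
 *	struct cntr_entry *e = &dev_cntrs[C_DC_RCV_ERR];
 *	u64 v = e->rw_cntr(e, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 *
 * assuming the callback member of struct cntr_entry is named rw_cntr as
 * elsewhere in this driver; entries flagged CNTR_VL are instead queried
 * once per virtual lane.
 */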
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
				     CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
				      CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			     access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			   access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
				 access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			     access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
				CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
				access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
				 access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
				access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			       access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
				access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
				       access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
5153 /* ======================================================================== */
5155 /* return true if this is chip revision revision a */
5156 int is_ax(struct hfi1_devdata *dd)
5159 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5160 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5161 return (chip_rev_minor & 0xf0) == 0;
5164 /* return true if this is chip revision B */
5165 int is_bx(struct hfi1_devdata *dd)
5168 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5169 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5170 return (chip_rev_minor & 0xF0) == 0x10;
5174 * Append string s to buffer buf. Arguments curp and len are the current
5175 * position and remaining length, respectively.
5177 * return 0 on success, 1 on out of room
5179 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5183 int result = 0; /* success */
5186 /* add a comma, if this is not the first string in the buffer */
5189 result = 1; /* out of room */
5196 /* copy the string */
5197 while ((c = *s++) != 0) {
5199 result = 1; /* out of room */
5207 /* write return values */
5215 * Using the given flag table, print a comma separated string into
5216 * the buffer. End in '*' if the buffer is too short.
5218 static char *flag_string(char *buf, int buf_len, u64 flags,
5219 struct flag_table *table, int table_size)
5227 /* make sure there are at least 2 bytes so we can form "*" */
5231 len--; /* leave room for a nul */
5232 for (i = 0; i < table_size; i++) {
5233 if (flags & table[i].flag) {
5234 no_room = append_str(buf, &p, &len, table[i].str);
5237 flags &= ~table[i].flag;
5241 /* any undocumented bits left? */
5242 if (!no_room && flags) {
5243 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5244 no_room = append_str(buf, &p, &len, extra);
5247 /* add a '*' if we ran out of room */
5249 /* may need to back up to add space for a '*' */
5255 /* add final nul - space already allocated above */
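/*
 * Illustrative sketch of how flag_string() is consumed (hypothetical
 * table name; the real callers below pass the *_err_status_flags
 * arrays):
 *
 *	char buf[96];
 *	dd_dev_info(dd, "Status: %s\n",
 *		    flag_string(buf, sizeof(buf), reg, example_flags,
 *				ARRAY_SIZE(example_flags)));
 *
 * Undocumented bits come out as "bits 0x<hex>"; a trailing '*' means
 * the buffer was too small.
 */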
5260 /* first 8 CCE error interrupt source names */
5261 static const char * const cce_misc_names[] = {
5262 "CceErrInt", /* 0 */
5263 "RxeErrInt", /* 1 */
5264 "MiscErrInt", /* 2 */
5265 "Reserved3", /* 3 */
5266 "PioErrInt", /* 4 */
5267 "SDmaErrInt", /* 5 */
5268 "EgressErrInt", /* 6 */
5273 * Return the miscellaneous error interrupt name.
5275 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5277 if (source < ARRAY_SIZE(cce_misc_names))
5278 strncpy(buf, cce_misc_names[source], bsize);
5280 snprintf(buf, bsize, "Reserved%u",
5281 source + IS_GENERAL_ERR_START);
5287 * Return the SDMA engine error interrupt name.
5289 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5291 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5296 * Return the send context error interrupt name.
5298 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5300 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5304 static const char * const various_names[] = {
5313 * Return the various interrupt name.
5315 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5317 if (source < ARRAY_SIZE(various_names))
5318 strncpy(buf, various_names[source], bsize);
5320 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5325 * Return the DC interrupt name.
5327 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5329 static const char * const dc_int_names[] = {
5333 "lbm" /* local block merge */
5336 if (source < ARRAY_SIZE(dc_int_names))
5337 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5339 snprintf(buf, bsize, "DCInt%u", source);
5343 static const char * const sdma_int_names[] = {
5350 * Return the SDMA engine interrupt name.
5352 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5354 /* what interrupt */
5355 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5357 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5359 if (likely(what < 3))
5360 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5362 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
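/*
 * Worked example, with TXE_NUM_SDMA_ENGINES == 16 (there are 16 DMA
 * engines, per the aggregation comments later in this file): source 35
 * decodes to what == 2 (the third entry of sdma_int_names) and
 * which == 3, i.e. that interrupt type for SDMA engine 3.
 */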
5367 * Return the receive available interrupt name.
5369 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5371 snprintf(buf, bsize, "RcvAvailInt%u", source);
5376 * Return the receive urgent interrupt name.
5378 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5380 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5385 * Return the send credit interrupt name.
5387 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5389 snprintf(buf, bsize, "SendCreditInt%u", source);
5394 * Return the reserved interrupt name.
5396 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5398 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5402 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5404 return flag_string(buf, buf_len, flags,
5405 cce_err_status_flags,
5406 ARRAY_SIZE(cce_err_status_flags));
5409 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5411 return flag_string(buf, buf_len, flags,
5412 rxe_err_status_flags,
5413 ARRAY_SIZE(rxe_err_status_flags));
5416 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5418 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5419 ARRAY_SIZE(misc_err_status_flags));
5422 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5424 return flag_string(buf, buf_len, flags,
5425 pio_err_status_flags,
5426 ARRAY_SIZE(pio_err_status_flags));
5429 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5431 return flag_string(buf, buf_len, flags,
5432 sdma_err_status_flags,
5433 ARRAY_SIZE(sdma_err_status_flags));
5436 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5438 return flag_string(buf, buf_len, flags,
5439 egress_err_status_flags,
5440 ARRAY_SIZE(egress_err_status_flags));
5443 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5445 return flag_string(buf, buf_len, flags,
5446 egress_err_info_flags,
5447 ARRAY_SIZE(egress_err_info_flags));
5450 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5452 return flag_string(buf, buf_len, flags,
5453 send_err_status_flags,
5454 ARRAY_SIZE(send_err_status_flags));
5457 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5463 * For most of these errors, there is nothing that can be done except
5464 * report or record it.
5466 dd_dev_info(dd, "CCE Error: %s\n",
5467 cce_err_status_string(buf, sizeof(buf), reg));
5469 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5470 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5471 /* this error requires a manual drop into SPC freeze mode */
5473 start_freeze_handling(dd->pport, FREEZE_SELF);
5476 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5477 if (reg & (1ull << i)) {
5478 incr_cntr64(&dd->cce_err_status_cnt[i]);
5479 /* maintain a counter over all cce_err_status errors */
5480 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5486 * Check counters for receive errors that do not have an interrupt
5487 * associated with them.
5489 #define RCVERR_CHECK_TIME 10
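/*
 * RCVERR_CHECK_TIME is in seconds: the timer below re-arms itself
 * every HZ * RCVERR_CHECK_TIME jiffies, i.e. every 10 seconds.
 */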
5490 static void update_rcverr_timer(unsigned long opaque)
5492 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5493 struct hfi1_pportdata *ppd = dd->pport;
5494 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5496 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5497 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5498 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5499 set_link_down_reason(
5500 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5501 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5502 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5504 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5506 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5509 static int init_rcverr(struct hfi1_devdata *dd)
5511 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5512 /* Assume the hardware counter has been reset */
5513 dd->rcv_ovfl_cnt = 0;
5514 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5517 static void free_rcverr(struct hfi1_devdata *dd)
5519 if (dd->rcverr_timer.data)
5520 del_timer_sync(&dd->rcverr_timer);
5521 dd->rcverr_timer.data = 0;
5524 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5529 dd_dev_info(dd, "Receive Error: %s\n",
5530 rxe_err_status_string(buf, sizeof(buf), reg));
5532 if (reg & ALL_RXE_FREEZE_ERR) {
5536 * Freeze mode recovery is disabled for the errors
5537 * in RXE_FREEZE_ABORT_MASK
5539 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5540 flags = FREEZE_ABORT;
5542 start_freeze_handling(dd->pport, flags);
5545 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5546 if (reg & (1ull << i))
5547 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5551 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5556 dd_dev_info(dd, "Misc Error: %s\n",
5557 misc_err_status_string(buf, sizeof(buf), reg));
5558 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5559 if (reg & (1ull << i))
5560 incr_cntr64(&dd->misc_err_status_cnt[i]);
5564 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5569 dd_dev_info(dd, "PIO Error: %s\n",
5570 pio_err_status_string(buf, sizeof(buf), reg));
5572 if (reg & ALL_PIO_FREEZE_ERR)
5573 start_freeze_handling(dd->pport, 0);
5575 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5576 if (reg & (1ull << i))
5577 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5581 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5586 dd_dev_info(dd, "SDMA Error: %s\n",
5587 sdma_err_status_string(buf, sizeof(buf), reg));
5589 if (reg & ALL_SDMA_FREEZE_ERR)
5590 start_freeze_handling(dd->pport, 0);
5592 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5593 if (reg & (1ull << i))
5594 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5598 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5600 incr_cntr64(&ppd->port_xmit_discards);
5603 static void count_port_inactive(struct hfi1_devdata *dd)
5605 __count_port_discards(dd->pport);
5609 * We have had a "disallowed packet" error during egress. Determine the
5610 * integrity check which failed, and update the relevant error counter, etc.
5612 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5613 * bit of state per integrity check, and so we can miss the reason for an
5614 * egress error if more than one packet fails the same integrity check
5615 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5617 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5620 struct hfi1_pportdata *ppd = dd->pport;
5621 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5622 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5625 /* clear down all observed info as quickly as possible after read */
5626 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5629 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5630 info, egress_err_info_string(buf, sizeof(buf), info), src);
5632 /* Eventually add other counters for each bit */
5633 if (info & PORT_DISCARD_EGRESS_ERRS) {
5637 * Count all applicable bits as individual errors and
5638 * attribute them to the packet that triggered this handler.
5639 * This may not be completely accurate due to limitations
5640 * on the available hardware error information. There is
5641 * a single information register and any number of error
5642 * packets may have occurred and contributed to it before
5643 * this routine is called. This means that:
5644 * a) If multiple packets with the same error occur before
5645 * this routine is called, earlier packets are missed.
5646 * There is only a single bit for each error type.
5647 * b) Errors may not be attributed to the correct VL.
5648 * The driver is attributing all bits in the info register
5649 * to the packet that triggered this call, but bits
5650 * could be an accumulation of different packets with the same error.
5652 * c) A single error packet may have multiple counts attached
5653 * to it. There is no way for the driver to know if
5654 * multiple bits set in the info register are due to a
5655 * single packet or multiple packets. The driver assumes multiple packets.
5658 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5659 for (i = 0; i < weight; i++) {
5660 __count_port_discards(ppd);
5661 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5662 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5664 incr_cntr64(&ppd->port_xmit_discards_vl
5671 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5672 * register. Does it represent a 'port inactive' error?
5674 static inline int port_inactive_err(u64 posn)
5676 return (posn >= SEES(TX_LINKDOWN) &&
5677 posn <= SEES(TX_INCORRECT_LINK_STATE));
5681 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5682 * register. Does it represent a 'disallowed packet' error?
5684 static inline int disallowed_pkt_err(int posn)
5686 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5687 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5691 * Input value is a bit position of one of the SDMA engine disallowed
5692 * packet errors. Return which engine. Use of this must be guarded by
5693 * disallowed_pkt_err().
5695 static inline int disallowed_pkt_engine(int posn)
5697 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
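/*
 * Worked example: if posn is the bit position of
 * SEES(TX_SDMA3_DISALLOWED_PACKET), the subtraction above yields
 * engine 3, which engine_to_vl() below then maps to a VL. This
 * assumes the TX_SDMAn_DISALLOWED_PACKET encodings are consecutive,
 * which disallowed_pkt_err() already relies on.
 */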
5701 * Translate an SDMA engine to a VL. Return -1 if the translation cannot be done.
5704 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5706 struct sdma_vl_map *m;
5710 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5714 m = rcu_dereference(dd->sdma_map);
5715 vl = m->engine_to_vl[engine];
5722 * Translate the send context (software index) into a VL. Return -1 if the
5723 * translation cannot be done.
5725 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5727 struct send_context_info *sci;
5728 struct send_context *sc;
5731 sci = &dd->send_contexts[sw_index];
5733 /* there is no information for user (PSM) and ack contexts */
5734 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5740 if (dd->vld[15].sc == sc)
5742 for (i = 0; i < num_vls; i++)
5743 if (dd->vld[i].sc == sc)
5749 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5751 u64 reg_copy = reg, handled = 0;
5755 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5756 start_freeze_handling(dd->pport, 0);
5757 else if (is_ax(dd) &&
5758 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5759 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5760 start_freeze_handling(dd->pport, 0);
5763 int posn = fls64(reg_copy);
5764 /* fls64() returns a 1-based offset; we want it zero-based */
5765 int shift = posn - 1;
5766 u64 mask = 1ULL << shift;
5768 if (port_inactive_err(shift)) {
5769 count_port_inactive(dd);
5771 } else if (disallowed_pkt_err(shift)) {
5772 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5774 handle_send_egress_err_info(dd, vl);
5783 dd_dev_info(dd, "Egress Error: %s\n",
5784 egress_err_status_string(buf, sizeof(buf), reg));
5786 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5787 if (reg & (1ull << i))
5788 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5792 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5797 dd_dev_info(dd, "Send Error: %s\n",
5798 send_err_status_string(buf, sizeof(buf), reg));
5800 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5801 if (reg & (1ull << i))
5802 incr_cntr64(&dd->send_err_status_cnt[i]);
5807 * The maximum number of times the error clear down will loop before
5808 * blocking a repeating error. This value is arbitrary.
5810 #define MAX_CLEAR_COUNT 20
5813 * Clear and handle an error register. All error interrupts are funneled
5814 * through here to have a central location to correctly handle single-
5815 * or multi-shot errors.
5817 * For non per-context registers, call this routine with a context value
5818 * of 0 so the per-context offset is zero.
5820 * If the handler loops too many times, assume that something is wrong
5821 * and can't be fixed, so mask the error bits.
5823 static void interrupt_clear_down(struct hfi1_devdata *dd,
5825 const struct err_reg_info *eri)
5830 /* read in a loop until no more errors are seen */
5833 reg = read_kctxt_csr(dd, context, eri->status);
5836 write_kctxt_csr(dd, context, eri->clear, reg);
5837 if (likely(eri->handler))
5838 eri->handler(dd, context, reg);
5840 if (count > MAX_CLEAR_COUNT) {
5843 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5846 * Read-modify-write so any other masked bits stay masked.
5849 mask = read_kctxt_csr(dd, context, eri->mask);
5851 write_kctxt_csr(dd, context, eri->mask, mask);
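/*
 * A minimal sketch of the table entries this routine consumes,
 * assuming err_reg_info carries the status/clear/mask CSR offsets
 * plus a handler and a description (field names hypothetical):
 *
 *	{ .status = MISC_ERR_STATUS, .clear = MISC_ERR_CLEAR,
 *	  .mask = MISC_ERR_MASK, .handler = handle_misc_err,
 *	  .desc = "Misc" }
 */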
5858 * CCE block "misc" interrupt. Source is < 16.
5860 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5862 const struct err_reg_info *eri = &misc_errs[source];
5865 interrupt_clear_down(dd, 0, eri);
5867 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5872 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5874 return flag_string(buf, buf_len, flags,
5875 sc_err_status_flags,
5876 ARRAY_SIZE(sc_err_status_flags));
5880 * Send context error interrupt. Source (hw_context) is < 160.
5882 * All send context errors cause the send context to halt. The normal
5883 * clear-down mechanism cannot be used because we cannot clear the
5884 * error bits until several other long-running items are done first.
5885 * This is OK because with the context halted, nothing else is going
5886 * to happen on it anyway.
5888 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5889 unsigned int hw_context)
5891 struct send_context_info *sci;
5892 struct send_context *sc;
5898 sw_index = dd->hw_to_sw[hw_context];
5899 if (sw_index >= dd->num_send_contexts) {
5901 "out of range sw index %u for send context %u\n",
5902 sw_index, hw_context);
5905 sci = &dd->send_contexts[sw_index];
5908 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5909 sw_index, hw_context);
5913 /* tell the software that a halt has begun */
5914 sc_stop(sc, SCF_HALTED);
5916 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5918 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5919 send_context_err_status_string(flags, sizeof(flags),
5922 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5923 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5926 * Automatically restart halted kernel contexts out of interrupt
5927 * context. User contexts must ask the driver to restart the context.
5929 if (sc->type != SC_USER)
5930 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5933 * Update the counters for the corresponding status bits.
5934 * Note that these particular counters are aggregated over all send contexts.
5937 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5938 if (status & (1ull << i))
5939 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5943 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5944 unsigned int source, u64 status)
5946 struct sdma_engine *sde;
5949 sde = &dd->per_sdma[source];
5950 #ifdef CONFIG_SDMA_VERBOSITY
5951 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5952 slashstrip(__FILE__), __LINE__, __func__);
5953 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5954 sde->this_idx, source, (unsigned long long)status);
5957 sdma_engine_error(sde, status);
5960 * Update the counters for the corresponding status bits.
5961 * Note that these particular counters are aggregated over
5962 * all 16 DMA engines.
5964 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5965 if (status & (1ull << i))
5966 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5971 * CCE block SDMA error interrupt. Source is < 16.
5973 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5975 #ifdef CONFIG_SDMA_VERBOSITY
5976 struct sdma_engine *sde = &dd->per_sdma[source];
5978 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5979 slashstrip(__FILE__), __LINE__, __func__);
5980 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5982 sdma_dumpstate(sde);
5984 interrupt_clear_down(dd, source, &sdma_eng_err);
5988 * CCE block "various" interrupt. Source is < 8.
5990 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5992 const struct err_reg_info *eri = &various_err[source];
5995 * TCritInt cannot go through interrupt_clear_down()
5996 * because it is not a second tier interrupt. The handler
5997 * should be called directly.
5999 if (source == TCRIT_INT_SOURCE)
6000 handle_temp_err(dd);
6001 else if (eri->handler)
6002 interrupt_clear_down(dd, 0, eri);
6005 "%s: Unimplemented/reserved interrupt %d\n",
6009 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6011 /* src_ctx is always zero */
6012 struct hfi1_pportdata *ppd = dd->pport;
6013 unsigned long flags;
6014 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6016 if (reg & QSFP_HFI0_MODPRST_N) {
6017 if (!qsfp_mod_present(ppd)) {
6018 dd_dev_info(dd, "%s: QSFP module removed\n",
6021 ppd->driver_link_ready = 0;
6023 * Cable removed, reset all our information about the
6024 * cache and cable capabilities
6027 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6029 * We don't set cache_refresh_required here as we expect
6030 * an interrupt when a cable is inserted
6032 ppd->qsfp_info.cache_valid = 0;
6033 ppd->qsfp_info.reset_needed = 0;
6034 ppd->qsfp_info.limiting_active = 0;
6035 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6037 /* Invert the ModPresent pin now to detect plug-in */
6038 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6039 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6041 if ((ppd->offline_disabled_reason >
6043 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6044 (ppd->offline_disabled_reason ==
6045 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6046 ppd->offline_disabled_reason =
6048 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6050 if (ppd->host_link_state == HLS_DN_POLL) {
6052 * The link is still in POLL. This means
6053 * that the normal link down processing
6054 * will not happen. We have to do it here
6055 * before turning the DC off.
6057 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
6060 dd_dev_info(dd, "%s: QSFP module inserted\n",
6063 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6064 ppd->qsfp_info.cache_valid = 0;
6065 ppd->qsfp_info.cache_refresh_required = 1;
6066 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6070 * Stop inversion of ModPresent pin to detect
6071 * removal of the cable
6073 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6074 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6075 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6077 ppd->offline_disabled_reason =
6078 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6082 if (reg & QSFP_HFI0_INT_N) {
6083 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6085 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6086 ppd->qsfp_info.check_interrupt_flags = 1;
6087 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6090 /* Schedule the QSFP work only if there is a cable attached. */
6091 if (qsfp_mod_present(ppd))
6092 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6095 static int request_host_lcb_access(struct hfi1_devdata *dd)
6099 ret = do_8051_command(dd, HCMD_MISC,
6100 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6101 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6102 if (ret != HCMD_SUCCESS) {
6103 dd_dev_err(dd, "%s: command failed with error %d\n",
6106 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6109 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6113 ret = do_8051_command(dd, HCMD_MISC,
6114 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6115 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6116 if (ret != HCMD_SUCCESS) {
6117 dd_dev_err(dd, "%s: command failed with error %d\n",
6120 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6124 * Set the LCB selector - allow host access. The DCC selector always
6125 * points to the host.
6127 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6129 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6130 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6131 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6135 * Clear the LCB selector - allow 8051 access. The DCC selector always
6136 * points to the host.
6138 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6140 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6141 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6145 * Acquire LCB access from the 8051. If the host already has access,
6146 * just increment a counter. Otherwise, inform the 8051 that the
6147 * host is taking access.
6151 * -EBUSY if the 8051 has control and cannot be disturbed
6152 * -errno if unable to acquire access from the 8051
6154 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6156 struct hfi1_pportdata *ppd = dd->pport;
6160 * Use the host link state lock so the operation of this routine
6161 * { link state check, selector change, count increment } can occur
6162 * as a unit against a link state change. Otherwise there is a
6163 * race between the state change and the count increment.
6166 mutex_lock(&ppd->hls_lock);
6168 while (!mutex_trylock(&ppd->hls_lock))
6172 /* this access is valid only when the link is up */
6173 if (ppd->host_link_state & HLS_DOWN) {
6174 dd_dev_info(dd, "%s: link state %s not up\n",
6175 __func__, link_state_name(ppd->host_link_state));
6180 if (dd->lcb_access_count == 0) {
6181 ret = request_host_lcb_access(dd);
6184 "%s: unable to acquire LCB access, err %d\n",
6188 set_host_lcb_access(dd);
6190 dd->lcb_access_count++;
6192 mutex_unlock(&ppd->hls_lock);
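/*
 * Typical pairing, sketched (sleep_ok = 1 from process context; 0 is
 * returned on success):
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 */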
6197 * Release LCB access by decrementing the use count. If the count is moving
6198 * from 1 to 0, inform the 8051 that it has control back.
6202 * -errno if unable to release access to the 8051
6204 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6209 * Use the host link state lock because the acquire needed it.
6210 * Here, we only need to keep { selector change, count decrement }
6214 mutex_lock(&dd->pport->hls_lock);
6216 while (!mutex_trylock(&dd->pport->hls_lock))
6220 if (dd->lcb_access_count == 0) {
6221 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6226 if (dd->lcb_access_count == 1) {
6227 set_8051_lcb_access(dd);
6228 ret = request_8051_lcb_access(dd);
6231 "%s: unable to release LCB access, err %d\n",
6233 /* restore host access if the grant didn't work */
6234 set_host_lcb_access(dd);
6238 dd->lcb_access_count--;
6240 mutex_unlock(&dd->pport->hls_lock);
6245 * Initialize LCB access variables and state. Called during driver load,
6246 * after most of the initialization is finished.
6248 * The DC default is LCB access on for the host. The driver defaults to
6249 * leaving access to the 8051. Assign access now - this constrains the call
6250 * to this routine to be after all LCB set-up is done. In particular, after
6251 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6253 static void init_lcb_access(struct hfi1_devdata *dd)
6255 dd->lcb_access_count = 0;
6259 * Write a response back to an 8051 request.
6261 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6263 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6264 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6266 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6267 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
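/*
 * For example, hreq_response(dd, HREQ_SUCCESS, 0) writes a word with
 * COMPLETED set, HREQ_SUCCESS in the return-code field, and zero
 * response data.
 */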
6271 * Handle host requests from the 8051.
6273 static void handle_8051_request(struct hfi1_pportdata *ppd)
6275 struct hfi1_devdata *dd = ppd->dd;
6280 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6281 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6282 return; /* no request */
6284 /* zero out COMPLETED so the response is seen */
6285 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6287 /* extract request details */
6288 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6289 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6290 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6291 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6294 case HREQ_LOAD_CONFIG:
6295 case HREQ_SAVE_CONFIG:
6296 case HREQ_READ_CONFIG:
6297 case HREQ_SET_TX_EQ_ABS:
6298 case HREQ_SET_TX_EQ_REL:
6300 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6302 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6304 case HREQ_CONFIG_DONE:
6305 hreq_response(dd, HREQ_SUCCESS, 0);
6308 case HREQ_INTERFACE_TEST:
6309 hreq_response(dd, HREQ_SUCCESS, data);
6312 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6313 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6319 * Set up the allocation unit value.
6321 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6323 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6325 /* do not modify other values in the register */
6326 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6327 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6328 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6332 * Set up initial VL15 credits of the remote. Assumes the rest of
6333 * the CM credit registers are zero from a previous global or credit reset.
6334 * Shared limit for VL15 will always be 0.
6336 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6338 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6340 /* set initial values for total and shared credit limit */
6341 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6342 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6345 * Set total limit to be equal to VL15 credits.
6346 * Leave shared limit at 0.
6348 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6349 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6351 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6352 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6356 * Zero all credit details from the previous connection and
6357 * reset the CM manager's internal counters.
6359 void reset_link_credits(struct hfi1_devdata *dd)
6363 /* remove all previous VL credit limits */
6364 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6365 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6366 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6367 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6368 /* reset the CM block */
6369 pio_send_control(dd, PSC_CM_RESET);
6370 /* reset cached value */
6371 dd->vl15buf_cached = 0;
6374 /* convert a vCU to a CU */
6375 static u32 vcu_to_cu(u8 vcu)
6380 /* convert a CU to a vCU */
6381 static u8 cu_to_vcu(u32 cu)
6386 /* convert a vAU to an AU */
6387 static u32 vau_to_au(u8 vau)
6389 return 8 * (1 << vau);
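/*
 * Worked example of the encoding: vau_to_au(3) = 8 * (1 << 3) = 64,
 * so the hardwired transmit vAU of 3 mentioned in handle_verify_cap()
 * corresponds to a 64-byte allocation unit.
 */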
6392 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6394 ppd->sm_trap_qp = 0x0;
6399 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6401 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6405 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6406 write_csr(dd, DC_LCB_CFG_RUN, 0);
6407 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6408 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6409 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6410 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6411 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6412 reg = read_csr(dd, DCC_CFG_RESET);
6413 write_csr(dd, DCC_CFG_RESET, reg |
6414 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6415 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6416 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6418 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6419 write_csr(dd, DCC_CFG_RESET, reg);
6420 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6425 * This routine should be called after the link has been transitioned to
6426 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into reset).
6429 * The expectation is that the caller of this routine would have taken
6430 * care of properly transitioning the link into the correct state.
6431 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6432 * before calling this function.
6434 static void _dc_shutdown(struct hfi1_devdata *dd)
6436 lockdep_assert_held(&dd->dc8051_lock);
6438 if (dd->dc_shutdown)
6441 dd->dc_shutdown = 1;
6442 /* Shutdown the LCB */
6443 lcb_shutdown(dd, 1);
6445 * Going to OFFLINE would have caused the 8051 to put the
6446 * SerDes into reset already. Just need to shut down the 8051,
6449 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6452 static void dc_shutdown(struct hfi1_devdata *dd)
6454 mutex_lock(&dd->dc8051_lock);
6456 mutex_unlock(&dd->dc8051_lock);
6460 * Calling this after the DC has been brought out of reset should not
6462 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6463 * before calling this function.
6465 static void _dc_start(struct hfi1_devdata *dd)
6467 lockdep_assert_held(&dd->dc8051_lock);
6469 if (!dd->dc_shutdown)
6472 /* Take the 8051 out of reset */
6473 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6474 /* Wait until 8051 is ready */
6475 if (wait_fm_ready(dd, TIMEOUT_8051_START))
6476 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6479 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6480 write_csr(dd, DCC_CFG_RESET, 0x10);
6481 /* lcb_shutdown() with abort=1 does not restore these */
6482 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6483 dd->dc_shutdown = 0;
6486 static void dc_start(struct hfi1_devdata *dd)
6488 mutex_lock(&dd->dc8051_lock);
6490 mutex_unlock(&dd->dc8051_lock);
6494 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6496 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6498 u64 rx_radr, tx_radr;
6501 if (dd->icode != ICODE_FPGA_EMULATION)
6505 * These LCB defaults on emulator _s are good, nothing to do here:
6506 * LCB_CFG_TX_FIFOS_RADR
6507 * LCB_CFG_RX_FIFOS_RADR
6509 * LCB_CFG_IGNORE_LOST_RCLK
6511 if (is_emulator_s(dd))
6513 /* else this is _p */
6515 version = emulator_rev(dd);
6517 version = 0x2d; /* all B0 use 0x2d or higher settings */
6519 if (version <= 0x12) {
6520 /* release 0x12 and below */
6523 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6524 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6525 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6528 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6529 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6530 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6532 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6533 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6535 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6536 } else if (version <= 0x18) {
6537 /* release 0x13 up to 0x18 */
6538 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6540 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6541 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6542 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6543 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6544 } else if (version == 0x19) {
6546 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6548 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6549 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6550 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6551 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6552 } else if (version == 0x1a) {
6554 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6556 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6557 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6558 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6559 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6560 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6562 /* release 0x1b and higher */
6563 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6565 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6566 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6567 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6568 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6571 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6572 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6573 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6574 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6575 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6579 * Handle an SMA idle message
6581 * This is a work-queue function outside of the interrupt.
6583 void handle_sma_message(struct work_struct *work)
6585 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6587 struct hfi1_devdata *dd = ppd->dd;
6592 * msg is bytes 1-4 of the 40-bit idle message - the command code
6595 ret = read_idle_sma(dd, &msg);
6598 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6600 * React to the SMA message. Byte[1] (0 for us) is the command.
6602 switch (msg & 0xff) {
6605 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6608 * Only expected in INIT or ARMED, discard otherwise.
6610 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6611 ppd->neighbor_normal = 1;
6613 case SMA_IDLE_ACTIVE:
6615 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6618 * Can activate the node. Discard otherwise.
6620 if (ppd->host_link_state == HLS_UP_ARMED &&
6621 ppd->is_active_optimize_enabled) {
6622 ppd->neighbor_normal = 1;
6623 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6627 "%s: received Active SMA idle message, couldn't set link to Active\n",
6633 "%s: received unexpected SMA idle message 0x%llx\n",
6639 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6642 unsigned long flags;
6644 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6645 rcvctrl = read_csr(dd, RCV_CTRL);
6646 rcvctrl |= add;
6647 rcvctrl &= ~clear;
6648 write_csr(dd, RCV_CTRL, rcvctrl);
6649 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6652 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6654 adjust_rcvctrl(dd, add, 0);
6657 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6659 adjust_rcvctrl(dd, 0, clear);
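/*
 * Example from later in this file: enabling the receive port is
 * add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); disabling it is
 * clear_rcvctrl() with the same mask. Both funnel through the locked
 * read-modify-write above.
 */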
6663 * Called from all interrupt handlers to start handling an SPC freeze.
6665 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6667 struct hfi1_devdata *dd = ppd->dd;
6668 struct send_context *sc;
6671 if (flags & FREEZE_SELF)
6672 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6674 /* enter frozen mode */
6675 dd->flags |= HFI1_FROZEN;
6677 /* notify all SDMA engines that they are going into a freeze */
6678 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6680 /* do halt pre-handling on all enabled send contexts */
6681 for (i = 0; i < dd->num_send_contexts; i++) {
6682 sc = dd->send_contexts[i].sc;
6683 if (sc && (sc->flags & SCF_ENABLED))
6684 sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6687 /* Send contexts are frozen. Notify user space */
6688 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6690 if (flags & FREEZE_ABORT) {
6692 "Aborted freeze recovery. Please REBOOT system\n");
6695 /* queue non-interrupt handler */
6696 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6700 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6701 * depending on the "freeze" parameter.
6703 * No need to return an error if it times out, our only option
6704 * is to proceed anyway.
6706 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6708 unsigned long timeout;
6711 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6713 reg = read_csr(dd, CCE_STATUS);
6715 /* waiting until all indicators are set */
6716 if ((reg & ALL_FROZE) == ALL_FROZE)
6717 return; /* all done */
6719 /* waiting until all indicators are clear */
6720 if ((reg & ALL_FROZE) == 0)
6721 return; /* all done */
6724 if (time_after(jiffies, timeout)) {
6726 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6727 freeze ? "" : "un", reg & ALL_FROZE,
6728 freeze ? ALL_FROZE : 0ull);
6731 usleep_range(80, 120);
6736 * Do all freeze handling for the RXE block.
6738 static void rxe_freeze(struct hfi1_devdata *dd)
6743 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6745 /* disable all receive contexts */
6746 for (i = 0; i < dd->num_rcv_contexts; i++)
6747 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6751 * Unfreeze handling for the RXE block - kernel contexts only.
6752 * This will also enable the port. User contexts will do unfreeze
6753 * handling on a per-context basis as they call into the driver.
6756 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6761 /* enable all kernel contexts */
6762 for (i = 0; i < dd->num_rcv_contexts; i++) {
6763 struct hfi1_ctxtdata *rcd = dd->rcd[i];
6765 /* Ensure all non-user contexts (including vnic) are enabled */
6766 if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER))
6769 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6770 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6771 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6772 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6773 hfi1_rcvctrl(dd, rcvmask, i);
6777 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6781 * Non-interrupt SPC freeze handling.
6783 * This is a work-queue function outside of the triggering interrupt.
6785 void handle_freeze(struct work_struct *work)
6787 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6789 struct hfi1_devdata *dd = ppd->dd;
6791 /* wait for freeze indicators on all affected blocks */
6792 wait_for_freeze_status(dd, 1);
6794 /* SPC is now frozen */
6796 /* do send PIO freeze steps */
6799 /* do send DMA freeze steps */
6802 /* do send egress freeze steps - nothing to do */
6804 /* do receive freeze steps */
6808 * Unfreeze the hardware - clear the freeze, wait for each
6809 * block's frozen bit to clear, then clear the frozen flag.
6811 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6812 wait_for_freeze_status(dd, 0);
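/* A-step (is_ax) parts repeat the freeze/unfreeze cycle once more */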
6815 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6816 wait_for_freeze_status(dd, 1);
6817 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6818 wait_for_freeze_status(dd, 0);
6821 /* do send PIO unfreeze steps for kernel contexts */
6822 pio_kernel_unfreeze(dd);
6824 /* do send DMA unfreeze steps */
6827 /* do send egress unfreeze steps - nothing to do */
6829 /* do receive unfreeze steps for kernel contexts */
6830 rxe_kernel_unfreeze(dd);
6833 * The unfreeze procedure touches global device registers when
6834 * it disables and re-enables RXE. Mark the device unfrozen
6835 * after all that is done so other parts of the driver waiting
6836 * for the device to unfreeze don't do things out of order.
6838 * The above implies that the meaning of HFI1_FROZEN flag is
6839 * "Device has gone into freeze mode and freeze mode handling
6840 * is still in progress."
6842 * The flag will be removed when freeze mode processing has
6845 dd->flags &= ~HFI1_FROZEN;
6846 wake_up(&dd->event_queue);
6848 /* no longer frozen */
6852 * Handle a link up interrupt from the 8051.
6854 * This is a work-queue function outside of the interrupt.
6856 void handle_link_up(struct work_struct *work)
6858 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6860 struct hfi1_devdata *dd = ppd->dd;
6862 set_link_state(ppd, HLS_UP_INIT);
6864 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6867 * OPA specifies that certain counters are cleared on a transition
6868 * to link up, so do that.
6870 clear_linkup_counters(dd);
6872 * And (re)set link up default values.
6874 set_linkup_defaults(ppd);
6877 * Set VL15 credits. Use cached value from verify cap interrupt.
6878 * In case of quick linkup or simulator, the vl15 value will be set
6879 * by handle_linkup_change. The VerifyCap interrupt handler will not
6880 * be called in those scenarios.
6882 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6883 set_up_vl15(dd, dd->vl15buf_cached);
6885 /* enforce link speed enabled */
6886 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6887 /* oops - current speed is not enabled, bounce */
6889 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6890 ppd->link_speed_active, ppd->link_speed_enabled);
6891 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6892 OPA_LINKDOWN_REASON_SPEED_POLICY);
6893 set_link_state(ppd, HLS_DN_OFFLINE);
6899 * Several pieces of LNI information were cached for SMA in ppd.
6900 * Reset these on link down
6902 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6904 ppd->neighbor_guid = 0;
6905 ppd->neighbor_port_number = 0;
6906 ppd->neighbor_type = 0;
6907 ppd->neighbor_fm_security = 0;
6910 static const char * const link_down_reason_strs[] = {
6911 [OPA_LINKDOWN_REASON_NONE] = "None",
6912 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6913 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6914 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6915 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6916 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6917 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6918 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6919 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6920 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6921 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6922 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6923 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6924 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
6925 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6926 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6927 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6928 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6929 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6930 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6931 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6932 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6933 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6934 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6935 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6936 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6937 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6938 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6939 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6940 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6941 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6942 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6943 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6944 "Excessive buffer overrun",
6945 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6946 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6947 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6948 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6949 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6950 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6951 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6952 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6953 "Local media not installed",
6954 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6955 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6956 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6957 "End to end not installed",
6958 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6959 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6960 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6961 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6962 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6963 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6966 /* return the neighbor link down reason string */
6967 static const char *link_down_reason_str(u8 reason)
6969 const char *str = NULL;
6971 if (reason < ARRAY_SIZE(link_down_reason_strs))
6972 str = link_down_reason_strs[reason];
6980 * Handle a link down interrupt from the 8051.
6982 * This is a work-queue function outside of the interrupt.
6984 void handle_link_down(struct work_struct *work)
6986 u8 lcl_reason, neigh_reason = 0;
6987 u8 link_down_reason;
6988 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6991 static const char ldr_str[] = "Link down reason: ";
6993 if ((ppd->host_link_state &
6994 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6995 ppd->port_type == PORT_TYPE_FIXED)
6996 ppd->offline_disabled_reason =
6997 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6999 /* Go offline first, then deal with reading/writing through 8051 */
7000 was_up = !!(ppd->host_link_state & HLS_UP);
7001 set_link_state(ppd, HLS_DN_OFFLINE);
7005 /* link down reason is only valid if the link was up */
7006 read_link_down_reason(ppd->dd, &link_down_reason);
7007 switch (link_down_reason) {
7008 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7009 /* the link went down, no idle message reason */
7010 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7013 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7015 * The neighbor reason is only valid if an idle message
7016 * was received for it.
7018 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7019 dd_dev_info(ppd->dd,
7020 "%sNeighbor link down message %d, %s\n",
7021 ldr_str, neigh_reason,
7022 link_down_reason_str(neigh_reason));
7024 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7025 dd_dev_info(ppd->dd,
7026 "%sHost requested link to go offline\n",
7030 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7031 ldr_str, link_down_reason);
7036 * If no reason, assume peer-initiated but missed
7037 * LinkGoingDown idle flits.
7039 if (neigh_reason == 0)
7040 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7042 /* went down while polling or going up */
7043 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7046 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7048 /* inform the SMA when the link transitions from up to down */
7049 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7050 ppd->neigh_link_down_reason.sma == 0) {
7051 ppd->local_link_down_reason.sma =
7052 ppd->local_link_down_reason.latest;
7053 ppd->neigh_link_down_reason.sma =
7054 ppd->neigh_link_down_reason.latest;
7057 reset_neighbor_info(ppd);
7059 /* disable the port */
7060 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7063 * If there is no cable attached, turn the DC off. Otherwise,
7064 * start the link bring up.
7066 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7067 dc_shutdown(ppd->dd);
7072 void handle_link_bounce(struct work_struct *work)
7074 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7078 * Only do something if the link is currently up.
7080 if (ppd->host_link_state & HLS_UP) {
7081 set_link_state(ppd, HLS_DN_OFFLINE);
7084 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7085 __func__, link_state_name(ppd->host_link_state));
7090 * Mask conversion: Capability exchange to Port LTP. The capability
7091 * exchange has an implicit 16b CRC that is mandatory.
7093 static int cap_to_port_ltp(int cap)
7095 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7097 if (cap & CAP_CRC_14B)
7098 port_ltp |= PORT_LTP_CRC_MODE_14;
7099 if (cap & CAP_CRC_48B)
7100 port_ltp |= PORT_LTP_CRC_MODE_48;
7101 if (cap & CAP_CRC_12B_16B_PER_LANE)
7102 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
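/*
 * Example: a capability mask of CAP_CRC_14B | CAP_CRC_48B maps to
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48,
 * since the 16b mode is always included.
 */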
7108 * Convert an OPA Port LTP mask to capability mask
7110 int port_ltp_to_cap(int port_ltp)
7114 if (port_ltp & PORT_LTP_CRC_MODE_14)
7115 cap_mask |= CAP_CRC_14B;
7116 if (port_ltp & PORT_LTP_CRC_MODE_48)
7117 cap_mask |= CAP_CRC_48B;
7118 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7119 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7125 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7127 static int lcb_to_port_ltp(int lcb_crc)
7131 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7132 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7133 else if (lcb_crc == LCB_CRC_48B)
7134 port_ltp = PORT_LTP_CRC_MODE_48;
7135 else if (lcb_crc == LCB_CRC_14B)
7136 port_ltp = PORT_LTP_CRC_MODE_14;
7138 port_ltp = PORT_LTP_CRC_MODE_16;
7144 * Our neighbor has indicated that we are allowed to act as a fabric
7145 * manager, so place the full management partition key in the second
7146 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7147 * that we should already have the limited management partition key in
7148 * array element 1, and also that the port is not yet up when
7149 * add_full_mgmt_pkey() is invoked.
7151 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7153 struct hfi1_devdata *dd = ppd->dd;
7155 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7156 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7157 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7158 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7159 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7160 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7161 hfi1_event_pkey_change(ppd->dd, ppd->port);
7164 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7166 if (ppd->pkeys[2] != 0) {
7168 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7169 hfi1_event_pkey_change(ppd->dd, ppd->port);
7174 * Convert the given link width to the OPA link width bitmask.
7176 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7181 * Simulator and quick linkup do not set the width.
7182 * Just set it to 4x without complaint.
7184 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7185 return OPA_LINK_WIDTH_4X;
7186 return 0; /* no lanes up */
7187 case 1: return OPA_LINK_WIDTH_1X;
7188 case 2: return OPA_LINK_WIDTH_2X;
7189 case 3: return OPA_LINK_WIDTH_3X;
7191 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7194 case 4: return OPA_LINK_WIDTH_4X;
7199 * Do a population count on the bottom nibble.
7201 static const u8 bit_counts[16] = {
7202 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7205 static inline u8 nibble_to_count(u8 nibble)
7207 return bit_counts[nibble & 0xf];
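/*
 * Example: nibble_to_count(0xb) == 3, i.e. enable-lane bits 0b1011
 * mean three active lanes.
 */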
7211 * Read the active lane information from the 8051 registers and return
7214 * Active lane information is found in these 8051 registers:
7218 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7224 u8 tx_polarity_inversion;
7225 u8 rx_polarity_inversion;
7228 /* read the active lanes */
7229 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7230 &rx_polarity_inversion, &max_rate);
7231 read_local_lni(dd, &enable_lane_rx);
7233 /* convert to counts */
7234 tx = nibble_to_count(enable_lane_tx);
7235 rx = nibble_to_count(enable_lane_rx);
7238 * Set link_speed_active here, overriding what was set in
7239 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7240 * set the max_rate field in handle_verify_cap until v0.19.
7242 if ((dd->icode == ICODE_RTL_SILICON) &&
7243 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7244 /* max_rate: 0 = 12.5G, 1 = 25G */
7247 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7251 "%s: unexpected max rate %d, using 25Gb\n",
7252 __func__, (int)max_rate);
7255 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7261 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7262 enable_lane_tx, tx, enable_lane_rx, rx);
7263 *tx_width = link_width_to_bits(dd, tx);
7264 *rx_width = link_width_to_bits(dd, rx);
7268 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7269 * Valid after the end of VerifyCap and during LinkUp. Does not change
7270 * after link up. I.e. look elsewhere for downgrade information.
7273 * + bits [7:4] contain the number of active transmitters
7274 * + bits [3:0] contain the number of active receivers
7275 * These are numbers 1 through 4 and can be different values if the
7276 * link is asymmetric.
7278 * verify_cap_local_fm_link_width[0] retains its original value.
7280 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7284 u8 misc_bits, local_flags;
7285 u16 active_tx, active_rx;
7287 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7289 rx = (widths >> 8) & 0xf;
7291 *tx_width = link_width_to_bits(dd, tx);
7292 *rx_width = link_width_to_bits(dd, rx);
7294 /* print the active widths */
7295 get_link_widths(dd, &active_tx, &active_rx);
7299 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7300 * hardware information when the link first comes up.
7302 * The link width is not available until after VerifyCap.AllFramesReceived
7303 * (the trigger for handle_verify_cap), so this is outside that routine
7304 * and should be called when the 8051 signals linkup.
7306 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7308 u16 tx_width, rx_width;
7310 /* get end-of-LNI link widths */
7311 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7313 /* use tx_width as the link is supposed to be symmetric on link up */
7314 ppd->link_width_active = tx_width;
7315 /* link width downgrade active (LWD.A) starts out matching LW.A */
7316 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7317 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7318 /* per OPA spec, on link up LWD.E resets to LWD.S */
7319 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7320 /* cache the active egress rate (units of [10^6 bits/sec]) */
7321 ppd->current_egress_rate = active_egress_rate(ppd);
7325 * Handle a verify capabilities interrupt from the 8051.
7327 * This is a work-queue function outside of the interrupt.
7329 void handle_verify_cap(struct work_struct *work)
7331 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7333 struct hfi1_devdata *dd = ppd->dd;
7335 u8 power_management;
7345 u16 active_tx, active_rx;
7346 u8 partner_supported_crc;
7350 set_link_state(ppd, HLS_VERIFY_CAP);
7352 lcb_shutdown(dd, 0);
7353 adjust_lcb_for_fpga_serdes(dd);
7355 read_vc_remote_phy(dd, &power_management, &continious);
7356 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7357 &partner_supported_crc);
7358 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7359 read_remote_device_id(dd, &device_id, &device_rev);
7361 * And the 'MgmtAllowed' information, which is exchanged during
7362 * LNI, is also available at this point.
7364 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7365 /* print the active widths */
7366 get_link_widths(dd, &active_tx, &active_rx);
7368 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7369 (int)power_management, (int)continious);
7371 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7372 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7373 (int)partner_supported_crc);
7374 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7375 (u32)remote_tx_rate, (u32)link_widths);
7376 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7377 (u32)device_id, (u32)device_rev);
7379 * The peer vAU value just read is the peer receiver value. HFI does
7380 * not support a transmit vAU of 0 (AU == 8). We advertised that
7381 * with Z=1 in the fabric capabilities sent to the peer. The peer
7382 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7383 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7384 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7385 * subject to the Z value exception.
7389 set_up_vau(dd, vau);
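/*
 * Illustration of the vAU encoding described above (assuming each vAU
 * step doubles the AU, per the vAU 0 -> AU 8 and vAU 1 -> AU 16
 * examples): AU bytes = 8 << vau, so
 *
 *	vAU 0 -> AU  8 (not supported for our transmit)
 *	vAU 1 -> AU 16 (what a peer vAU of 0 is promoted to under Z=1)
 *	vAU 3 -> AU 64 (our hardwired transmit value)
 */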
7392 * Set VL15 credits to 0 in global credit register. Cache remote VL15
7393 * credits value and wait for link-up interrupt to set it.
7396 dd->vl15buf_cached = vl15buf;
7398 /* set up the LCB CRC mode */
7399 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7401 /* order is important: use the lowest bit in common */
7402 if (crc_mask & CAP_CRC_14B)
7403 crc_val = LCB_CRC_14B;
7404 else if (crc_mask & CAP_CRC_48B)
7405 crc_val = LCB_CRC_48B;
7406 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7407 crc_val = LCB_CRC_12B_16B_PER_LANE;
7409 crc_val = LCB_CRC_16B;
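/*
 * Example of the lowest-common-bit selection above: if we enable
 * 14B|16B and the partner advertises 16B|48B, crc_mask has only 16B
 * set and the chain falls through to LCB_CRC_16B; had both ends
 * enabled 14B, the first branch would win.
 */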
7411 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7412 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7413 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7415 /* set (14b only) or clear sideband credit */
7416 reg = read_csr(dd, SEND_CM_CTRL);
7417 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7418 write_csr(dd, SEND_CM_CTRL,
7419 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7421 write_csr(dd, SEND_CM_CTRL,
7422 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7425 ppd->link_speed_active = 0; /* invalid value */
7426 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7427 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7428 switch (remote_tx_rate) {
7430 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7433 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7437 /* actual rate is highest bit of the ANDed rates */
7438 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7441 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7443 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7445 if (ppd->link_speed_active == 0) {
7446 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7447 __func__, (int)remote_tx_rate);
7448 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7452 * Cache the values of the supported, enabled, and active
7453 * LTP CRC modes to return in 'portinfo' queries. But the bit
7454 * flags that are returned in the portinfo query differ from
7455 * what's in the link_crc_mask, crc_sizes, and crc_val
7456 * variables. Convert these here.
7458 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7459 /* supported crc modes */
7460 ppd->port_ltp_crc_mode |=
7461 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7462 /* enabled crc modes */
7463 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7464 /* active crc mode */
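/*
 * Resulting port_ltp_crc_mode nibble layout, per the packing above:
 * [11:8] supported, [7:4] enabled, [3:0] active. For example,
 * supported 0x7, enabled 0x3, active 0x1 packs to 0x731.
 */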
7466 /* set up the remote credit return table */
7467 assign_remote_cm_au_table(dd, vcu);
7470 * The LCB is reset on entry to handle_verify_cap(), so this must
7471 * be applied on every link up.
7473 * Adjust LCB error kill enable to kill the link if
7474 * these RBUF errors are seen:
7475 * REPLAY_BUF_MBE_SMASK
7476 * FLIT_INPUT_BUF_MBE_SMASK
7478 if (is_ax(dd)) { /* fixed in B0 */
7479 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7480 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7481 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7482 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7485 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7486 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7488 /* give 8051 access to the LCB CSRs */
7489 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7490 set_8051_lcb_access(dd);
7492 if (ppd->mgmt_allowed)
7493 add_full_mgmt_pkey(ppd);
7495 /* tell the 8051 to go to LinkUp */
7496 set_link_state(ppd, HLS_GOING_UP);
7500 * Apply the link width downgrade enabled policy against the current active
7501 * link widths.
7503 * Called when the enabled policy changes or the active link widths change.
7505 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7512 /* use the hls lock to avoid a race with actual link up */
7515 mutex_lock(&ppd->hls_lock);
7516 /* only apply if the link is up */
7517 if (ppd->host_link_state & HLS_DOWN) {
7518 /* still going up... wait and retry */
7519 if (ppd->host_link_state & HLS_GOING_UP) {
7520 if (++tries < 1000) {
7521 mutex_unlock(&ppd->hls_lock);
7522 usleep_range(100, 120); /* arbitrary */
7526 "%s: giving up waiting for link state change\n",
7532 lwde = ppd->link_width_downgrade_enabled;
7534 if (refresh_widths) {
7535 get_link_widths(ppd->dd, &tx, &rx);
7536 ppd->link_width_downgrade_tx_active = tx;
7537 ppd->link_width_downgrade_rx_active = rx;
7540 if (ppd->link_width_downgrade_tx_active == 0 ||
7541 ppd->link_width_downgrade_rx_active == 0) {
7542 /* the 8051 reported a dead link as a downgrade */
7543 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7544 } else if (lwde == 0) {
7545 /* downgrade is disabled */
7547 /* bounce if not at starting active width */
7548 if ((ppd->link_width_active !=
7549 ppd->link_width_downgrade_tx_active) ||
7550 (ppd->link_width_active !=
7551 ppd->link_width_downgrade_rx_active)) {
7553 "Link downgrade is disabled and link has downgraded, downing link\n");
7555 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7556 ppd->link_width_active,
7557 ppd->link_width_downgrade_tx_active,
7558 ppd->link_width_downgrade_rx_active);
7561 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7562 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7563 /* Tx or Rx is outside the enabled policy */
7565 "Link is outside of downgrade allowed, downing link\n");
7567 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7568 lwde, ppd->link_width_downgrade_tx_active,
7569 ppd->link_width_downgrade_rx_active);
7574 mutex_unlock(&ppd->hls_lock);
7577 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7578 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7579 set_link_state(ppd, HLS_DN_OFFLINE);
7585 * Handle a link downgrade interrupt from the 8051.
7587 * This is a work-queue function outside of the interrupt.
7589 void handle_link_downgrade(struct work_struct *work)
7591 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7592 link_downgrade_work);
7594 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7595 apply_link_downgrade_policy(ppd, 1);
7598 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7600 return flag_string(buf, buf_len, flags, dcc_err_flags,
7601 ARRAY_SIZE(dcc_err_flags));
7604 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7606 return flag_string(buf, buf_len, flags, lcb_err_flags,
7607 ARRAY_SIZE(lcb_err_flags));
7610 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7612 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7613 ARRAY_SIZE(dc8051_err_flags));
7616 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7618 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7619 ARRAY_SIZE(dc8051_info_err_flags));
7622 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7624 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7625 ARRAY_SIZE(dc8051_info_host_msg_flags));
7628 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7630 struct hfi1_pportdata *ppd = dd->pport;
7631 u64 info, err, host_msg;
7632 int queue_link_down = 0;
7635 /* look at the flags */
7636 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7637 /* 8051 information set by firmware */
7638 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7639 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7640 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7641 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7643 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7644 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7647 * Handle error flags.
7649 if (err & FAILED_LNI) {
7651 * LNI error indications are cleared by the 8051
7652 * only when starting polling. Only pay attention
7653 * to them when in the states that occur during
7654 * polling.
7656 if (ppd->host_link_state
7657 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7658 queue_link_down = 1;
7659 dd_dev_info(dd, "Link error: %s\n",
7660 dc8051_info_err_string(buf,
7665 err &= ~(u64)FAILED_LNI;
7667 /* unknown frames can happen during LNI, just count */
7668 if (err & UNKNOWN_FRAME) {
7669 ppd->unknown_frame_count++;
7670 err &= ~(u64)UNKNOWN_FRAME;
7673 /* report remaining errors, but do not do anything */
7674 dd_dev_err(dd, "8051 info error: %s\n",
7675 dc8051_info_err_string(buf, sizeof(buf),
7680 * Handle host message flags.
7682 if (host_msg & HOST_REQ_DONE) {
7684 * Presently, the driver does a busy wait for
7685 * host requests to complete. This is only an
7686 * informational message.
7687 * NOTE: The 8051 clears the host message
7688 * information *on the next 8051 command*.
7689 * Therefore, when linkup is achieved,
7690 * this flag will still be set.
7692 host_msg &= ~(u64)HOST_REQ_DONE;
7694 if (host_msg & BC_SMA_MSG) {
7695 queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7696 host_msg &= ~(u64)BC_SMA_MSG;
7698 if (host_msg & LINKUP_ACHIEVED) {
7699 dd_dev_info(dd, "8051: Link up\n");
7700 queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7701 host_msg &= ~(u64)LINKUP_ACHIEVED;
7703 if (host_msg & EXT_DEVICE_CFG_REQ) {
7704 handle_8051_request(ppd);
7705 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7707 if (host_msg & VERIFY_CAP_FRAME) {
7708 queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7709 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7711 if (host_msg & LINK_GOING_DOWN) {
7712 const char *extra = "";
7713 /* no downgrade action needed if going down */
7714 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7715 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7716 extra = " (ignoring downgrade)";
7718 dd_dev_info(dd, "8051: Link down%s\n", extra);
7719 queue_link_down = 1;
7720 host_msg &= ~(u64)LINK_GOING_DOWN;
7722 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7723 queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7724 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7727 /* report remaining messages, but do not do anything */
7728 dd_dev_info(dd, "8051 info host message: %s\n",
7729 dc8051_info_host_msg_string(buf,
7734 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7736 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7738 * Lost the 8051 heartbeat. If this happens, we
7739 * receive constant interrupts about it. Disable
7740 * the interrupt after the first.
7742 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7743 write_csr(dd, DC_DC8051_ERR_EN,
7744 read_csr(dd, DC_DC8051_ERR_EN) &
7745 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7747 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7750 /* report the error, but do not do anything */
7751 dd_dev_err(dd, "8051 error: %s\n",
7752 dc8051_err_string(buf, sizeof(buf), reg));
7755 if (queue_link_down) {
7757 * if the link is already going down or disabled, do not
7758 * queue another
7760 if ((ppd->host_link_state &
7761 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7762 ppd->link_enabled == 0) {
7763 dd_dev_info(dd, "%s: not queuing link down\n",
7766 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7771 static const char * const fm_config_txt[] = {
7773 "BadHeadDist: Distance violation between two head flits",
7775 "BadTailDist: Distance violation between two tail flits",
7777 "BadCtrlDist: Distance violation between two credit control flits",
7779 "BadCrdAck: Credits return for unsupported VL",
7781 "UnsupportedVLMarker: Received VL Marker",
7783 "BadPreempt: Exceeded the preemption nesting level",
7785 "BadControlFlit: Received unsupported control flit",
7788 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7791 static const char * const port_rcv_txt[] = {
7793 "BadPktLen: Illegal PktLen",
7795 "PktLenTooLong: Packet longer than PktLen",
7797 "PktLenTooShort: Packet shorter than PktLen",
7799 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7801 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7803 "BadL2: Illegal L2 opcode",
7805 "BadSC: Unsupported SC",
7807 "BadRC: Illegal RC",
7809 "PreemptError: Preempting with same VL",
7811 "PreemptVL15: Preempting a VL15 packet",
7814 #define OPA_LDR_FMCONFIG_OFFSET 16
7815 #define OPA_LDR_PORTRCV_OFFSET 0
7816 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7818 u64 info, hdr0, hdr1;
7821 struct hfi1_pportdata *ppd = dd->pport;
7825 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7826 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7827 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7828 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7829 /* set status bit */
7830 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7832 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7835 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7836 struct hfi1_pportdata *ppd = dd->pport;
7837 /* this counter saturates at (2^32) - 1 */
7838 if (ppd->link_downed < (u32)UINT_MAX)
7840 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7843 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7844 u8 reason_valid = 1;
7846 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7847 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7848 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7849 /* set status bit */
7850 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7860 extra = fm_config_txt[info];
7863 extra = fm_config_txt[info];
7864 if (ppd->port_error_action &
7865 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7868 * lcl_reason cannot be derived from info
7872 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7877 snprintf(buf, sizeof(buf), "reserved%lld", info);
7882 if (reason_valid && !do_bounce) {
7883 do_bounce = ppd->port_error_action &
7884 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7885 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7888 /* just report this */
7889 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7891 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
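/*
 * Example of the PortErrorAction test above: fmconfig error code
 * 'info' checks bit (OPA_LDR_FMCONFIG_OFFSET + info), so info 2
 * (BadCtrlDist, per the table above) checks bit 18 of
 * port_error_action and maps to link-down reason
 * OPA_LINKDOWN_REASON_BAD_HEAD_DIST + 2.
 */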
7894 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7895 u8 reason_valid = 1;
7897 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7898 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7899 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7900 if (!(dd->err_info_rcvport.status_and_code &
7901 OPA_EI_STATUS_SMASK)) {
7902 dd->err_info_rcvport.status_and_code =
7903 info & OPA_EI_CODE_SMASK;
7904 /* set status bit */
7905 dd->err_info_rcvport.status_and_code |=
7906 OPA_EI_STATUS_SMASK;
7908 * save first 2 flits in the packet that caused
7909 * the error
7911 dd->err_info_rcvport.packet_flit1 = hdr0;
7912 dd->err_info_rcvport.packet_flit2 = hdr1;
7925 extra = port_rcv_txt[info];
7929 snprintf(buf, sizeof(buf), "reserved%lld", info);
7934 if (reason_valid && !do_bounce) {
7935 do_bounce = ppd->port_error_action &
7936 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7937 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7940 /* just report this */
7941 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
7942 " hdr0 0x%llx, hdr1 0x%llx\n",
7945 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7948 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7949 /* informative only */
7950 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
7951 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7953 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7954 /* informative only */
7955 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
7956 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7959 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
7960 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
7962 /* report any remaining errors */
7964 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
7965 dcc_err_string(buf, sizeof(buf), reg));
7967 if (lcl_reason == 0)
7968 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7971 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
7973 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7974 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7978 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7982 dd_dev_info(dd, "LCB Error: %s\n",
7983 lcb_err_string(buf, sizeof(buf), reg));
7987 * CCE block DC interrupt. Source is < 8.
7989 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7991 const struct err_reg_info *eri = &dc_errs[source];
7994 interrupt_clear_down(dd, 0, eri);
7995 } else if (source == 3 /* dc_lbm_int */) {
7997 * This indicates that a parity error has occurred on the
7998 * address/control lines presented to the LBM. The error
7999 * is a single pulse, there is no associated error flag,
8000 * and it is non-maskable. This is because if a parity
8001 * error occurs on the request, the request is dropped.
8002 * This should never occur, but it is nice to know if it
8003 * ever does.
8005 dd_dev_err(dd, "Parity error in DC LBM block\n");
8007 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8012 * TX block send credit interrupt. Source is < 160.
8014 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8016 sc_group_release_update(dd, source);
8020 * TX block SDMA interrupt. Source is < 48.
8022 * SDMA interrupts are grouped by type:
8025 * N - 2N-1 = SDmaProgress
8026 * 2N - 3N-1 = SDmaIdle
8028 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8030 /* what interrupt */
8031 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8033 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8035 #ifdef CONFIG_SDMA_VERBOSITY
8036 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8037 slashstrip(__FILE__), __LINE__, __func__);
8038 sdma_dumpstate(&dd->per_sdma[which]);
8041 if (likely(what < 3 && which < dd->num_sdma)) {
8042 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8044 /* should not happen */
8045 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8050 * RX block receive available interrupt. Source is < 160.
8052 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8054 struct hfi1_ctxtdata *rcd;
8057 if (likely(source < dd->num_rcv_contexts)) {
8058 rcd = dd->rcd[source];
8060 /* Check for non-user contexts, including vnic */
8061 if ((source < dd->first_dyn_alloc_ctxt) ||
8062 (rcd->sc && (rcd->sc->type == SC_KERNEL)))
8063 rcd->do_interrupt(rcd, 0);
8065 handle_user_interrupt(rcd);
8068 /* received an interrupt, but no rcd */
8069 err_detail = "dataless";
8071 /* received an interrupt, but are not using that context */
8072 err_detail = "out of range";
8074 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8075 err_detail, source);
8079 * RX block receive urgent interrupt. Source is < 160.
8081 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8083 struct hfi1_ctxtdata *rcd;
8086 if (likely(source < dd->num_rcv_contexts)) {
8087 rcd = dd->rcd[source];
8089 /* only pay attention to user urgent interrupts */
8090 if ((source >= dd->first_dyn_alloc_ctxt) &&
8091 (!rcd->sc || (rcd->sc->type == SC_USER)))
8092 handle_user_interrupt(rcd);
8095 /* received an interrupt, but no rcd */
8096 err_detail = "dataless";
8098 /* received an interrupt, but are not using that context */
8099 err_detail = "out of range";
8101 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8102 err_detail, source);
8106 * Reserved range interrupt. Should not be called in normal operation.
8108 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8112 dd_dev_err(dd, "unexpected %s interrupt\n",
8113 is_reserved_name(name, sizeof(name), source));
8116 static const struct is_table is_table[] = {
8119 * name func interrupt func
8121 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8122 is_misc_err_name, is_misc_err_int },
8123 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8124 is_sdma_eng_err_name, is_sdma_eng_err_int },
8125 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8126 is_sendctxt_err_name, is_sendctxt_err_int },
8127 { IS_SDMA_START, IS_SDMA_END,
8128 is_sdma_eng_name, is_sdma_eng_int },
8129 { IS_VARIOUS_START, IS_VARIOUS_END,
8130 is_various_name, is_various_int },
8131 { IS_DC_START, IS_DC_END,
8132 is_dc_name, is_dc_int },
8133 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8134 is_rcv_avail_name, is_rcv_avail_int },
8135 { IS_RCVURGENT_START, IS_RCVURGENT_END,
8136 is_rcv_urgent_name, is_rcv_urgent_int },
8137 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8138 is_send_credit_name, is_send_credit_int},
8139 { IS_RESERVED_START, IS_RESERVED_END,
8140 is_reserved_name, is_reserved_int},
8144 * Interrupt source interrupt - called when the given source has an interrupt.
8145 * Source is a bit index into an array of 64-bit integers.
8147 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8149 const struct is_table *entry;
8151 /* avoids a double compare by walking the table in-order */
8152 for (entry = &is_table[0]; entry->is_name; entry++) {
8153 if (source < entry->end) {
8154 trace_hfi1_interrupt(dd, entry, source);
8155 entry->is_int(dd, source - entry->start);
8159 /* fell off the end */
8160 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8164 * General interrupt handler. This is able to correctly handle
8165 * all interrupts in case INTx is used.
8167 static irqreturn_t general_interrupt(int irq, void *data)
8169 struct hfi1_devdata *dd = data;
8170 u64 regs[CCE_NUM_INT_CSRS];
8174 this_cpu_inc(*dd->int_counter);
8176 /* phase 1: scan and clear all handled interrupts */
8177 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8178 if (dd->gi_mask[i] == 0) {
8179 regs[i] = 0; /* used later */
8180 continue;
8181 }
8182 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8183 dd->gi_mask[i];
8184 /* only clear if anything is set */
8185 if (regs[i])
8186 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8189 /* phase 2: call the appropriate handler */
8190 for_each_set_bit(bit, (unsigned long *)&regs[0],
8191 CCE_NUM_INT_CSRS * 64) {
8192 is_interrupt(dd, bit);
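/*
 * Mapping used by both phases above: interrupt source bit b lives in
 * status/clear CSR number b / 64 (byte offset 8 * (b / 64)) at bit
 * position b % 64; e.g. source 70 is bit 6 of the second CSR.
 */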
8198 static irqreturn_t sdma_interrupt(int irq, void *data)
8200 struct sdma_engine *sde = data;
8201 struct hfi1_devdata *dd = sde->dd;
8204 #ifdef CONFIG_SDMA_VERBOSITY
8205 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8206 slashstrip(__FILE__), __LINE__, __func__);
8207 sdma_dumpstate(sde);
8210 this_cpu_inc(*dd->int_counter);
8212 /* This read_csr is really bad in the hot path */
8213 status = read_csr(dd,
8214 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8215 & sde->imask;
8216 if (likely(status)) {
8217 /* clear the interrupt(s) */
8218 write_csr(dd,
8219 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8220 status);
8222 /* handle the interrupt(s) */
8223 sdma_engine_interrupt(sde, status);
8225 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8232 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8233 * to ensure that the write completed. This does NOT guarantee that
8234 * queued DMA writes to memory from the chip are pushed.
8236 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8238 struct hfi1_devdata *dd = rcd->dd;
8239 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8241 mmiowb(); /* make sure everything before is written */
8242 write_csr(dd, addr, rcd->imask);
8243 /* force the above write on the chip and get a value back */
8244 (void)read_csr(dd, addr);
8247 /* force the receive interrupt */
8248 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8250 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8254 * Return non-zero if a packet is present.
8256 * This routine is called when rechecking for packets after the RcvAvail
8257 * interrupt has been cleared down. First, do a quick check of memory for
8258 * a packet present. If not found, use an expensive CSR read of the context
8259 * tail to determine the actual tail. The CSR read is necessary because there
8260 * is no method to push pending DMAs to memory other than an interrupt and we
8261 * are trying to determine if we need to force an interrupt.
8263 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8268 if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8269 present = (rcd->seq_cnt ==
8270 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8271 else /* is RDMA rtail */
8272 present = (rcd->head != get_rcvhdrtail(rcd));
8277 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8278 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8279 return rcd->head != tail;
8283 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8284 * This routine will try to handle packets immediately (latency), but if
8285 * it finds too many, it will invoke the thread handler (bandwidth). The
8286 * chip receive interrupt is *not* cleared down until this or the thread (if
8287 * invoked) is finished. The intent is to avoid extra interrupts while we
8288 * are processing packets anyway.
8290 static irqreturn_t receive_context_interrupt(int irq, void *data)
8292 struct hfi1_ctxtdata *rcd = data;
8293 struct hfi1_devdata *dd = rcd->dd;
8297 trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8298 this_cpu_inc(*dd->int_counter);
8299 aspm_ctx_disable(rcd);
8301 /* receive interrupt remains blocked while processing packets */
8302 disposition = rcd->do_interrupt(rcd, 0);
8305 * Too many packets were seen while processing packets in this
8306 * IRQ handler. Invoke the handler thread. The receive interrupt
8307 * remains blocked.
8309 if (disposition == RCV_PKT_LIMIT)
8310 return IRQ_WAKE_THREAD;
8313 * The packet processor detected no more packets. Clear the receive
8314 * interrupt and recheck for a packet that may have arrived
8315 * after the previous check and interrupt clear. If a packet arrived,
8316 * force another interrupt.
8318 clear_recv_intr(rcd);
8319 present = check_packet_present(rcd);
8321 force_recv_intr(rcd);
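/*
 * The clear-then-recheck sequence above closes the race where a packet
 * lands after the packet processor's last check but before the
 * interrupt is cleared: such a packet raises no new interrupt, so it
 * is caught by check_packet_present() and serviced via a forced
 * interrupt.
 */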
8327 * Receive packet thread handler. This expects to be invoked with the
8328 * receive interrupt still blocked.
8330 static irqreturn_t receive_context_thread(int irq, void *data)
8332 struct hfi1_ctxtdata *rcd = data;
8335 /* receive interrupt is still blocked from the IRQ handler */
8336 (void)rcd->do_interrupt(rcd, 1);
8339 * The packet processor will only return if it detected no more
8340 * packets. Hold IRQs here so we can safely clear the interrupt and
8341 * recheck for a packet that may have arrived after the previous
8342 * check and the interrupt clear. If a packet arrived, force another
8343 * interrupt.
8345 local_irq_disable();
8346 clear_recv_intr(rcd);
8347 present = check_packet_present(rcd);
8349 force_recv_intr(rcd);
8355 /* ========================================================================= */
8357 u32 read_physical_state(struct hfi1_devdata *dd)
8361 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8362 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8363 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8366 u32 read_logical_state(struct hfi1_devdata *dd)
8370 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8371 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8372 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8375 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8379 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8380 /* clear current state, set new state */
8381 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8382 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8383 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8387 * Use the 8051 to read a LCB CSR.
8389 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8394 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8395 if (acquire_lcb_access(dd, 0) == 0) {
8396 *data = read_csr(dd, addr);
8397 release_lcb_access(dd, 0);
8403 /* register is an index of LCB registers: (offset - base) / 8 */
8404 regno = (addr - DC_LCB_CFG_RUN) >> 3;
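/*
 * e.g. an LCB CSR at byte offset DC_LCB_CFG_RUN + 0x18 is register
 * index (0x18 >> 3) = 3.
 */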
8405 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8406 if (ret != HCMD_SUCCESS)
8412 * Provide a cache for some of the LCB registers in case the LCB is
8413 * unavailable.
8414 * (The LCB is unavailable in certain link states, for example.)
8421 static struct lcb_datum lcb_cache[] = {
8422 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8423 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8424 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8427 static void update_lcb_cache(struct hfi1_devdata *dd)
8433 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8434 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8436 /* Update if we get good data */
8437 if (likely(ret != -EBUSY))
8438 lcb_cache[i].val = val;
8442 static int read_lcb_cache(u32 off, u64 *val)
8446 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8447 if (lcb_cache[i].off == off) {
8448 *val = lcb_cache[i].val;
8453 pr_warn("%s bad offset 0x%x\n", __func__, off);
8458 * Read an LCB CSR. Access may not be in host control, so check.
8459 * Return 0 on success, -EBUSY on failure.
8461 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8463 struct hfi1_pportdata *ppd = dd->pport;
8465 /* if up, go through the 8051 for the value */
8466 if (ppd->host_link_state & HLS_UP)
8467 return read_lcb_via_8051(dd, addr, data);
8468 /* if going up or down, check the cache, otherwise, no access */
8469 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8470 if (read_lcb_cache(addr, data))
8471 return -EBUSY;
8472 return 0;
8473 }
8475 /* otherwise, host has access */
8476 *data = read_csr(dd, addr);
8481 * Use the 8051 to write a LCB CSR.
8483 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8488 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8489 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8490 if (acquire_lcb_access(dd, 0) == 0) {
8491 write_csr(dd, addr, data);
8492 release_lcb_access(dd, 0);
8498 /* register is an index of LCB registers: (offset - base) / 8 */
8499 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8500 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8501 if (ret != HCMD_SUCCESS)
8507 * Write an LCB CSR. Access may not be in host control, so check.
8508 * Return 0 on success, -EBUSY on failure.
8510 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8512 struct hfi1_pportdata *ppd = dd->pport;
8514 /* if up, go through the 8051 for the value */
8515 if (ppd->host_link_state & HLS_UP)
8516 return write_lcb_via_8051(dd, addr, data);
8517 /* if going up or down, no access */
8518 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8519 return -EBUSY;
8520 /* otherwise, host has access */
8521 write_csr(dd, addr, data);
8527 * < 0 = Linux error, not able to get access
8528 * > 0 = 8051 command RETURN_CODE
8530 static int do_8051_command(
8531 struct hfi1_devdata *dd,
8538 unsigned long timeout;
8540 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8542 mutex_lock(&dd->dc8051_lock);
8544 /* We can't send any commands to the 8051 if it's in reset */
8545 if (dd->dc_shutdown) {
8546 return_code = -ENODEV;
8551 * If an 8051 host command timed out previously, then the 8051 is
8554 * On first timeout, attempt to reset and restart the entire DC
8555 * block (including 8051). (Is this too big of a hammer?)
8557 * If the 8051 times out a second time, the reset did not bring it
8558 * back to healthy life. In that case, fail any subsequent commands.
8560 if (dd->dc8051_timed_out) {
8561 if (dd->dc8051_timed_out > 1) {
8563 "Previous 8051 host command timed out, skipping command %u\n",
8565 return_code = -ENXIO;
8573 * If there is no timeout, then the 8051 command interface is
8574 * waiting for a command.
8578 * When writing a LCB CSR, out_data contains the full value to
8579 * be written, while in_data contains the relative LCB
8580 * address in 7:0. Do the work here, rather than the caller,
8581 * of distributing the write data to where it needs to go:
8584 * 39:00 -> in_data[47:8]
8585 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8586 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8588 if (type == HCMD_WRITE_LCB_CSR) {
8589 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8590 /* must preserve COMPLETED - it is tied to hardware */
8591 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8592 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8593 reg |= ((((*out_data) >> 40) & 0xff) <<
8594 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8595 | ((((*out_data) >> 48) & 0xffff) <<
8596 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8597 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
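/*
 * Example of the distribution above for *out_data =
 * 0x1122334455667788: bits 39:0 (0x4455667788) are merged into
 * in_data[47:8], bits 47:40 (0x33) land in RETURN_CODE, and bits
 * 63:48 (0x1122) land in RSP_DATA.
 */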
8601 * Do two writes: the first to stabilize the type and req_data, the
8602 * second to activate.
8604 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8605 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8606 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8607 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8608 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8609 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8610 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8612 /* wait for completion, alternate: interrupt */
8613 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8615 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8616 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8619 if (time_after(jiffies, timeout)) {
8620 dd->dc8051_timed_out++;
8621 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8624 return_code = -ETIMEDOUT;
8631 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8632 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8633 if (type == HCMD_READ_LCB_CSR) {
8634 /* top 16 bits are in a different register */
8635 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8636 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8638 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8641 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8642 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8643 dd->dc8051_timed_out = 0;
8645 * Clear command for next user.
8647 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8650 mutex_unlock(&dd->dc8051_lock);
8654 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8656 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8659 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8660 u8 lane_id, u32 config_data)
8665 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8666 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8667 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8668 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8669 if (ret != HCMD_SUCCESS) {
8671 "load 8051 config: field id %d, lane %d, err %d\n",
8672 (int)field_id, (int)lane_id, ret);
8678 * Read the 8051 firmware "registers". Use the RAM directly. Always
8679 * set the result, even on error.
8680 * Return 0 on success, -errno on failure
8682 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8689 /* address start depends on the lane_id */
8690 if (lane_id < 4)
8691 addr = (4 * NUM_GENERAL_FIELDS)
8692 + (lane_id * 4 * NUM_LANE_FIELDS);
8693 else
8694 addr = 0;
8695 addr += field_id * 4;
8697 /* read is in 8-byte chunks, hardware will truncate the address down */
8698 ret = read_8051_data(dd, addr, 8, &big_data);
8701 /* extract the 4 bytes we want */
8702 if (addr & 0x4)
8703 *result = (u32)(big_data >> 32);
8704 else
8705 *result = (u32)big_data;
8708 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8709 __func__, lane_id, field_id);
8715 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8720 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8721 | power_management << POWER_MANAGEMENT_SHIFT;
8722 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8723 GENERAL_CONFIG, frame);
8726 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8727 u16 vl15buf, u8 crc_sizes)
8731 frame = (u32)vau << VAU_SHIFT
8733 | (u32)vcu << VCU_SHIFT
8734 | (u32)vl15buf << VL15BUF_SHIFT
8735 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8736 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8737 GENERAL_CONFIG, frame);
8740 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8741 u8 *flag_bits, u16 *link_widths)
8745 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8747 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8748 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8749 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8752 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8759 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8760 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8761 | (u32)link_widths << LINK_WIDTH_SHIFT;
8762 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8766 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8771 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8772 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8773 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8776 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8781 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8782 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8783 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8784 & REMOTE_DEVICE_REV_MASK;
8787 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8792 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8793 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8794 STS_FM_VERSION_MAJOR_MASK;
8795 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8796 STS_FM_VERSION_MINOR_MASK;
8798 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8799 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8800 STS_FM_VERSION_PATCH_MASK;
8803 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8808 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8809 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8810 & POWER_MANAGEMENT_MASK;
8811 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8812 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8815 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8816 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8820 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8821 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8822 *z = (frame >> Z_SHIFT) & Z_MASK;
8823 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8824 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8825 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8828 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8834 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8836 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8837 & REMOTE_TX_RATE_MASK;
8838 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8841 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8845 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8846 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8849 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8853 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8854 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8857 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8859 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8862 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8864 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8867 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8873 if (dd->pport->host_link_state & HLS_UP) {
8874 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8877 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8878 & LINK_QUALITY_MASK;
8882 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8886 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8887 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8890 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8894 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8895 *ldr = (frame & 0xff);
8898 static int read_tx_settings(struct hfi1_devdata *dd,
8900 u8 *tx_polarity_inversion,
8901 u8 *rx_polarity_inversion,
8907 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8908 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8909 & ENABLE_LANE_TX_MASK;
8910 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8911 & TX_POLARITY_INVERSION_MASK;
8912 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8913 & RX_POLARITY_INVERSION_MASK;
8914 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8918 static int write_tx_settings(struct hfi1_devdata *dd,
8920 u8 tx_polarity_inversion,
8921 u8 rx_polarity_inversion,
8926 /* no need to mask, all variable sizes match field widths */
8927 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8928 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8929 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8930 | max_rate << MAX_RATE_SHIFT;
8931 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8935 * Read an idle LCB message.
8937 * Returns 0 on success, -EINVAL on error
8939 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8943 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8944 if (ret != HCMD_SUCCESS) {
8945 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8949 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8950 /* return only the payload as we already know the type */
8951 *data_out >>= IDLE_PAYLOAD_SHIFT;
8956 * Read an idle SMA message. To be done in response to a notification from
8959 * Returns 0 on success, -EINVAL on error
8961 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8963 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8968 * Send an idle LCB message.
8970 * Returns 0 on success, -EINVAL on error
8972 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8976 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8977 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8978 if (ret != HCMD_SUCCESS) {
8979 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8987 * Send an idle SMA message.
8989 * Returns 0 on success, -EINVAL on error
8991 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8995 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8996 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8997 return send_idle_message(dd, data);
9001 * Initialize the LCB then do a quick link up. This may or may not be
9002 * in loopback.
9004 * return 0 on success, -errno on error
9006 static int do_quick_linkup(struct hfi1_devdata *dd)
9010 lcb_shutdown(dd, 0);
9013 /* LCB_CFG_LOOPBACK.VAL = 2 */
9014 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9015 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9016 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9017 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9020 /* start the LCBs */
9021 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9022 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9024 /* simulator only loopback steps */
9025 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9026 /* LCB_CFG_RUN.EN = 1 */
9027 write_csr(dd, DC_LCB_CFG_RUN,
9028 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9030 ret = wait_link_transfer_active(dd, 10);
9034 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9035 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9040 * When doing quick linkup and not in loopback, both
9041 * sides must be done with LCB set-up before either
9042 * starts the quick linkup. Put a delay here so that
9043 * both sides can be started and have a chance to be
9044 * done with LCB set up before resuming.
9047 "Pausing for peer to be finished with LCB set up\n");
9049 dd_dev_err(dd, "Continuing with quick linkup\n");
9052 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9053 set_8051_lcb_access(dd);
9056 * State "quick" LinkUp request sets the physical link state to
9057 * LinkUp without a verify capability sequence.
9058 * This state is in simulator v37 and later.
9060 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9061 if (ret != HCMD_SUCCESS) {
9063 "%s: set physical link state to quick LinkUp failed with return %d\n",
9066 set_host_lcb_access(dd);
9067 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9074 return 0; /* success */
9078 * Set the SerDes to internal loopback mode.
9079 * Returns 0 on success, -errno on error.
9081 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9085 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9086 if (ret == HCMD_SUCCESS)
9089 "Set physical link state to SerDes Loopback failed with return %d\n",
9097 * Do all special steps to set up loopback.
9099 static int init_loopback(struct hfi1_devdata *dd)
9101 dd_dev_info(dd, "Entering loopback mode\n");
9103 /* all loopbacks should disable self GUID check */
9104 write_csr(dd, DC_DC8051_CFG_MODE,
9105 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9108 * The simulator has only one loopback option - LCB. Switch
9109 * to that option, which includes quick link up.
9111 * Accept all valid loopback values.
9113 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9114 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9115 loopback == LOOPBACK_CABLE)) {
9116 loopback = LOOPBACK_LCB;
9121 /* handle serdes loopback */
9122 if (loopback == LOOPBACK_SERDES) {
9123 /* internal serdes loopback needs quick linkup on RTL */
9124 if (dd->icode == ICODE_RTL_SILICON)
9126 return set_serdes_loopback_mode(dd);
9129 /* LCB loopback - handled at poll time */
9130 if (loopback == LOOPBACK_LCB) {
9131 quick_linkup = 1; /* LCB is always quick linkup */
9133 /* not supported in emulation due to emulation RTL changes */
9134 if (dd->icode == ICODE_FPGA_EMULATION) {
9136 "LCB loopback not supported in emulation\n");
9142 /* external cable loopback requires no extra steps */
9143 if (loopback == LOOPBACK_CABLE)
9146 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9151 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9152 * used in the Verify Capability link width attribute.
9154 static u16 opa_to_vc_link_widths(u16 opa_widths)
9159 static const struct link_bits {
9162 } opa_link_xlate[] = {
9163 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9164 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9165 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9166 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9169 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9170 if (opa_widths & opa_link_xlate[i].from)
9171 result |= opa_link_xlate[i].to;
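/*
 * e.g. an FM width of (OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X)
 * translates to VC bits 0b1001: bit 0 for 1X plus bit 3 for 4X.
 */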
9177 * Set link attributes before moving to polling.
9179 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9181 struct hfi1_devdata *dd = ppd->dd;
9183 u8 tx_polarity_inversion;
9184 u8 rx_polarity_inversion;
9187 /* reset our fabric serdes to clear any lingering problems */
9188 fabric_serdes_reset(dd);
9190 /* set the local tx rate - need to read-modify-write */
9191 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9192 &rx_polarity_inversion, &ppd->local_tx_rate);
9194 goto set_local_link_attributes_fail;
9196 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9197 /* set the tx rate to the fastest enabled */
9198 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9199 ppd->local_tx_rate = 1;
9201 ppd->local_tx_rate = 0;
9203 /* set the tx rate to all enabled */
9204 ppd->local_tx_rate = 0;
9205 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9206 ppd->local_tx_rate |= 2;
9207 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9208 ppd->local_tx_rate |= 1;
9211 enable_lane_tx = 0xF; /* enable all four lanes */
9212 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9213 rx_polarity_inversion, ppd->local_tx_rate);
9214 if (ret != HCMD_SUCCESS)
9215 goto set_local_link_attributes_fail;
9218 * DC supports continuous updates.
9220 ret = write_vc_local_phy(dd,
9221 0 /* no power management */,
9222 1 /* continuous updates */);
9223 if (ret != HCMD_SUCCESS)
9224 goto set_local_link_attributes_fail;
9226 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9227 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9228 ppd->port_crc_mode_enabled);
9229 if (ret != HCMD_SUCCESS)
9230 goto set_local_link_attributes_fail;
9232 ret = write_vc_local_link_width(dd, 0, 0,
9233 opa_to_vc_link_widths(
9234 ppd->link_width_enabled));
9235 if (ret != HCMD_SUCCESS)
9236 goto set_local_link_attributes_fail;
9238 /* let peer know who we are */
9239 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9240 if (ret == HCMD_SUCCESS)
9243 set_local_link_attributes_fail:
9245 "Failed to set local link attributes, return 0x%x\n",
9251 * Call this to start the link.
9252 * Do not do anything if the link is disabled.
9253 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9255 int start_link(struct hfi1_pportdata *ppd)
9258 * Tune the SerDes to a ballpark setting for optimal signal and bit
9259 * error rate. Needs to be done before starting the link.
9263 if (!ppd->link_enabled) {
9264 dd_dev_info(ppd->dd,
9265 "%s: stopping link start because link is disabled\n",
9269 if (!ppd->driver_link_ready) {
9270 dd_dev_info(ppd->dd,
9271 "%s: stopping link start because driver is not ready\n",
9277 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9278 * pkey table can be configured properly if the HFI unit is connected
9279 * to a switch port with MgmtAllowed=NO
9281 clear_full_mgmt_pkey(ppd);
9283 return set_link_state(ppd, HLS_DN_POLL);
9286 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9288 struct hfi1_devdata *dd = ppd->dd;
9290 unsigned long timeout;
9293 * Some QSFP cables have a quirk that asserts the IntN line as a side
9294 * effect of power up on plug-in. We ignore this false positive
9295 * interrupt until the module has finished powering up by waiting for
9296 * a minimum timeout of the module inrush initialization time of
9297 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9298 * module have stabilized.
9303 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9305 timeout = jiffies + msecs_to_jiffies(2000);
9307 mask = read_csr(dd, dd->hfi1_id ?
9308 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9309 if (!(mask & QSFP_HFI0_INT_N))
9311 if (time_after(jiffies, timeout)) {
9312 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9320 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9322 struct hfi1_devdata *dd = ppd->dd;
9325 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9328 * Clear the status register to avoid an immediate interrupt
9329 * when we re-enable the IntN pin
9331 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9333 mask |= (u64)QSFP_HFI0_INT_N;
9335 mask &= ~(u64)QSFP_HFI0_INT_N;
9337 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9340 void reset_qsfp(struct hfi1_pportdata *ppd)
9342 struct hfi1_devdata *dd = ppd->dd;
9343 u64 mask, qsfp_mask;
9345 /* Disable INT_N from triggering QSFP interrupts */
9346 set_qsfp_int_n(ppd, 0);
9348 /* Reset the QSFP */
9349 mask = (u64)QSFP_HFI0_RESET_N;
9351 qsfp_mask = read_csr(dd,
9352 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9355 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9361 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9363 wait_for_qsfp_init(ppd);
9366 * Allow INT_N to trigger the QSFP interrupt to watch
9367 * for alarms and warnings
9369 set_qsfp_int_n(ppd, 1);
9372 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9373 u8 *qsfp_interrupt_status)
9375 struct hfi1_devdata *dd = ppd->dd;
9377 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9378 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9379 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9382 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9383 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9384 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9388 * The remaining alarms/warnings don't matter if the link is down.
9390 if (ppd->host_link_state & HLS_DOWN)
9393 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9394 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9395 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9398 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9399 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9400 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9403 /* Byte 2 is vendor specific */
9405 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9406 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9407 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9410 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9411 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9412 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9415 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9416 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9417 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9420 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9421 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9422 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9425 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9426 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9427 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9430 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9431 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9432 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9435 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9436 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9437 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9440 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9441 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9442 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9445 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9446 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9447 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9450 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9451 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9452 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9455 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9456 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9457 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9460 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9461 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9462 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9465 /* Bytes 9-10 and 11-12 are reserved */
9466 /* Bytes 13-15 are vendor specific */
9471 /* This routine will only be scheduled if the QSFP module present is asserted */
void qsfp_event(struct work_struct *work)
{
	struct qsfp_data *qd;
	struct hfi1_pportdata *ppd;
	struct hfi1_devdata *dd;

	qd = container_of(work, struct qsfp_data, qsfp_work);
	ppd = qd->ppd;
	dd = ppd->dd;

	/* sanity check: do nothing if the module is no longer present */
	if (!qsfp_mod_present(ppd))
		return;

	/*
	 * Turn DC back on after cable has been re-inserted. Up until
	 * now, the DC has been in reset to save power.
	 */
	dc_start(dd);

	if (qd->cache_refresh_required) {
		set_qsfp_int_n(ppd, 0);
		wait_for_qsfp_init(ppd);
		/*
		 * Allow INT_N to trigger the QSFP interrupt to watch
		 * for alarms and warnings
		 */
		set_qsfp_int_n(ppd, 1);
		start_link(ppd);
	}

	if (qd->check_interrupt_flags) {
		u8 qsfp_interrupt_status[16] = {0,};

		if (one_qsfp_read(ppd, dd->hfi1_id, 6,
				  &qsfp_interrupt_status[0], 16) != 16) {
			dd_dev_info(dd,
				    "%s: Failed to read status of QSFP module\n",
				    __func__);
		} else {
			unsigned long flags;

			handle_qsfp_error_conditions(ppd,
						     qsfp_interrupt_status);
			spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
			ppd->qsfp_info.check_interrupt_flags = 0;
			spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
					       flags);
		}
	}
}
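
/*
 * Set up this HFI's QSFP interrupts: mask off the other HFI's QSFP
 * interrupt in the CCE interrupt mask, clear any stale QSFP status to
 * avoid spurious interrupts, and program the invert CSR for the
 * active-low INT_N and MODPRST_N pins.
 */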
static void init_qsfp_int(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 qsfp_mask, cce_int_mask;
	const int qsfp1_int_smask = QSFP1_INT % 64;
	const int qsfp2_int_smask = QSFP2_INT % 64;

	/*
	 * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
	 * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
	 * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
	 * the index of the appropriate CSR in the CCEIntMask CSR array
	 */
	cce_int_mask = read_csr(dd, CCE_INT_MASK +
				(8 * (QSFP1_INT / 64)));
	if (dd->hfi1_id) {
		cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
		write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
			  cce_int_mask);
	} else {
		cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
		write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
			  cce_int_mask);
	}

	qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
	/* Clear current status to avoid spurious interrupts */
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
		  qsfp_mask);
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
		  qsfp_mask);

	set_qsfp_int_n(ppd, 0);

	/* Handle active low nature of INT_N and MODPRST_N pins */
	if (qsfp_mod_present(ppd))
		qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
		  qsfp_mask);
}
/*
 * Do a one-time initialize of the LCB block.
 */
static void init_lcb(struct hfi1_devdata *dd)
{
	/* simulator does not correctly handle LCB cclk loopback, skip */
	if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
		return;

	/* the DC has been reset earlier in the driver load */

	/* set LCB for cclk loopback on the port */
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
	write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
	write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
	write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
	write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
	write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
	write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
}
/*
 * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
 * on error.
 */
static int test_qsfp_read(struct hfi1_pportdata *ppd)
{
	int ret;
	u8 status = 0;

	/*
	 * Report success if not a QSFP or, if it is a QSFP, but the cable is
	 * not present
	 */
	if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
		return 0;

	/* read byte 2, the status byte */
	ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
	if (ret < 0)
		return ret;
	if (ret != 1)
		return -EIO;

	return 0; /* success */
}
9617 * Values for QSFP retry.
9619 * Give up after 10s (20 x 500ms). The overall timeout was empirically
9620 * arrived at from experience on a large cluster.
9622 #define MAX_QSFP_RETRIES 20
9623 #define QSFP_RETRY_WAIT 500 /* msec */
9626 * Try a QSFP read. If it fails, schedule a retry for later.
9627 * Called on first link activation after driver load.
static void try_start_link(struct hfi1_pportdata *ppd)
{
	if (test_qsfp_read(ppd)) {
		/* read failed */
9633 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
			dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
			return;
		}
9637 dd_dev_info(ppd->dd,
9638 "QSFP not responding, waiting and retrying %d\n",
9639 (int)ppd->qsfp_retry_count);
9640 ppd->qsfp_retry_count++;
9641 queue_delayed_work(ppd->hfi1_wq, &ppd->start_link_work,
				   msecs_to_jiffies(QSFP_RETRY_WAIT));
		return;
	}
	ppd->qsfp_retry_count = 0;

	start_link(ppd);
}
/*
 * Workqueue function to start the link after a delay.
 */
void handle_start_link(struct work_struct *work)
{
	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
						  start_link_work.work);
	try_start_link(ppd);
}
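
/*
 * One-time link bring-up for the port: assign the port GUID if it has
 * not been set, record the OPA power-up linkinit reason, do the
 * one-time LCB and loopback initialization, prime the QSFP interrupt
 * if a QSFP port is used, and then attempt to start the link.
 */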
9660 int bringup_serdes(struct hfi1_pportdata *ppd)
9662 struct hfi1_devdata *dd = ppd->dd;
9666 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9667 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
	guid = ppd->guids[HFI1_PORT_GUID_INDEX];
	if (!guid) {
		guid = dd->base_guid + ppd->port - 1;
		ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
	}
9676 /* Set linkinit_reason on power up per OPA spec */
9677 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9679 /* one-time init of the LCB */
9683 ret = init_loopback(dd);
9689 if (ppd->port_type == PORT_TYPE_QSFP) {
9690 set_qsfp_int_n(ppd, 0);
9691 wait_for_qsfp_init(ppd);
9692 set_qsfp_int_n(ppd, 1);
	try_start_link(ppd);

	return 0;
}
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;

	/*
	 * Shut down the link and keep it down.  First turn off the driver's
	 * willingness to allow the link to be up (driver_link_ready).
	 * Then make sure the link is not automatically restarted
	 * (link_enabled).  Cancel any pending restart.  And finally
	 * go offline.
	 */
9710 ppd->driver_link_ready = 0;
9711 ppd->link_enabled = 0;
9713 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9714 flush_delayed_work(&ppd->start_link_work);
9715 cancel_delayed_work_sync(&ppd->start_link_work);
9717 ppd->offline_disabled_reason =
9718 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9719 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9720 OPA_LINKDOWN_REASON_SMA_DISABLED);
9721 set_link_state(ppd, HLS_DN_OFFLINE);
9723 /* disable the port */
	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
}
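
/*
 * Allocate the per-CPU RC ACK counters for each port.  Returns
 * non-zero if any of the allocations fail.
 */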
static inline int init_cpu_counters(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd;
	int i;

	ppd = (struct hfi1_pportdata *)(dd + 1);
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		ppd->ibport_data.rvp.rc_acks = NULL;
		ppd->ibport_data.rvp.rc_qacks = NULL;
		ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
		ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
		if (!ppd->ibport_data.rvp.rc_acks ||
		    !ppd->ibport_data.rvp.rc_delayed_comp ||
		    !ppd->ibport_data.rvp.rc_qacks)
			return -ENOMEM;
	}

	return 0;
}
static const char * const pt_names[] = {
	"expected",
	"eager",
	"invalid"
};

static const char *pt_name(u32 type)
{
	return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
}
9760 * index is the index into the receive array
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
		  u32 type, unsigned long pa, u16 order)
{
	u64 reg;
	void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
			      (dd->kregbase + RCV_ARRAY));

	if (!(dd->flags & HFI1_PRESENT))
		goto done;

	if (type == PT_INVALID) {
		pa = 0;
	} else if (type > PT_INVALID) {
		dd_dev_err(dd,
			   "unexpected receive array type %u for index %u, not handled\n",
			   type, index);
		goto done;
	}

	hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
		  pt_name(type), index, pa, (unsigned long)order);
9784 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9785 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9786 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9787 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9788 << RCV_ARRAY_RT_ADDR_SHIFT;
9789 writeq(reg, base + (index * 8));
	if (type == PT_EAGER)
		/*
		 * Eager entries are written one-by-one so we have to push them
		 * after we write the entry.
		 */
		flush_wc();
done:
	return;
}
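
/*
 * Invalidate every eager and expected receive array entry owned by
 * the given context.
 */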
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
{
	struct hfi1_devdata *dd = rcd->dd;
	u32 i;

	/* this could be optimized */
	for (i = rcd->eager_base; i < rcd->eager_base +
		     rcd->egrbufs.alloced; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);

	for (i = rcd->expected_base;
	     i < rcd->expected_base + rcd->expected_count; i++)
		hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}
static const char * const ib_cfg_name_strings[] = {
	"HFI1_IB_CFG_LIDLMC",
	"HFI1_IB_CFG_LWID_DG_ENB",
	"HFI1_IB_CFG_LWID_ENB",
	"HFI1_IB_CFG_LWID",
	"HFI1_IB_CFG_SPD_ENB",
	"HFI1_IB_CFG_SPD",
	"HFI1_IB_CFG_RXPOL_ENB",
	"HFI1_IB_CFG_LREV_ENB",
	"HFI1_IB_CFG_LINKLATENCY",
	"HFI1_IB_CFG_HRTBT",
	"HFI1_IB_CFG_OP_VLS",
	"HFI1_IB_CFG_VL_HIGH_CAP",
	"HFI1_IB_CFG_VL_LOW_CAP",
	"HFI1_IB_CFG_OVERRUN_THRESH",
	"HFI1_IB_CFG_PHYERR_THRESH",
	"HFI1_IB_CFG_LINKDEFAULT",
	"HFI1_IB_CFG_PKEYS",
	"HFI1_IB_CFG_MTU",
	"HFI1_IB_CFG_LSTATE",
	"HFI1_IB_CFG_VL_HIGH_LIMIT",
	"HFI1_IB_CFG_PMA_TICKS",
	"HFI1_IB_CFG_PORT"
};
static const char *ib_cfg_name(int which)
{
	if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
		return "invalid";
	return ib_cfg_name_strings[which];
}
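
/*
 * Read a port configuration value, selected by a HFI1_IB_CFG_*
 * identifier.
 */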
9848 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9850 struct hfi1_devdata *dd = ppd->dd;
9854 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9855 val = ppd->link_width_enabled;
9857 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9858 val = ppd->link_width_active;
9860 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9861 val = ppd->link_speed_enabled;
9863 case HFI1_IB_CFG_SPD: /* current Link speed */
9864 val = ppd->link_speed_active;
9867 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9868 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9869 case HFI1_IB_CFG_LINKLATENCY:
9872 case HFI1_IB_CFG_OP_VLS:
9873 val = ppd->vls_operational;
9875 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9876 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9878 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9879 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9881 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9882 val = ppd->overrun_threshold;
9884 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9885 val = ppd->phy_error_threshold;
9887 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9888 val = dd->link_default;
	case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
	case HFI1_IB_CFG_PMA_TICKS:
	default:
		if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
			dd_dev_info(dd,
				    "%s: which %s: not implemented\n",
				    __func__,
				    ib_cfg_name(which));
		break;
	}

	return val;
}
/*
 * The largest MAD packet size.
 */
#define MAX_MAD_PACKET 2048
/*
 * Return the maximum header bytes that can go on the _wire_
 * for this device. This count includes the ICRC which is
 * not part of the packet held in memory but it is appended
 * by the HW.
 * This is dependent on the device's receive header entry size.
 * HFI allows this to be set per-receive context, but the
 * driver presently enforces a global value.
 */
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
{
	/*
	 * The maximum non-payload (MTU) bytes in LRH.PktLen are
	 * the Receive Header Entry Size minus the PBC (or RHF) size
	 * plus one DW for the ICRC appended by HW.
	 *
	 * dd->rcd[0].rcvhdrqentsize is in DW.
	 * We use rcd[0] as all contexts will have the same value. Also,
	 * the first kernel context would have been allocated by now so
	 * we are guaranteed a valid value.
	 */
	return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
}
/**
 * set_send_length - set the send length
 * @ppd: per port data
 *
 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 *
 * This routine changes all VL values except VL15, which it maintains at
 * the same value.
 */
static void set_send_length(struct hfi1_pportdata *ppd)
{
	struct hfi1_devdata *dd = ppd->dd;
9950 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9951 u32 maxvlmtu = dd->vld[15].mtu;
9952 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9953 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9954 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9958 for (i = 0; i < ppd->vls_supported; i++) {
9959 if (dd->vld[i].mtu > maxvlmtu)
9960 maxvlmtu = dd->vld[i].mtu;
9962 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9963 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9964 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9966 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9967 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9968 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9970 write_csr(dd, SEND_LEN_CHECK0, len1);
9971 write_csr(dd, SEND_LEN_CHECK1, len2);
9972 /* adjust kernel credit return thresholds based on new MTUs */
9973 /* all kernel receive contexts have the same hdrqentsize */
9974 for (i = 0; i < ppd->vls_supported; i++) {
9975 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9976 sc_mtu_to_threshold(dd->vld[i].sc,
9978 dd->rcd[0]->rcvhdrqentsize));
9979 for (j = 0; j < INIT_SC_PER_VL; j++)
9980 sc_set_cr_threshold(
9981 pio_select_send_context_vl(dd, j, i),
9984 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9985 sc_mtu_to_threshold(dd->vld[15].sc,
9987 dd->rcd[0]->rcvhdrqentsize));
9988 sc_set_cr_threshold(dd->vld[15].sc, thres);
9990 /* Adjust maximum MTU for the port in DC */
9991 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9992 (ilog2(maxvlmtu >> 8) + 1);
9993 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9994 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9995 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9996 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
	write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
}
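
/*
 * Program the LID/LMC: set the DC target DLID and DLID mask, update
 * the SLID check on every send context, and tell the SDMA engines
 * about the new LMC mask.
 */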
static void set_lidlmc(struct hfi1_pportdata *ppd)
{
	int i;
	u64 sreg = 0;
	struct hfi1_devdata *dd = ppd->dd;
	u32 mask = ~((1U << ppd->lmc) - 1);
	u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10008 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10009 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10010 c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10011 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10012 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10013 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10014 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10017 * Iterate over all the send contexts and set their SLID check
10019 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10020 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10021 (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10022 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10024 for (i = 0; i < dd->chip_send_contexts; i++) {
10025 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10027 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10030 /* Now we have to do the same thing for the sdma engines */
10031 sdma_update_lmc(dd, mask, ppd->lid);
10034 static const char *state_completed_string(u32 completed)
10036 static const char * const state_completed[] = {
10042 if (completed < ARRAY_SIZE(state_completed))
10043 return state_completed[completed];
static const char all_lanes_dead_timeout_expired[] =
	"All lanes were inactive - was the interconnect media removed?";
10050 static const char tx_out_of_policy[] =
10051 "Passing lanes on local port do not meet the local link width policy";
10052 static const char no_state_complete[] =
10053 "State timeout occurred before link partner completed the state";
10054 static const char * const state_complete_reasons[] = {
10055 [0x00] = "Reason unknown",
10056 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10057 [0x02] = "Link partner reported failure",
10058 [0x10] = "Unable to achieve frame sync on any lane",
10060 "Unable to find a common bit rate with the link partner",
10062 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10064 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10065 [0x14] = no_state_complete,
10067 "State timeout occurred before link partner identified equalization presets",
10069 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10070 [0x17] = tx_out_of_policy,
10071 [0x20] = all_lanes_dead_timeout_expired,
10073 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10074 [0x22] = no_state_complete,
10076 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10077 [0x24] = tx_out_of_policy,
10078 [0x30] = all_lanes_dead_timeout_expired,
10080 "State timeout occurred waiting for host to process received frames",
10081 [0x32] = no_state_complete,
10083 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10084 [0x34] = tx_out_of_policy,
10087 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10090 const char *str = NULL;
10092 if (code < ARRAY_SIZE(state_complete_reasons))
10093 str = state_complete_reasons[code];
10100 /* describe the given last state complete frame */
10101 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10102 const char *prefix)
{
	struct hfi1_devdata *dd = ppd->dd;
	u32 success;
	u32 state;
	u32 reason;
	u32 lanes;

	/*
	 * Decode frame:
	 * [ 0: 0] - success
	 * [ 3: 1] - state
	 * [ 7: 4] - next state timeout
	 * [15: 8] - reason code
	 * [31:16] - lane mask
	 */
10118 success = frame & 0x1;
10119 state = (frame >> 1) & 0x7;
10120 reason = (frame >> 8) & 0xff;
10121 lanes = (frame >> 16) & 0xffff;
10123 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
	dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
10126 state_completed_string(state), state);
10127 dd_dev_err(dd, " state successfully completed: %s\n",
10128 success ? "yes" : "no");
10129 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10130 reason, state_complete_reason_code_string(ppd, reason));
	dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
}
10135 * Read the last state complete frames and explain them. This routine
10136 * expects to be called if the link went down during link negotiation
10137 * and initialization (LNI). That is, anywhere between polling and link up.
10139 static void check_lni_states(struct hfi1_pportdata *ppd)
10141 u32 last_local_state;
10142 u32 last_remote_state;
10144 read_last_local_state(ppd->dd, &last_local_state);
10145 read_last_remote_state(ppd->dd, &last_remote_state);
10148 * Don't report anything if there is nothing to report. A value of
10149 * 0 means the link was taken down while polling and there was no
	 * training in progress.
	 */
	if (last_local_state == 0 && last_remote_state == 0)
		return;
10155 decode_state_complete(ppd, last_local_state, "transmitted");
	decode_state_complete(ppd, last_remote_state, "received");
}
10159 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
{
	u64 reg;
	unsigned long timeout;

	/* watch LCB_STS_LINK_TRANSFER_ACTIVE */
	timeout = jiffies + msecs_to_jiffies(wait_ms);
	while (1) {
		reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
		if (reg)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(dd,
				   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
			return -ETIMEDOUT;
		}
		udelay(2);
	}
	return 0;
}
10181 /* called when the logical link state is not down as it should be */
10182 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10184 struct hfi1_devdata *dd = ppd->dd;
10187 * Bring link up in LCB loopback
10189 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10190 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10191 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10193 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10194 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10195 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10196 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10198 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10199 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10201 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10202 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10204 wait_link_transfer_active(dd, 100);
10207 * Bring the link down again.
10209 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10210 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10211 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10213 /* call again to adjust ppd->statusp, if needed */
10214 get_logical_state(ppd);
10218 * Helper for set_link_state(). Do not call except from that routine.
10219 * Expects ppd->hls_mutex to be held.
10221 * @rem_reason value to be sent to the neighbor
10223 * LinkDownReasons only set if transition succeeds.
10225 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10227 struct hfi1_devdata *dd = ppd->dd;
10228 u32 pstate, previous_state;
10233 update_lcb_cache(dd);
10235 previous_state = ppd->host_link_state;
10236 ppd->host_link_state = HLS_GOING_OFFLINE;
10237 pstate = read_physical_state(dd);
10238 if (pstate == PLS_OFFLINE) {
10239 do_transition = 0; /* in right state */
10240 do_wait = 0; /* ...no need to wait */
10241 } else if ((pstate & 0xf0) == PLS_OFFLINE) {
10242 do_transition = 0; /* in an offline transient state */
10243 do_wait = 1; /* ...wait for it to settle */
10245 do_transition = 1; /* need to move to offline */
10246 do_wait = 1; /* ...will need to wait */
10249 if (do_transition) {
10250 ret = set_physical_link_state(dd,
10251 (rem_reason << 8) | PLS_OFFLINE);
10253 if (ret != HCMD_SUCCESS) {
10255 "Failed to transition to Offline link state, return %d\n",
10259 if (ppd->offline_disabled_reason ==
10260 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10261 ppd->offline_disabled_reason =
10262 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10266 /* it can take a while for the link to go down */
10267 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
10273 * Now in charge of LCB - must be after the physical state is
10274 * offline.quiet and before host_link_state is changed.
10276 set_host_lcb_access(dd);
10277 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10279 /* make sure the logical state is also down */
10280 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10282 force_logical_link_state_down(ppd);
10284 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10286 if (ppd->port_type == PORT_TYPE_QSFP &&
10287 ppd->qsfp_info.limiting_active &&
10288 qsfp_mod_present(ppd)) {
10291 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10293 set_qsfp_tx(ppd, 0);
10294 release_chip_resource(dd, qsfp_resource(dd));
10296 /* not fatal, but should warn */
10298 "Unable to acquire lock to turn off QSFP TX\n");
10303 * The LNI has a mandatory wait time after the physical state
10304 * moves to Offline.Quiet. The wait time may be different
10305 * depending on how the link went down. The 8051 firmware
10306 * will observe the needed wait time and only move to ready
10307 * when that is completed. The largest of the quiet timeouts
10308 * is 6s, so wait that long and then at least 0.5s more for
10309 * other transitions, and another 0.5s for a buffer.
10311 ret = wait_fm_ready(dd, 7000);
10314 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10315 /* state is really offline, so make it so */
10316 ppd->host_link_state = HLS_DN_OFFLINE;
10321 * The state is now offline and the 8051 is ready to accept host
10323 * - change our state
10324 * - notify others if we were previously in a linkup state
10326 ppd->host_link_state = HLS_DN_OFFLINE;
10327 if (previous_state & HLS_UP) {
10328 /* went down while link was up */
10329 handle_linkup_change(dd, 0);
10330 } else if (previous_state
10331 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10332 /* went down while attempting link up */
10333 check_lni_states(ppd);
10336 /* the active link width (downgrade) is 0 on link down */
10337 ppd->link_width_active = 0;
10338 ppd->link_width_downgrade_tx_active = 0;
10339 ppd->link_width_downgrade_rx_active = 0;
10340 ppd->current_egress_rate = 0;
10344 /* return the link state name */
10345 static const char *link_state_name(u32 state)
10348 int n = ilog2(state);
10349 static const char * const names[] = {
10350 [__HLS_UP_INIT_BP] = "INIT",
10351 [__HLS_UP_ARMED_BP] = "ARMED",
10352 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10353 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10354 [__HLS_DN_POLL_BP] = "POLL",
10355 [__HLS_DN_DISABLE_BP] = "DISABLE",
10356 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10357 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10358 [__HLS_GOING_UP_BP] = "GOING_UP",
10359 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10360 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10363 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10364 return name ? name : "unknown";
10367 /* return the link state reason name */
10368 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10370 if (state == HLS_UP_INIT) {
10371 switch (ppd->linkinit_reason) {
10372 case OPA_LINKINIT_REASON_LINKUP:
10374 case OPA_LINKINIT_REASON_FLAPPING:
10375 return "(FLAPPING)";
10376 case OPA_LINKINIT_OUTSIDE_POLICY:
10377 return "(OUTSIDE_POLICY)";
10378 case OPA_LINKINIT_QUARANTINED:
10379 return "(QUARANTINED)";
10380 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10381 return "(INSUFIC_CAPABILITY)";
10390 * driver_physical_state - convert the driver's notion of a port's
10391 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10392 * Return -1 (converted to a u32) to indicate error.
10394 u32 driver_physical_state(struct hfi1_pportdata *ppd)
{
	switch (ppd->host_link_state) {
	case HLS_UP_INIT:
	case HLS_UP_ARMED:
	case HLS_UP_ACTIVE:
		return IB_PORTPHYSSTATE_LINKUP;
	case HLS_DN_POLL:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_DN_DISABLE:
		return IB_PORTPHYSSTATE_DISABLED;
	case HLS_DN_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_VERIFY_CAP:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_GOING_UP:
		return IB_PORTPHYSSTATE_POLLING;
	case HLS_GOING_OFFLINE:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_LINK_COOLDOWN:
		return OPA_PORTPHYSSTATE_OFFLINE;
	case HLS_DN_DOWNDEF:
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}
10424 * driver_logical_state - convert the driver's notion of a port's
10425 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10426 * (converted to a u32) to indicate error.
10428 u32 driver_logical_state(struct hfi1_pportdata *ppd)
10430 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10431 return IB_PORT_DOWN;
	switch (ppd->host_link_state & HLS_UP) {
	case HLS_UP_INIT:
		return IB_PORT_INIT;
	case HLS_UP_ARMED:
		return IB_PORT_ARMED;
	case HLS_UP_ACTIVE:
		return IB_PORT_ACTIVE;
	default:
		dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
			   ppd->host_link_state);
		return -1;
	}
}
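
/*
 * Record the link down reasons.  Only the first reasons seen are
 * kept; subsequent calls are ignored while earlier values are still
 * pending.
 */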
10447 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10448 u8 neigh_reason, u8 rem_reason)
10450 if (ppd->local_link_down_reason.latest == 0 &&
10451 ppd->neigh_link_down_reason.latest == 0) {
10452 ppd->local_link_down_reason.latest = lcl_reason;
10453 ppd->neigh_link_down_reason.latest = neigh_reason;
10454 ppd->remote_link_down_reason = rem_reason;
10459 * Change the physical and/or logical link state.
10461 * Do not call this routine while inside an interrupt. It contains
10462 * calls to routines that can take multiple seconds to finish.
10464 * Returns 0 on success, -errno on failure.
10466 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10468 struct hfi1_devdata *dd = ppd->dd;
10469 struct ib_event event = {.device = NULL};
10471 int orig_new_state, poll_bounce;
10473 mutex_lock(&ppd->hls_lock);
10475 orig_new_state = state;
10476 if (state == HLS_DN_DOWNDEF)
10477 state = dd->link_default;
10479 /* interpret poll -> poll as a link bounce */
10480 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10481 state == HLS_DN_POLL;
10483 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10484 link_state_name(ppd->host_link_state),
10485 link_state_name(orig_new_state),
10486 poll_bounce ? "(bounce) " : "",
10487 link_state_reason_name(ppd, state));
10490 * If we're going to a (HLS_*) link state that implies the logical
10491 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10492 * reset is_sm_config_started to 0.
10494 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10495 ppd->is_sm_config_started = 0;
10498 * Do nothing if the states match. Let a poll to poll link bounce
10501 if (ppd->host_link_state == state && !poll_bounce)
10506 if (ppd->host_link_state == HLS_DN_POLL &&
10507 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10509 * Quick link up jumps from polling to here.
10511 * Whether in normal or loopback mode, the
10512 * simulator jumps from polling to link up.
10513 * Accept that here.
10516 } else if (ppd->host_link_state != HLS_GOING_UP) {
10521 * Wait for Link_Up physical state.
10522 * Physical and Logical states should already be
10523 * be transitioned to LinkUp and LinkInit respectively.
10525 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10528 "%s: physical state did not change to LINK-UP\n",
10533 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10536 "%s: logical state did not change to INIT\n",
10539 /* clear old transient LINKINIT_REASON code */
10540 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10541 ppd->linkinit_reason =
10542 OPA_LINKINIT_REASON_LINKUP;
10544 /* enable the port */
10545 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10547 handle_linkup_change(dd, 1);
10548 ppd->host_link_state = HLS_UP_INIT;
10552 if (ppd->host_link_state != HLS_UP_INIT)
10555 ppd->host_link_state = HLS_UP_ARMED;
10556 set_logical_state(dd, LSTATE_ARMED);
10557 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10559 /* logical state didn't change, stay at init */
10560 ppd->host_link_state = HLS_UP_INIT;
10562 "%s: logical state did not change to ARMED\n",
10566 * The simulator does not currently implement SMA messages,
10567 * so neighbor_normal is not set. Set it here when we first
10570 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10571 ppd->neighbor_normal = 1;
10573 case HLS_UP_ACTIVE:
10574 if (ppd->host_link_state != HLS_UP_ARMED)
10577 ppd->host_link_state = HLS_UP_ACTIVE;
10578 set_logical_state(dd, LSTATE_ACTIVE);
10579 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10581 /* logical state didn't change, stay at armed */
10582 ppd->host_link_state = HLS_UP_ARMED;
10584 "%s: logical state did not change to ACTIVE\n",
10587 /* tell all engines to go running */
10588 sdma_all_running(dd);
		/* Signal the IB layer that the port has gone active */
10591 event.device = &dd->verbs_dev.rdi.ibdev;
10592 event.element.port_num = ppd->port;
10593 event.event = IB_EVENT_PORT_ACTIVE;
10597 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10598 ppd->host_link_state == HLS_DN_OFFLINE) &&
10601 /* Hand LED control to the DC */
10602 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10604 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10605 u8 tmp = ppd->link_enabled;
10607 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10609 ppd->link_enabled = tmp;
10612 ppd->remote_link_down_reason = 0;
10614 if (ppd->driver_link_ready)
10615 ppd->link_enabled = 1;
10618 set_all_slowpath(ppd->dd);
10619 ret = set_local_link_attributes(ppd);
10623 ppd->port_error_action = 0;
10624 ppd->host_link_state = HLS_DN_POLL;
10626 if (quick_linkup) {
10627 /* quick linkup does not go into polling */
10628 ret = do_quick_linkup(dd);
10630 ret1 = set_physical_link_state(dd, PLS_POLLING);
10631 if (ret1 != HCMD_SUCCESS) {
10633 "Failed to transition to Polling link state, return 0x%x\n",
10638 ppd->offline_disabled_reason =
10639 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10641 * If an error occurred above, go back to offline. The
10642 * caller may reschedule another attempt.
10645 goto_offline(ppd, 0);
10647 cache_physical_state(ppd);
10649 case HLS_DN_DISABLE:
10650 /* link is disabled */
10651 ppd->link_enabled = 0;
10653 /* allow any state to transition to disabled */
10655 /* must transition to offline first */
10656 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10657 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10660 ppd->remote_link_down_reason = 0;
10663 if (!dd->dc_shutdown) {
10664 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10665 if (ret1 != HCMD_SUCCESS) {
10667 "Failed to transition to Disabled link state, return 0x%x\n",
10672 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10675 "%s: physical state did not change to DISABLED\n",
10681 ppd->host_link_state = HLS_DN_DISABLE;
10683 case HLS_DN_OFFLINE:
10684 if (ppd->host_link_state == HLS_DN_DISABLE)
10687 /* allow any state to transition to offline */
10688 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10690 ppd->remote_link_down_reason = 0;
10692 case HLS_VERIFY_CAP:
10693 if (ppd->host_link_state != HLS_DN_POLL)
10695 ppd->host_link_state = HLS_VERIFY_CAP;
10696 cache_physical_state(ppd);
10699 if (ppd->host_link_state != HLS_VERIFY_CAP)
10702 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10703 if (ret1 != HCMD_SUCCESS) {
10705 "Failed to transition to link up state, return 0x%x\n",
10710 ppd->host_link_state = HLS_GOING_UP;
10713 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10714 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10716 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10725 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10726 __func__, link_state_name(ppd->host_link_state),
10727 link_state_name(state));
	ret = -EINVAL;

done:
	mutex_unlock(&ppd->hls_lock);

	if (event.device)
		ib_dispatch_event(&event);

	return ret;
}
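
/*
 * Set a port configuration value, selected by a HFI1_IB_CFG_*
 * identifier.
 */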
10739 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10745 case HFI1_IB_CFG_LIDLMC:
10748 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10750 * The VL Arbitrator high limit is sent in units of 4k
10751 * bytes, while HFI stores it in units of 64 bytes.
10754 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10755 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10756 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10758 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10759 /* HFI only supports POLL as the default link down state */
10760 if (val != HLS_DN_POLL)
10763 case HFI1_IB_CFG_OP_VLS:
10764 if (ppd->vls_operational != val) {
10765 ppd->vls_operational = val;
10771 * For link width, link width downgrade, and speed enable, always AND
10772 * the setting with what is actually supported. This has two benefits.
10773 * First, enabled can't have unsupported values, no matter what the
10774 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10775 * "fill in with your supported value" have all the bits in the
10776 * field set, so simply ANDing with supported has the desired result.
10778 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10779 ppd->link_width_enabled = val & ppd->link_width_supported;
10781 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10782 ppd->link_width_downgrade_enabled =
10783 val & ppd->link_width_downgrade_supported;
10785 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10786 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10788 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
		/*
		 * HFI does not follow IB specs; save this value
		 * so we can report it, if asked.
		 */
		ppd->overrun_threshold = val;
		break;
10795 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
		/*
		 * HFI does not follow IB specs; save this value
		 * so we can report it, if asked.
		 */
		ppd->phy_error_threshold = val;
		break;
10803 case HFI1_IB_CFG_MTU:
10804 set_send_length(ppd);
10807 case HFI1_IB_CFG_PKEYS:
10808 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10809 set_partition_keys(ppd);
10813 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10814 dd_dev_info(ppd->dd,
10815 "%s: which %s, val 0x%x: not implemented\n",
10816 __func__, ib_cfg_name(which), val);
10822 /* begin functions related to vl arbitration table caching */
10823 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10827 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10828 VL_ARB_LOW_PRIO_TABLE_SIZE);
10829 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10830 VL_ARB_HIGH_PRIO_TABLE_SIZE);
10833 * Note that we always return values directly from the
10834 * 'vl_arb_cache' (and do no CSR reads) in response to a
10835 * 'Get(VLArbTable)'. This is obviously correct after a
10836 * 'Set(VLArbTable)', since the cache will then be up to
10837 * date. But it's also correct prior to any 'Set(VLArbTable)'
10838 * since then both the cache, and the relevant h/w registers
10842 for (i = 0; i < MAX_PRIO_TABLE; i++)
10843 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10847 * vl_arb_lock_cache
10849 * All other vl_arb_* functions should be called only after locking
10852 static inline struct vl_arb_cache *
10853 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10855 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10857 spin_lock(&ppd->vl_arb_cache[idx].lock);
10858 return &ppd->vl_arb_cache[idx];
10861 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10863 spin_unlock(&ppd->vl_arb_cache[idx].lock);
10866 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10867 struct ib_vl_weight_elem *vl)
10869 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10872 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10873 struct ib_vl_weight_elem *vl)
10875 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10878 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10879 struct ib_vl_weight_elem *vl)
10881 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10884 /* end functions related to vl arbitration table caching */
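
/*
 * Write a VL arbitration table to the hardware.  If the link is up
 * (and this is not A0 hardware), the data VLs are stopped and drained
 * first so a VL whose weight is dropping to 0 cannot strand a packet
 * in a per-VL FIFO.
 */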
10886 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10887 u32 size, struct ib_vl_weight_elem *vl)
10889 struct hfi1_devdata *dd = ppd->dd;
10891 unsigned int i, is_up = 0;
10892 int drain, ret = 0;
10894 mutex_lock(&ppd->hls_lock);
10896 if (ppd->host_link_state & HLS_UP)
10899 drain = !is_ax(dd) && is_up;
10903 * Before adjusting VL arbitration weights, empty per-VL
10904 * FIFOs, otherwise a packet whose VL weight is being
10905 * set to 0 could get stuck in a FIFO with no chance to
10908 ret = stop_drain_data_vls(dd);
10913 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10918 for (i = 0; i < size; i++, vl++) {
10920 * NOTE: The low priority shift and mask are used here, but
10921 * they are the same for both the low and high registers.
10923 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10924 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10925 | (((u64)vl->weight
10926 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10927 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10928 write_csr(dd, target + (i * 8), reg);
10930 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10933 open_fill_data_vls(dd); /* reopen all VLs */
10936 mutex_unlock(&ppd->hls_lock);
10942 * Read one credit merge VL register.
10944 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10945 struct vl_limit *vll)
10947 u64 reg = read_csr(dd, csr);
10949 vll->dedicated = cpu_to_be16(
10950 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10951 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10952 vll->shared = cpu_to_be16(
10953 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10954 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10958 * Read the current credit merge limits.
10960 static int get_buffer_control(struct hfi1_devdata *dd,
10961 struct buffer_control *bc, u16 *overall_limit)
10966 /* not all entries are filled in */
10967 memset(bc, 0, sizeof(*bc));
10969 /* OPA and HFI have a 1-1 mapping */
10970 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10971 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10973 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10974 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10976 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10977 bc->overall_shared_limit = cpu_to_be16(
10978 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10979 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10981 *overall_limit = (reg
10982 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10983 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
	return sizeof(struct buffer_control);
}
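
/*
 * Read the SC to VLnt mapping table from the DCC CSRs, 4 bits per
 * entry, 16 entries per register.
 */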
10987 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10992 /* each register contains 16 SC->VLnt mappings, 4 bits each */
10993 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10994 for (i = 0; i < sizeof(u64); i++) {
10995 u8 byte = *(((u8 *)®) + i);
10997 dp->vlnt[2 * i] = byte & 0xf;
10998 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11001 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11002 for (i = 0; i < sizeof(u64); i++) {
11003 u8 byte = *(((u8 *)®) + i);
11005 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11006 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11008 return sizeof(struct sc2vlnt);
static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
			      struct ib_vl_weight_elem *vl)
{
	unsigned int i;

	/* preemption is not supported; report zeroed elements */
	for (i = 0; i < nelems; i++, vl++) {
		vl->vl = 0;
		vl->weight = 0;
	}
}
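
/*
 * Write the SC to VLnt mapping table into the DCC CSRs, the inverse
 * of get_sc2vlnt() above.
 */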
11022 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11024 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11026 0, dp->vlnt[0] & 0xf,
11027 1, dp->vlnt[1] & 0xf,
11028 2, dp->vlnt[2] & 0xf,
11029 3, dp->vlnt[3] & 0xf,
11030 4, dp->vlnt[4] & 0xf,
11031 5, dp->vlnt[5] & 0xf,
11032 6, dp->vlnt[6] & 0xf,
11033 7, dp->vlnt[7] & 0xf,
11034 8, dp->vlnt[8] & 0xf,
11035 9, dp->vlnt[9] & 0xf,
11036 10, dp->vlnt[10] & 0xf,
11037 11, dp->vlnt[11] & 0xf,
11038 12, dp->vlnt[12] & 0xf,
11039 13, dp->vlnt[13] & 0xf,
11040 14, dp->vlnt[14] & 0xf,
11041 15, dp->vlnt[15] & 0xf));
11042 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11043 DC_SC_VL_VAL(31_16,
11044 16, dp->vlnt[16] & 0xf,
11045 17, dp->vlnt[17] & 0xf,
11046 18, dp->vlnt[18] & 0xf,
11047 19, dp->vlnt[19] & 0xf,
11048 20, dp->vlnt[20] & 0xf,
11049 21, dp->vlnt[21] & 0xf,
11050 22, dp->vlnt[22] & 0xf,
11051 23, dp->vlnt[23] & 0xf,
11052 24, dp->vlnt[24] & 0xf,
11053 25, dp->vlnt[25] & 0xf,
11054 26, dp->vlnt[26] & 0xf,
11055 27, dp->vlnt[27] & 0xf,
11056 28, dp->vlnt[28] & 0xf,
11057 29, dp->vlnt[29] & 0xf,
11058 30, dp->vlnt[30] & 0xf,
11059 31, dp->vlnt[31] & 0xf));
11062 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11066 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11067 what, (int)limit, idx);
11070 /* change only the shared limit portion of SendCmGLobalCredit */
11071 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11075 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11076 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11077 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11078 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11081 /* change only the total credit limit portion of SendCmGLobalCredit */
11082 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11086 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11087 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11088 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11089 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11092 /* set the given per-VL shared limit */
11093 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11098 if (vl < TXE_NUM_DATA_VL)
11099 addr = SEND_CM_CREDIT_VL + (8 * vl);
11101 addr = SEND_CM_CREDIT_VL15;
11103 reg = read_csr(dd, addr);
11104 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11105 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11106 write_csr(dd, addr, reg);
11109 /* set the given per-VL dedicated limit */
11110 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11115 if (vl < TXE_NUM_DATA_VL)
11116 addr = SEND_CM_CREDIT_VL + (8 * vl);
11118 addr = SEND_CM_CREDIT_VL15;
11120 reg = read_csr(dd, addr);
11121 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11122 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11123 write_csr(dd, addr, reg);
11126 /* spin until the given per-VL status mask bits clear */
11127 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11130 unsigned long timeout;
11133 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11135 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11138 return; /* success */
11139 if (time_after(jiffies, timeout))
11140 break; /* timed out */
11145 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11146 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11148 * If this occurs, it is likely there was a credit loss on the link.
11149 * The only recovery from that is a link bounce.
11152 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
11156 * The number of credits on the VLs may be changed while everything
11157 * is "live", but the following algorithm must be followed due to
11158 * how the hardware is actually implemented. In particular,
11159 * Return_Credit_Status[] is the only correct status check.
11161 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11162 * set Global_Shared_Credit_Limit = 0
11164 * mask0 = all VLs that are changing either dedicated or shared limits
11165 * set Shared_Limit[mask0] = 0
11166 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11167 * if (changing any dedicated limit)
11168 * mask1 = all VLs that are lowering dedicated limits
11169 * lower Dedicated_Limit[mask1]
11170 * spin until Return_Credit_Status[mask1] == 0
11171 * raise Dedicated_Limits
11172 * raise Shared_Limits
11173 * raise Global_Shared_Credit_Limit
 * lower = if the new limit is lower, set the limit to the new value
 * raise = if the new limit is higher than the current value (it may have
 *	been changed earlier in the algorithm), set the limit to the new
 *	value
 */
11179 int set_buffer_control(struct hfi1_pportdata *ppd,
11180 struct buffer_control *new_bc)
11182 struct hfi1_devdata *dd = ppd->dd;
11183 u64 changing_mask, ld_mask, stat_mask;
11185 int i, use_all_mask;
11186 int this_shared_changing;
11187 int vl_count = 0, ret;
11189 * A0: add the variable any_shared_limit_changing below and in the
11190 * algorithm above. If removing A0 support, it can be removed.
11192 int any_shared_limit_changing;
11193 struct buffer_control cur_bc;
11194 u8 changing[OPA_MAX_VLS];
11195 u8 lowering_dedicated[OPA_MAX_VLS];
11198 const u64 all_mask =
11199 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11200 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11201 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11202 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11203 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11204 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11205 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11206 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11207 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11209 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11210 #define NUM_USABLE_VLS 16 /* look at VL15 and less */
11212 /* find the new total credits, do sanity check on unused VLs */
11213 for (i = 0; i < OPA_MAX_VLS; i++) {
11215 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11218 nonzero_msg(dd, i, "dedicated",
11219 be16_to_cpu(new_bc->vl[i].dedicated));
11220 nonzero_msg(dd, i, "shared",
11221 be16_to_cpu(new_bc->vl[i].shared));
11222 new_bc->vl[i].dedicated = 0;
11223 new_bc->vl[i].shared = 0;
11225 new_total += be16_to_cpu(new_bc->overall_shared_limit);
11227 /* fetch the current values */
11228 get_buffer_control(dd, &cur_bc, &cur_total);
11231 * Create the masks we will use.
11233 memset(changing, 0, sizeof(changing));
11234 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
	/*
	 * NOTE: Assumes that the individual VL bits are adjacent and in
	 * increasing order.
	 */
	stat_mask =
		SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11244 any_shared_limit_changing = 0;
11245 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11248 this_shared_changing = new_bc->vl[i].shared
11249 != cur_bc.vl[i].shared;
11250 if (this_shared_changing)
11251 any_shared_limit_changing = 1;
11252 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11253 this_shared_changing) {
11255 changing_mask |= stat_mask;
11258 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11259 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11260 lowering_dedicated[i] = 1;
11261 ld_mask |= stat_mask;
11265 /* bracket the credit change with a total adjustment */
11266 if (new_total > cur_total)
11267 set_global_limit(dd, new_total);
11270 * Start the credit change algorithm.
11273 if ((be16_to_cpu(new_bc->overall_shared_limit) <
11274 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11275 (is_ax(dd) && any_shared_limit_changing)) {
11276 set_global_shared(dd, 0);
11277 cur_bc.overall_shared_limit = 0;
11281 for (i = 0; i < NUM_USABLE_VLS; i++) {
11286 set_vl_shared(dd, i, 0);
11287 cur_bc.vl[i].shared = 0;
11291 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11294 if (change_count > 0) {
11295 for (i = 0; i < NUM_USABLE_VLS; i++) {
11299 if (lowering_dedicated[i]) {
11300 set_vl_dedicated(dd, i,
11301 be16_to_cpu(new_bc->
11303 cur_bc.vl[i].dedicated =
11304 new_bc->vl[i].dedicated;
11308 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11310 /* now raise all dedicated that are going up */
11311 for (i = 0; i < NUM_USABLE_VLS; i++) {
11315 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11316 be16_to_cpu(cur_bc.vl[i].dedicated))
11317 set_vl_dedicated(dd, i,
11318 be16_to_cpu(new_bc->
11323 /* next raise all shared that are going up */
11324 for (i = 0; i < NUM_USABLE_VLS; i++) {
11328 if (be16_to_cpu(new_bc->vl[i].shared) >
11329 be16_to_cpu(cur_bc.vl[i].shared))
11330 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11333 /* finally raise the global shared */
11334 if (be16_to_cpu(new_bc->overall_shared_limit) >
11335 be16_to_cpu(cur_bc.overall_shared_limit))
11336 set_global_shared(dd,
11337 be16_to_cpu(new_bc->overall_shared_limit));
11339 /* bracket the credit change with a total adjustment */
11340 if (new_total < cur_total)
11341 set_global_limit(dd, new_total);
11344 * Determine the actual number of operational VLS using the number of
11345 * dedicated and shared credits for each VL.
11347 if (change_count > 0) {
11348 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11349 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11350 be16_to_cpu(new_bc->vl[i].shared) > 0)
11352 ppd->actual_vls_operational = vl_count;
11353 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11354 ppd->actual_vls_operational :
11355 ppd->vls_operational,
11358 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11359 ppd->actual_vls_operational :
11360 ppd->vls_operational, NULL);
11368 * Read the given fabric manager table. Return the size of the
11369 * table (in bytes) on success, and a negative error code on
11372 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11376 struct vl_arb_cache *vlc;
11379 case FM_TBL_VL_HIGH_ARB:
11382 * OPA specifies 128 elements (of 2 bytes each), though
11383 * HFI supports only 16 elements in h/w.
11385 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11386 vl_arb_get_cache(vlc, t);
11387 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11389 case FM_TBL_VL_LOW_ARB:
11392 * OPA specifies 128 elements (of 2 bytes each), though
11393 * HFI supports only 16 elements in h/w.
11395 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11396 vl_arb_get_cache(vlc, t);
11397 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11399 case FM_TBL_BUFFER_CONTROL:
11400 size = get_buffer_control(ppd->dd, t, NULL);
11402 case FM_TBL_SC2VLNT:
11403 size = get_sc2vlnt(ppd->dd, t);
11405 case FM_TBL_VL_PREEMPT_ELEMS:
11407 /* OPA specifies 128 elements, of 2 bytes each */
11408 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11410 case FM_TBL_VL_PREEMPT_MATRIX:
11413 * OPA specifies that this is the same size as the VL
11414 * arbitration tables (i.e., 256 bytes).
11424 * Write the given fabric manager table.
11426 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11429 struct vl_arb_cache *vlc;
11432 case FM_TBL_VL_HIGH_ARB:
11433 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11434 if (vl_arb_match_cache(vlc, t)) {
11435 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11438 vl_arb_set_cache(vlc, t);
11439 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11440 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11441 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11443 case FM_TBL_VL_LOW_ARB:
11444 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11445 if (vl_arb_match_cache(vlc, t)) {
11446 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11449 vl_arb_set_cache(vlc, t);
11450 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11451 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11452 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11454 case FM_TBL_BUFFER_CONTROL:
11455 ret = set_buffer_control(ppd, t);
11457 case FM_TBL_SC2VLNT:
11458 set_sc2vlnt(ppd->dd, t);
11467 * Disable all data VLs.
11469 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11471 static int disable_data_vls(struct hfi1_devdata *dd)
11476 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11482 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11483 * Just re-enables all data VLs (the "fill" part happens
11484 * automatically - the name was chosen for symmetry with
11485 * stop_drain_data_vls()).
11487 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11489 int open_fill_data_vls(struct hfi1_devdata *dd)
11494 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11500 * drain_data_vls() - assumes that disable_data_vls() has been called,
11501 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
11502 * engines to drop to 0.
11504 static void drain_data_vls(struct hfi1_devdata *dd)
11508 pause_for_credit_return(dd);
11512 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11514 * Use open_fill_data_vls() to resume using data VLs. This pair is
11515 * meant to be used like this:
11517 * stop_drain_data_vls(dd);
11518 * // do things with per-VL resources
11519 * open_fill_data_vls(dd);
11521 int stop_drain_data_vls(struct hfi1_devdata *dd)
11525 ret = disable_data_vls(dd);
11527 drain_data_vls(dd);
11533 * Convert a nanosecond time to a cclock count. No matter how slow
11534 * the cclock, a non-zero ns will always have a non-zero result.
u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
{
	u32 cclocks;

	if (dd->icode == ICODE_FPGA_EMULATION)
		cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
	else /* simulation pretends to be ASIC */
		cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
	if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
		cclocks = 1;
	return cclocks;
}
/*
 * Convert a cclock count to nanoseconds.  No matter how slow
 * the cclock, a non-zero cclocks will always have a non-zero result.
 */
u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
{
	u32 ns;

	if (dd->icode == ICODE_FPGA_EMULATION)
		ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
	else /* simulation pretends to be ASIC */
		ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
	if (cclocks && !ns)
		ns = 1;
	return ns;
}
11567 * Dynamically adjust the receive interrupt timeout for a context based on
11568 * incoming packet rate.
11570 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11572 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11574 struct hfi1_devdata *dd = rcd->dd;
11575 u32 timeout = rcd->rcvavail_timeout;
	/*
	 * This algorithm doubles or halves the timeout depending on whether
	 * the number of packets received in this interrupt was less than, or
	 * greater than or equal to, the interrupt count.
	 *
	 * The calculations below do not allow a steady state to be achieved.
	 * Only at the endpoints is it possible to have an unchanging
	 * timeout.
	 */
	if (npkts < rcv_intr_count) {
		/*
		 * Not enough packets arrived before the timeout, adjust
		 * timeout downward.
		 */
		if (timeout < 2) /* already at minimum? */
			return;
		timeout >>= 1;
	} else {
		/*
		 * More than enough packets arrived before the timeout, adjust
		 * timeout upward.
		 */
		if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
			return;
		timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
	}
11604 rcd->rcvavail_timeout = timeout;
	/*
	 * timeout cannot be larger than rcv_intr_timeout_csr which has already
	 * been verified to be in range
	 */
	write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
			(u64)timeout <<
			RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
}
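
/*
 * Update RcvHdrHead (and, when requested, RcvEgrIndexHead) for the
 * context, optionally re-tuning the receive interrupt timeout first.
 */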
11614 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11615 u32 intr_adjust, u32 npkts)
11617 struct hfi1_devdata *dd = rcd->dd;
11619 u32 ctxt = rcd->ctxt;
11622 * Need to write timeout register before updating RcvHdrHead to ensure
11623 * that a new value is used when the HW decides to restart counting.
11626 adjust_rcv_timeout(rcd, npkts);
11628 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11629 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11630 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11633 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11634 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11635 << RCV_HDR_HEAD_HEAD_SHIFT);
	write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
}
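
/*
 * Return non-zero if the receive header queue for the context is
 * empty, i.e. the head has caught up with the tail.
 */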
11640 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11644 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11645 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11647 if (rcd->rcvhdrtail_kvaddr)
11648 tail = get_rcvhdrtail(rcd);
11650 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11652 return head == tail;
/*
 * Context Control and Receive Array encoding for buffer size:
 *	0x0 invalid
 *	0x1   4 KB
 *	0x2   8 KB
 *	0x3  16 KB
 *	0x4  32 KB
 *	0x5  64 KB
 *	0x6 128 KB
 *	0x7 256 KB
 *	0x8 512 KB (Receive Array only)
 *	0x9   1 MB (Receive Array only)
 *	0xa   2 MB (Receive Array only)
 *
 *	0xB-0xF - reserved (Receive Array only)
 *
 *	This routine assumes that the value has already been sanity checked.
 */
static u32 encoded_size(u32 size)
{
	switch (size) {
	case   4 * 1024: return 0x1;
	case   8 * 1024: return 0x2;
	case  16 * 1024: return 0x3;
	case  32 * 1024: return 0x4;
	case  64 * 1024: return 0x5;
	case 128 * 1024: return 0x6;
	case 256 * 1024: return 0x7;
	case 512 * 1024: return 0x8;
	case   1 * 1024 * 1024: return 0x9;
	case   2 * 1024 * 1024: return 0xa;
	}
	return 0x1;	/* if invalid, go with the minimum size */
}
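
/*
 * Apply one or more HFI1_RCVCTRL_* operations to a receive context:
 * enable/disable the context, interrupt-available, tail updates,
 * TID flows, and related controls.
 */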
11691 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11693 struct hfi1_ctxtdata *rcd;
11695 int did_enable = 0;
11697 rcd = dd->rcd[ctxt];
11701 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11703 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
/* if the context is already enabled, don't do the extra steps */
11705 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11706 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11707 /* reset the tail and hdr addresses, and sequence count */
11708 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11710 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11711 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11712 rcd->rcvhdrqtailaddr_dma);
11715 /* reset the cached receive header queue head value */
* Zero the receive header queue so we don't get false
* positives when checking the sequence number. The
* sequence numbers could land exactly on the same spot,
* e.g. an rcd restart before the receive header queue wrapped.
11724 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11726 /* starting timeout */
11727 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11729 /* enable the context */
11730 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11732 /* clean the egr buffer size first */
11733 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11734 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11735 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11736 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11738 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11739 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11742 /* zero RcvEgrIndexHead */
11743 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11745 /* set eager count and base index */
11746 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11747 & RCV_EGR_CTRL_EGR_CNT_MASK)
11748 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11749 (((rcd->eager_base >> RCV_SHIFT)
11750 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11751 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11752 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11755 * Set TID (expected) count and base index.
11756 * rcd->expected_count is set to individual RcvArray entries,
11757 * not pairs, and the CSR takes a pair-count in groups of
11758 * four, so divide by 8.
11760 reg = (((rcd->expected_count >> RCV_SHIFT)
11761 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11762 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11763 (((rcd->expected_base >> RCV_SHIFT)
11764 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11765 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11766 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11767 if (ctxt == HFI1_CTRL_CTXT)
11768 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11770 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11771 write_csr(dd, RCV_VL15, 0);
* When a receive context is being disabled, turn on tail
* update with a dummy tail address and then disable
11777 if (dd->rcvhdrtail_dummy_dma) {
11778 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11779 dd->rcvhdrtail_dummy_dma);
11780 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11781 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11784 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11786 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11787 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11788 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11789 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11790 if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
11791 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11792 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11793 /* See comment on RcvCtxtCtrl.TailUpd above */
11794 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11795 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11797 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11798 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11799 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11800 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11801 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11803 * In one-packet-per-eager mode, the size comes from
11804 * the RcvArray entry.
11806 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11807 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11809 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11810 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11811 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11812 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11813 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11814 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11815 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11816 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11817 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11818 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11819 rcd->rcvctrl = rcvctrl;
11820 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11821 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11823 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11825 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11826 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11828 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11830 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11831 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11832 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11833 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11834 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11835 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11836 ctxt, reg, reg == 0 ? "not" : "still");
11842 * The interrupt timeout and count must be set after
11843 * the context is enabled to take effect.
11845 /* set interrupt timeout */
11846 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11847 (u64)rcd->rcvavail_timeout <<
11848 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11850 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11851 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11852 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11855 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
* If the context has been disabled and the Tail Update has
* been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
* so it doesn't point at an invalid address.
11861 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11862 dd->rcvhdrtail_dummy_dma);
11865 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11871 ret = dd->cntrnameslen;
11872 *namep = dd->cntrnames;
11874 const struct cntr_entry *entry;
11877 ret = (dd->ndevcntrs) * sizeof(u64);
11879 /* Get the start of the block of counters */
11880 *cntrp = dd->cntrs;
11883 * Now go and fill in each counter in the block.
11885 for (i = 0; i < DEV_CNTR_LAST; i++) {
11886 entry = &dev_cntrs[i];
11887 hfi1_cdbg(CNTR, "reading %s", entry->name);
11888 if (entry->flags & CNTR_DISABLED) {
11890 hfi1_cdbg(CNTR, "\tDisabled\n");
11892 if (entry->flags & CNTR_VL) {
11893 hfi1_cdbg(CNTR, "\tPer VL\n");
11894 for (j = 0; j < C_VL_COUNT; j++) {
11895 val = entry->rw_cntr(entry,
11901 "\t\tRead 0x%llx for %d\n",
11903 dd->cntrs[entry->offset + j] =
11906 } else if (entry->flags & CNTR_SDMA) {
11908 "\t Per SDMA Engine\n");
11909 for (j = 0; j < dd->chip_sdma_engines;
11912 entry->rw_cntr(entry, dd, j,
11915 "\t\tRead 0x%llx for %d\n",
11917 dd->cntrs[entry->offset + j] =
11921 val = entry->rw_cntr(entry, dd,
11924 dd->cntrs[entry->offset] = val;
11925 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11934 * Used by sysfs to create files for hfi stats to read
11936 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11942 ret = ppd->dd->portcntrnameslen;
11943 *namep = ppd->dd->portcntrnames;
11945 const struct cntr_entry *entry;
11948 ret = ppd->dd->nportcntrs * sizeof(u64);
11949 *cntrp = ppd->cntrs;
11951 for (i = 0; i < PORT_CNTR_LAST; i++) {
11952 entry = &port_cntrs[i];
11953 hfi1_cdbg(CNTR, "reading %s", entry->name);
11954 if (entry->flags & CNTR_DISABLED) {
11956 hfi1_cdbg(CNTR, "\tDisabled\n");
11960 if (entry->flags & CNTR_VL) {
11961 hfi1_cdbg(CNTR, "\tPer VL");
11962 for (j = 0; j < C_VL_COUNT; j++) {
11963 val = entry->rw_cntr(entry, ppd, j,
11968 "\t\tRead 0x%llx for %d",
11970 ppd->cntrs[entry->offset + j] = val;
11973 val = entry->rw_cntr(entry, ppd,
11977 ppd->cntrs[entry->offset] = val;
11978 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11985 static void free_cntrs(struct hfi1_devdata *dd)
11987 struct hfi1_pportdata *ppd;
11990 if (dd->synth_stats_timer.data)
11991 del_timer_sync(&dd->synth_stats_timer);
11992 dd->synth_stats_timer.data = 0;
11993 ppd = (struct hfi1_pportdata *)(dd + 1);
11994 for (i = 0; i < dd->num_pports; i++, ppd++) {
11996 kfree(ppd->scntrs);
11997 free_percpu(ppd->ibport_data.rvp.rc_acks);
11998 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11999 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12001 ppd->scntrs = NULL;
12002 ppd->ibport_data.rvp.rc_acks = NULL;
12003 ppd->ibport_data.rvp.rc_qacks = NULL;
12004 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12006 kfree(dd->portcntrnames);
12007 dd->portcntrnames = NULL;
12012 kfree(dd->cntrnames);
12013 dd->cntrnames = NULL;
12014 if (dd->update_cntr_wq) {
12015 destroy_workqueue(dd->update_cntr_wq);
12016 dd->update_cntr_wq = NULL;
12020 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12021 u64 *psval, void *context, int vl)
12026 if (entry->flags & CNTR_DISABLED) {
12027 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12031 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12033 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
/* If it's a synthetic counter there is more work we need to do */
12036 if (entry->flags & CNTR_SYNTH) {
12037 if (sval == CNTR_MAX) {
12038 /* No need to read already saturated */
12042 if (entry->flags & CNTR_32BIT) {
12043 /* 32bit counters can wrap multiple times */
12044 u64 upper = sval >> 32;
12045 u64 lower = (sval << 32) >> 32;
12047 if (lower > val) { /* hw wrapped */
12048 if (upper == CNTR_32BIT_MAX)
12054 if (val != CNTR_MAX)
12055 val = (upper << 32) | val;
12058 /* If we rolled we are saturated */
12059 if ((val < sval) || (val > CNTR_MAX))
12066 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
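
/*
 * Illustrative sketch of the 32-bit wrap handling above: 'sval' is the
 * 64-bit software shadow and 'val32' the freshly read 32-bit hardware
 * value. If the hardware value is below the shadow's low half, the
 * counter wrapped since the last read, so carry into the upper half.
 * (The driver additionally saturates at CNTR_MAX, omitted here.)
 */
static inline u64 example_extend_32bit(u64 sval, u64 val32)
{
	u64 upper = sval >> 32;
	u64 lower = sval & 0xffffffffull;

	if (lower > val32)	/* hw wrapped since the last read */
		upper++;
	return (upper << 32) | val32;
}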
12071 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12072 struct cntr_entry *entry,
12073 u64 *psval, void *context, int vl, u64 data)
12077 if (entry->flags & CNTR_DISABLED) {
12078 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12082 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12084 if (entry->flags & CNTR_SYNTH) {
12086 if (entry->flags & CNTR_32BIT) {
12087 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12088 (data << 32) >> 32);
12089 val = data; /* return the full 64bit value */
12091 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12095 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12100 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12105 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12107 struct cntr_entry *entry;
12110 entry = &dev_cntrs[index];
12111 sval = dd->scntrs + entry->offset;
12113 if (vl != CNTR_INVALID_VL)
12116 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12119 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12121 struct cntr_entry *entry;
12124 entry = &dev_cntrs[index];
12125 sval = dd->scntrs + entry->offset;
12127 if (vl != CNTR_INVALID_VL)
12130 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12133 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12135 struct cntr_entry *entry;
12138 entry = &port_cntrs[index];
12139 sval = ppd->scntrs + entry->offset;
12141 if (vl != CNTR_INVALID_VL)
12144 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12145 (index <= C_RCV_HDR_OVF_LAST)) {
12146 /* We do not want to bother for disabled contexts */
12150 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12153 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12155 struct cntr_entry *entry;
12158 entry = &port_cntrs[index];
12159 sval = ppd->scntrs + entry->offset;
12161 if (vl != CNTR_INVALID_VL)
12164 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12165 (index <= C_RCV_HDR_OVF_LAST)) {
12166 /* We do not want to bother for disabled contexts */
12170 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12173 static void do_update_synth_timer(struct work_struct *work)
12180 struct hfi1_pportdata *ppd;
12181 struct cntr_entry *entry;
12182 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
* Rather than keep beating on the CSRs, pick a minimal set that we can
* check to watch for a potential rollover. We can do this by looking at
* the number of flits sent/received. If the total flits exceeds 32 bits
* then we have to iterate over all the counters and update.
12191 entry = &dev_cntrs[C_DC_RCV_FLITS];
12192 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12194 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12195 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12199 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12200 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12202 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12204 * May not be strictly necessary to update but it won't hurt and
12205 * simplifies the logic here.
12208 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12211 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12213 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12214 total_flits, (u64)CNTR_32BIT_MAX);
12215 if (total_flits >= CNTR_32BIT_MAX) {
12216 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12223 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12224 for (i = 0; i < DEV_CNTR_LAST; i++) {
12225 entry = &dev_cntrs[i];
12226 if (entry->flags & CNTR_VL) {
12227 for (vl = 0; vl < C_VL_COUNT; vl++)
12228 read_dev_cntr(dd, i, vl);
12230 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12233 ppd = (struct hfi1_pportdata *)(dd + 1);
12234 for (i = 0; i < dd->num_pports; i++, ppd++) {
12235 for (j = 0; j < PORT_CNTR_LAST; j++) {
12236 entry = &port_cntrs[j];
12237 if (entry->flags & CNTR_VL) {
12238 for (vl = 0; vl < C_VL_COUNT; vl++)
12239 read_port_cntr(ppd, j, vl);
12241 read_port_cntr(ppd, j, CNTR_INVALID_VL);
* We want the value in the register. The goal is to keep track
* of the number of "ticks", not the counter value. In other
* words, if the register rolls we want to notice it and go ahead
* and force an update.
12252 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12253 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12256 entry = &dev_cntrs[C_DC_RCV_FLITS];
12257 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12260 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12261 dd->unit, dd->last_tx, dd->last_rx);
12264 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
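
/*
 * Condensed form of the tripwire test above, for illustration only: a
 * full counter update is forced when either flit counter rolled over,
 * or when roughly 2^32 flits have moved since the last update.
 */
static inline bool example_needs_full_update(u64 cur_tx, u64 cur_rx,
					     u64 last_tx, u64 last_rx)
{
	if (cur_tx < last_tx || cur_rx < last_rx)	/* rollover */
		return true;
	return (cur_tx - last_tx) + (cur_rx - last_rx) >= CNTR_32BIT_MAX;
}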
12268 static void update_synth_timer(unsigned long opaque)
12270 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12272 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12273 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
#define C_MAX_NAME 16 /* 15 chars + one for '\0' */
12277 static int init_cntrs(struct hfi1_devdata *dd)
12279 int i, rcv_ctxts, j;
12282 char name[C_MAX_NAME];
12283 struct hfi1_pportdata *ppd;
12284 const char *bit_type_32 = ",32";
12285 const int bit_type_32_sz = strlen(bit_type_32);
12287 /* set up the stats timer; the add_timer is done at the end */
12288 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12289 (unsigned long)dd);
12291 /***********************/
12292 /* per device counters */
12293 /***********************/
/* size names and determine how many we have */
12299 for (i = 0; i < DEV_CNTR_LAST; i++) {
12300 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12301 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12305 if (dev_cntrs[i].flags & CNTR_VL) {
12306 dev_cntrs[i].offset = dd->ndevcntrs;
12307 for (j = 0; j < C_VL_COUNT; j++) {
12308 snprintf(name, C_MAX_NAME, "%s%d",
12309 dev_cntrs[i].name, vl_from_idx(j));
12310 sz += strlen(name);
12311 /* Add ",32" for 32-bit counters */
12312 if (dev_cntrs[i].flags & CNTR_32BIT)
12313 sz += bit_type_32_sz;
12317 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12318 dev_cntrs[i].offset = dd->ndevcntrs;
12319 for (j = 0; j < dd->chip_sdma_engines; j++) {
12320 snprintf(name, C_MAX_NAME, "%s%d",
12321 dev_cntrs[i].name, j);
12322 sz += strlen(name);
12323 /* Add ",32" for 32-bit counters */
12324 if (dev_cntrs[i].flags & CNTR_32BIT)
12325 sz += bit_type_32_sz;
12330 /* +1 for newline. */
12331 sz += strlen(dev_cntrs[i].name) + 1;
12332 /* Add ",32" for 32-bit counters */
12333 if (dev_cntrs[i].flags & CNTR_32BIT)
12334 sz += bit_type_32_sz;
12335 dev_cntrs[i].offset = dd->ndevcntrs;
12340 /* allocate space for the counter values */
12341 dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12345 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12349 /* allocate space for the counter names */
12350 dd->cntrnameslen = sz;
12351 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12352 if (!dd->cntrnames)
12355 /* fill in the names */
12356 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12357 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12359 } else if (dev_cntrs[i].flags & CNTR_VL) {
12360 for (j = 0; j < C_VL_COUNT; j++) {
12361 snprintf(name, C_MAX_NAME, "%s%d",
12364 memcpy(p, name, strlen(name));
12367 /* Counter is 32 bits */
12368 if (dev_cntrs[i].flags & CNTR_32BIT) {
12369 memcpy(p, bit_type_32, bit_type_32_sz);
12370 p += bit_type_32_sz;
12375 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12376 for (j = 0; j < dd->chip_sdma_engines; j++) {
12377 snprintf(name, C_MAX_NAME, "%s%d",
12378 dev_cntrs[i].name, j);
12379 memcpy(p, name, strlen(name));
12382 /* Counter is 32 bits */
12383 if (dev_cntrs[i].flags & CNTR_32BIT) {
12384 memcpy(p, bit_type_32, bit_type_32_sz);
12385 p += bit_type_32_sz;
12391 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12392 p += strlen(dev_cntrs[i].name);
12394 /* Counter is 32 bits */
12395 if (dev_cntrs[i].flags & CNTR_32BIT) {
12396 memcpy(p, bit_type_32, bit_type_32_sz);
12397 p += bit_type_32_sz;
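
/*
 * Illustrative sketch of the name packing used above: counter names are
 * concatenated into a single buffer, each optionally tagged with ",32"
 * (bit_type_32) and terminated by '\n', which is what consumers of the
 * name blob split on.
 */
static inline char *example_pack_name(char *p, const char *name,
				      bool is_32bit)
{
	size_t len = strlen(name);

	memcpy(p, name, len);
	p += len;
	if (is_32bit) {
		memcpy(p, ",32", 3);
		p += 3;
	}
	*p++ = '\n';
	return p;
}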
12404 /*********************/
12405 /* per port counters */
12406 /*********************/
12409 * Go through the counters for the overflows and disable the ones we
12410 * don't need. This varies based on platform so we need to do it
12411 * dynamically here.
12413 rcv_ctxts = dd->num_rcv_contexts;
12414 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12415 i <= C_RCV_HDR_OVF_LAST; i++) {
12416 port_cntrs[i].flags |= CNTR_DISABLED;
/* size port counter names and determine how many we have */
12421 dd->nportcntrs = 0;
12422 for (i = 0; i < PORT_CNTR_LAST; i++) {
12423 if (port_cntrs[i].flags & CNTR_DISABLED) {
12424 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12428 if (port_cntrs[i].flags & CNTR_VL) {
12429 port_cntrs[i].offset = dd->nportcntrs;
12430 for (j = 0; j < C_VL_COUNT; j++) {
12431 snprintf(name, C_MAX_NAME, "%s%d",
12432 port_cntrs[i].name, vl_from_idx(j));
12433 sz += strlen(name);
12434 /* Add ",32" for 32-bit counters */
12435 if (port_cntrs[i].flags & CNTR_32BIT)
12436 sz += bit_type_32_sz;
12441 /* +1 for newline */
12442 sz += strlen(port_cntrs[i].name) + 1;
12443 /* Add ",32" for 32-bit counters */
12444 if (port_cntrs[i].flags & CNTR_32BIT)
12445 sz += bit_type_32_sz;
12446 port_cntrs[i].offset = dd->nportcntrs;
12451 /* allocate space for the counter names */
12452 dd->portcntrnameslen = sz;
12453 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12454 if (!dd->portcntrnames)
12457 /* fill in port cntr names */
12458 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12459 if (port_cntrs[i].flags & CNTR_DISABLED)
12462 if (port_cntrs[i].flags & CNTR_VL) {
12463 for (j = 0; j < C_VL_COUNT; j++) {
12464 snprintf(name, C_MAX_NAME, "%s%d",
12465 port_cntrs[i].name, vl_from_idx(j));
12466 memcpy(p, name, strlen(name));
12469 /* Counter is 32 bits */
12470 if (port_cntrs[i].flags & CNTR_32BIT) {
12471 memcpy(p, bit_type_32, bit_type_32_sz);
12472 p += bit_type_32_sz;
12478 memcpy(p, port_cntrs[i].name,
12479 strlen(port_cntrs[i].name));
12480 p += strlen(port_cntrs[i].name);
12482 /* Counter is 32 bits */
12483 if (port_cntrs[i].flags & CNTR_32BIT) {
12484 memcpy(p, bit_type_32, bit_type_32_sz);
12485 p += bit_type_32_sz;
12492 /* allocate per port storage for counter values */
12493 ppd = (struct hfi1_pportdata *)(dd + 1);
12494 for (i = 0; i < dd->num_pports; i++, ppd++) {
12495 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12499 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12504 /* CPU counters need to be allocated and zeroed */
12505 if (init_cpu_counters(dd))
12508 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12509 WQ_MEM_RECLAIM, dd->unit);
12510 if (!dd->update_cntr_wq)
12513 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12515 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12522 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12524 switch (chip_lstate) {
12527 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12531 return IB_PORT_DOWN;
12533 return IB_PORT_INIT;
12535 return IB_PORT_ARMED;
12536 case LSTATE_ACTIVE:
12537 return IB_PORT_ACTIVE;
12541 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12543 /* look at the HFI meta-states only */
12544 switch (chip_pstate & 0xf0) {
12546 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12550 return IB_PORTPHYSSTATE_DISABLED;
12552 return OPA_PORTPHYSSTATE_OFFLINE;
12554 return IB_PORTPHYSSTATE_POLLING;
12555 case PLS_CONFIGPHY:
12556 return IB_PORTPHYSSTATE_TRAINING;
12558 return IB_PORTPHYSSTATE_LINKUP;
12560 return IB_PORTPHYSSTATE_PHY_TEST;
12564 /* return the OPA port logical state name */
12565 const char *opa_lstate_name(u32 lstate)
12567 static const char * const port_logical_names[] = {
12573 "PORT_ACTIVE_DEFER",
12575 if (lstate < ARRAY_SIZE(port_logical_names))
12576 return port_logical_names[lstate];
12580 /* return the OPA port physical state name */
12581 const char *opa_pstate_name(u32 pstate)
12583 static const char * const port_physical_names[] = {
12590 "PHYS_LINK_ERR_RECOVER",
12597 if (pstate < ARRAY_SIZE(port_physical_names))
12598 return port_physical_names[pstate];
12603 * Read the hardware link state and set the driver's cached value of it.
12604 * Return the (new) current value.
12606 u32 get_logical_state(struct hfi1_pportdata *ppd)
12610 new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12611 if (new_state != ppd->lstate) {
12612 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12613 opa_lstate_name(new_state), new_state);
12614 ppd->lstate = new_state;
12617 * Set port status flags in the page mapped into userspace
12618 * memory. Do it here to ensure a reliable state - this is
12619 * the only function called by all state handling code.
* Always set the flags because the cached value might have been
* changed explicitly outside of this routine.
12624 if (ppd->statusp) {
12625 switch (ppd->lstate) {
12628 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12629 HFI1_STATUS_IB_READY);
12631 case IB_PORT_ARMED:
12632 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12634 case IB_PORT_ACTIVE:
12635 *ppd->statusp |= HFI1_STATUS_IB_READY;
12639 return ppd->lstate;
12643 * wait_logical_linkstate - wait for an IB link state change to occur
12644 * @ppd: port device
12645 * @state: the state to wait for
12646 * @msecs: the number of milliseconds to wait
12648 * Wait up to msecs milliseconds for IB link state change to occur.
12649 * For now, take the easy polling route.
12650 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12652 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12655 unsigned long timeout;
12657 timeout = jiffies + msecs_to_jiffies(msecs);
12659 if (get_logical_state(ppd) == state)
12661 if (time_after(jiffies, timeout))
12665 dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
* Read the physical hardware link state and set the driver's cached value
* of it.
12674 void cache_physical_state(struct hfi1_pportdata *ppd)
12679 read_pstate = read_physical_state(ppd->dd);
12680 ib_pstate = chip_to_opa_pstate(ppd->dd, read_pstate);
12681 /* check if OPA pstate changed */
12682 if (chip_to_opa_pstate(ppd->dd, ppd->pstate) != ib_pstate) {
12683 dd_dev_info(ppd->dd,
12684 "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12685 __func__, opa_pstate_name(ib_pstate), ib_pstate,
12688 ppd->pstate = read_pstate;
* wait_physical_linkstate - wait for a physical link state change to occur
12693 * @ppd: port device
12694 * @state: the state to wait for
12695 * @msecs: the number of milliseconds to wait
* Wait up to msecs milliseconds for a physical link state change to occur.
12698 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12700 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12703 unsigned long timeout;
12705 timeout = jiffies + msecs_to_jiffies(msecs);
12707 cache_physical_state(ppd);
12708 if (ppd->pstate == state)
12710 if (time_after(jiffies, timeout)) {
12711 dd_dev_err(ppd->dd,
12712 "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
12713 state, ppd->pstate);
12716 usleep_range(1950, 2050); /* sleep 2ms-ish */
12722 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12723 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12725 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12726 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12728 void hfi1_init_ctxt(struct send_context *sc)
12731 struct hfi1_devdata *dd = sc->dd;
12733 u8 set = (sc->type == SC_USER ?
12734 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12735 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12736 reg = read_kctxt_csr(dd, sc->hw_context,
12737 SEND_CTXT_CHECK_ENABLE);
12739 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12741 SET_STATIC_RATE_CONTROL_SMASK(reg);
12742 write_kctxt_csr(dd, sc->hw_context,
12743 SEND_CTXT_CHECK_ENABLE, reg);
12747 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12752 if (dd->icode != ICODE_RTL_SILICON) {
12753 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12754 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12758 reg = read_csr(dd, ASIC_STS_THERM);
12759 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12760 ASIC_STS_THERM_CURR_TEMP_MASK);
12761 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12762 ASIC_STS_THERM_LO_TEMP_MASK);
12763 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12764 ASIC_STS_THERM_HI_TEMP_MASK);
12765 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12766 ASIC_STS_THERM_CRIT_TEMP_MASK);
12767 /* triggers is a 3-bit value - 1 bit per trigger. */
12768 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12773 /* ========================================================================= */
* Enable/disable the chip from delivering interrupts.
12778 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12783 * In HFI, the mask needs to be 1 to allow interrupts.
12786 /* enable all interrupts */
12787 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12788 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12792 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12793 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12798 * Clear all interrupt sources on the chip.
12800 static void clear_all_interrupts(struct hfi1_devdata *dd)
12804 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12805 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12807 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12808 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12809 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12810 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12811 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12812 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12813 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12814 for (i = 0; i < dd->chip_send_contexts; i++)
12815 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12816 for (i = 0; i < dd->chip_sdma_engines; i++)
12817 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12819 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12820 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12821 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
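
/*
 * Note: every *_ERR_CLEAR and CCE_INT_CLEAR CSR written above is
 * write-1-to-clear, so writing ~(u64)0 acknowledges all pending sources
 * without disturbing the corresponding mask registers.
 */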
12824 /* Move to pcie.c? */
12825 static void disable_intx(struct pci_dev *pdev)
12830 static void clean_up_interrupts(struct hfi1_devdata *dd)
12834 /* remove irqs - must happen before disabling/turning off */
12835 if (dd->num_msix_entries) {
12837 struct hfi1_msix_entry *me = dd->msix_entries;
12839 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12840 if (!me->arg) /* => no irq, no affinity */
12842 hfi1_put_irq_affinity(dd, me);
12843 free_irq(me->irq, me->arg);
12846 /* clean structures */
12847 kfree(dd->msix_entries);
12848 dd->msix_entries = NULL;
12849 dd->num_msix_entries = 0;
12852 if (dd->requested_intx_irq) {
12853 free_irq(dd->pcidev->irq, dd);
12854 dd->requested_intx_irq = 0;
12856 disable_intx(dd->pcidev);
12859 pci_free_irq_vectors(dd->pcidev);
12863 * Remap the interrupt source from the general handler to the given MSI-X
12866 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12871 /* clear from the handled mask of the general interrupt */
12874 if (likely(m < CCE_NUM_INT_CSRS)) {
12875 dd->gi_mask[m] &= ~((u64)1 << n);
12877 dd_dev_err(dd, "remap interrupt err\n");
12881 /* direct the chip source to the given MSI-X interrupt */
12884 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12885 reg &= ~((u64)0xff << (8 * n));
12886 reg |= ((u64)msix_intr & 0xff) << (8 * n);
12887 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
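
/*
 * Illustrative helper (not driver code): the general-interrupt mask is
 * an array of 64-bit CSRs (64 sources each), while each CCE_INT_MAP CSR
 * holds eight one-byte MSI-X vector entries. For a given source, the
 * two (csr, slot) pairs used above are:
 */
static inline void example_int_slots(int isrc, int *mask_csr, int *mask_bit,
				     int *map_csr, int *map_byte)
{
	*mask_csr = isrc / 64;	/* which gi_mask[] / CCE_INT_MASK CSR */
	*mask_bit = isrc % 64;	/* which bit within it */
	*map_csr = isrc / 8;	/* which CCE_INT_MAP CSR */
	*map_byte = isrc % 8;	/* which byte lane within it */
}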
12890 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12891 int engine, int msix_intr)
* SDMA engine interrupt sources are grouped by type rather than by
* engine. The per-engine interrupt sources are as follows:
12900 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12902 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12904 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
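
/*
 * Worked example (assuming TXE_NUM_SDMA_ENGINES == 16): engine 3 owns
 * sources IS_SDMA_START + 3, IS_SDMA_START + 19 and IS_SDMA_START + 35,
 * one per interrupt type, and all three are remapped above to the same
 * MSI-X vector.
 */
static inline int example_sdma_source(int type, int engine)
{
	return IS_SDMA_START + type * TXE_NUM_SDMA_ENGINES + engine;
}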
12908 static int request_intx_irq(struct hfi1_devdata *dd)
12912 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12914 ret = request_irq(dd->pcidev->irq, general_interrupt,
12915 IRQF_SHARED, dd->intx_name, dd);
12917 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12920 dd->requested_intx_irq = 1;
12924 static int request_msix_irqs(struct hfi1_devdata *dd)
12926 int first_general, last_general;
12927 int first_sdma, last_sdma;
12928 int first_rx, last_rx;
12931 /* calculate the ranges we are going to use */
12933 last_general = first_general + 1;
12934 first_sdma = last_general;
12935 last_sdma = first_sdma + dd->num_sdma;
12936 first_rx = last_sdma;
12937 last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
12939 /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
12940 dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
12943 * Sanity check - the code expects all SDMA chip source
12944 * interrupts to be in the same CSR, starting at bit 0. Verify
12945 * that this is true by checking the bit location of the start.
12947 BUILD_BUG_ON(IS_SDMA_START % 64);
12949 for (i = 0; i < dd->num_msix_entries; i++) {
12950 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12951 const char *err_info;
12952 irq_handler_t handler;
12953 irq_handler_t thread = NULL;
12956 struct hfi1_ctxtdata *rcd = NULL;
12957 struct sdma_engine *sde = NULL;
12959 /* obtain the arguments to request_irq */
12960 if (first_general <= i && i < last_general) {
12961 idx = i - first_general;
12962 handler = general_interrupt;
12964 snprintf(me->name, sizeof(me->name),
12965 DRIVER_NAME "_%d", dd->unit);
12966 err_info = "general";
12967 me->type = IRQ_GENERAL;
12968 } else if (first_sdma <= i && i < last_sdma) {
12969 idx = i - first_sdma;
12970 sde = &dd->per_sdma[idx];
12971 handler = sdma_interrupt;
12973 snprintf(me->name, sizeof(me->name),
12974 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12976 remap_sdma_interrupts(dd, idx, i);
12977 me->type = IRQ_SDMA;
12978 } else if (first_rx <= i && i < last_rx) {
12979 idx = i - first_rx;
12980 rcd = dd->rcd[idx];
12983 * Set the interrupt register and mask for this
12984 * context's interrupt.
12986 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12987 rcd->imask = ((u64)1) <<
12988 ((IS_RCVAVAIL_START + idx) % 64);
12989 handler = receive_context_interrupt;
12990 thread = receive_context_thread;
12992 snprintf(me->name, sizeof(me->name),
12993 DRIVER_NAME "_%d kctxt%d",
12995 err_info = "receive context";
12996 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12997 me->type = IRQ_RCVCTXT;
12998 rcd->msix_intr = i;
13001 /* not in our expected range - complain, then
13005 "Unexpected extra MSI-X interrupt %d\n", i);
13008 /* no argument, no interrupt */
13011 /* make sure the name is terminated */
13012 me->name[sizeof(me->name) - 1] = 0;
13013 me->irq = pci_irq_vector(dd->pcidev, i);
13015 * On err return me->irq. Don't need to clear this
13016 * because 'arg' has not been set, and cleanup will
13017 * do the right thing.
13022 ret = request_threaded_irq(me->irq, handler, thread, 0,
13026 "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13027 err_info, me->irq, idx, ret);
* assign arg after the request_irq call, so it will be
* cleaned up
13036 ret = hfi1_get_irq_affinity(dd, me);
13038 dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
13044 void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13048 if (!dd->num_msix_entries) {
13049 synchronize_irq(dd->pcidev->irq);
13053 for (i = 0; i < dd->vnic.num_ctxt; i++) {
13054 struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13055 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13057 synchronize_irq(me->irq);
13061 void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13063 struct hfi1_devdata *dd = rcd->dd;
13064 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13066 if (!me->arg) /* => no irq, no affinity */
13069 hfi1_put_irq_affinity(dd, me);
13070 free_irq(me->irq, me->arg);
13075 void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13077 struct hfi1_devdata *dd = rcd->dd;
13078 struct hfi1_msix_entry *me;
13079 int idx = rcd->ctxt;
13083 rcd->msix_intr = dd->vnic.msix_idx++;
13084 me = &dd->msix_entries[rcd->msix_intr];
13087 * Set the interrupt register and mask for this
13088 * context's interrupt.
13090 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13091 rcd->imask = ((u64)1) <<
13092 ((IS_RCVAVAIL_START + idx) % 64);
13094 snprintf(me->name, sizeof(me->name),
13095 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13096 me->name[sizeof(me->name) - 1] = 0;
13097 me->type = IRQ_RCVCTXT;
13098 me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13100 dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
13104 remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13106 ret = request_threaded_irq(me->irq, receive_context_interrupt,
13107 receive_context_thread, 0, me->name, arg);
13109 dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13110 me->irq, idx, ret);
* assign arg after the request_irq call, so it will be
* cleaned up
13119 ret = hfi1_get_irq_affinity(dd, me);
13122 "unable to pin IRQ %d\n", ret);
13123 free_irq(me->irq, me->arg);
13128 * Set the general handler to accept all interrupts, remap all
13129 * chip interrupts back to MSI-X 0.
13131 static void reset_interrupts(struct hfi1_devdata *dd)
13135 /* all interrupts handled by the general handler */
13136 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13137 dd->gi_mask[i] = ~(u64)0;
13139 /* all chip interrupts map to MSI-X 0 */
13140 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13141 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13144 static int set_up_interrupts(struct hfi1_devdata *dd)
13148 int single_interrupt = 0; /* we expect to have all the interrupts */
13152 * 1 general, "slow path" interrupt (includes the SDMA engines
13153 * slow source, SDMACleanupDone)
13154 * N interrupts - one per used SDMA engine
* M interrupts - one per kernel receive context
13157 total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
13159 /* ask for MSI-X interrupts */
13160 request = request_msix(dd, total);
13164 } else if (request == 0) {
13166 /* dd->num_msix_entries already zero */
13167 single_interrupt = 1;
13168 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
13169 } else if (request < total) {
13170 /* using MSI-X, with reduced interrupts */
13171 dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
13176 dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13178 if (!dd->msix_entries) {
13183 dd->num_msix_entries = total;
13184 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13187 /* mask all interrupts */
13188 set_intr_state(dd, 0);
13189 /* clear all pending interrupts */
13190 clear_all_interrupts(dd);
13192 /* reset general handler mask, chip MSI-X mappings */
13193 reset_interrupts(dd);
13195 if (single_interrupt)
13196 ret = request_intx_irq(dd);
13198 ret = request_msix_irqs(dd);
13205 clean_up_interrupts(dd);
13210 * Set up context values in dd. Sets:
13212 * num_rcv_contexts - number of contexts being used
13213 * n_krcv_queues - number of kernel contexts
13214 * first_dyn_alloc_ctxt - first dynamically allocated context
13215 * in array of contexts
13216 * freectxts - number of free user contexts
13217 * num_send_contexts - number of PIO send contexts being used
13219 static int set_up_context_variables(struct hfi1_devdata *dd)
13221 unsigned long num_kernel_contexts;
13222 int total_contexts;
13226 int user_rmt_reduced;
13229 * Kernel receive contexts:
13230 * - Context 0 - control context (VL15/multicast/error)
13231 * - Context 1 - first kernel context
13232 * - Context 2 - second kernel context
13237 * n_krcvqs is the sum of module parameter kernel receive
13238 * contexts, krcvqs[]. It does not include the control
13239 * context, so add that.
13241 num_kernel_contexts = n_krcvqs + 1;
13243 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13245 * Every kernel receive context needs an ACK send context.
* One send context is allocated for each VL{0-7} and VL15
13248 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13250 "Reducing # kernel rcv contexts to: %d, from %lu\n",
13251 (int)(dd->chip_send_contexts - num_vls - 1),
13252 num_kernel_contexts);
13253 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13257 * - default to 1 user context per real (non-HT) CPU core if
13258 * num_user_contexts is negative
13260 if (num_user_contexts < 0)
13261 num_user_contexts =
13262 cpumask_weight(&node_affinity.real_cpu_mask);
13264 total_contexts = num_kernel_contexts + num_user_contexts;
13267 * Adjust the counts given a global max.
13269 if (total_contexts > dd->chip_rcv_contexts) {
13271 "Reducing # user receive contexts to: %d, from %d\n",
13272 (int)(dd->chip_rcv_contexts - num_kernel_contexts),
13273 (int)num_user_contexts);
13274 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
13276 total_contexts = num_kernel_contexts + num_user_contexts;
13279 /* each user context requires an entry in the RMT */
13280 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13281 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13282 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13284 "RMT size is reducing the number of user receive contexts from %d to %d\n",
13285 (int)num_user_contexts,
13288 num_user_contexts = user_rmt_reduced;
13289 total_contexts = num_kernel_contexts + num_user_contexts;
13292 /* Accommodate VNIC contexts */
13293 if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
13294 total_contexts += HFI1_NUM_VNIC_CTXT;
13296 /* the first N are kernel contexts, the rest are user/vnic contexts */
13297 dd->num_rcv_contexts = total_contexts;
13298 dd->n_krcv_queues = num_kernel_contexts;
13299 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13300 dd->num_user_contexts = num_user_contexts;
13301 dd->freectxts = num_user_contexts;
13303 "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
13304 (int)dd->chip_rcv_contexts,
13305 (int)dd->num_rcv_contexts,
13306 (int)dd->n_krcv_queues,
13307 (int)dd->num_rcv_contexts - dd->n_krcv_queues);
13310 * Receive array allocation:
13311 * All RcvArray entries are divided into groups of 8. This
13312 * is required by the hardware and will speed up writes to
13313 * consecutive entries by using write-combining of the entire
* The number of groups is evenly divided among all contexts.
* Any left-over groups are given to the first N user
13320 dd->rcv_entries.group_size = RCV_INCREMENT;
13321 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13322 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13323 dd->rcv_entries.nctxt_extra = ngroups -
13324 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13325 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13326 dd->rcv_entries.ngroups,
13327 dd->rcv_entries.nctxt_extra);
13328 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13329 MAX_EAGER_ENTRIES * 2) {
13330 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13331 dd->rcv_entries.group_size;
13333 "RcvArray group count too high, change to %u\n",
13334 dd->rcv_entries.ngroups);
13335 dd->rcv_entries.nctxt_extra = 0;
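
/*
 * Worked example (hypothetical numbers): with 32768 RcvArray entries and
 * a group size of 8 there are 4096 groups; with 40 receive contexts each
 * context gets 4096 / 40 = 102 groups and the 16 left-over groups
 * (4096 - 40 * 102) become nctxt_extra, handed to the first user
 * contexts.
 */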
13338 * PIO send contexts
13340 ret = init_sc_pools_and_sizes(dd);
13341 if (ret >= 0) { /* success */
13342 dd->num_send_contexts = ret;
13345 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13346 dd->chip_send_contexts,
13347 dd->num_send_contexts,
13348 dd->sc_sizes[SC_KERNEL].count,
13349 dd->sc_sizes[SC_ACK].count,
13350 dd->sc_sizes[SC_USER].count,
13351 dd->sc_sizes[SC_VL15].count);
13352 ret = 0; /* success */
13359 * Set the device/port partition key table. The MAD code
13360 * will ensure that, at least, the partial management
13361 * partition key is present in the table.
13363 static void set_partition_keys(struct hfi1_pportdata *ppd)
13365 struct hfi1_devdata *dd = ppd->dd;
13369 dd_dev_info(dd, "Setting partition keys\n");
13370 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13371 reg |= (ppd->pkeys[i] &
13372 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13374 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13375 /* Each register holds 4 PKey values. */
13376 if ((i % 4) == 3) {
13377 write_csr(dd, RCV_PARTITION_KEY +
13378 ((i - 3) * 2), reg);
13383 /* Always enable HW pkeys check when pkeys table is set */
13384 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
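
/*
 * Illustrative sketch of the packing above: each RcvPartitionKey CSR
 * holds four 16-bit partition keys, so a register is written out after
 * every fourth key.
 */
static inline u64 example_pack_pkeys(const u16 *pkeys)
{
	return (u64)pkeys[0] |
	       ((u64)pkeys[1] << 16) |
	       ((u64)pkeys[2] << 32) |
	       ((u64)pkeys[3] << 48);
}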
13388 * These CSRs and memories are uninitialized on reset and must be
13389 * written before reading to set the ECC/parity bits.
* NOTE: All user context CSRs that are not mmapped write-only
* (e.g. the TID flows) must be initialized even if the driver never
* reads them.
13395 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13400 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13401 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13403 /* SendCtxtCreditReturnAddr */
13404 for (i = 0; i < dd->chip_send_contexts; i++)
13405 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13407 /* PIO Send buffers */
13408 /* SDMA Send buffers */
13410 * These are not normally read, and (presently) have no method
13411 * to be read, so are not pre-initialized
13415 /* RcvHdrTailAddr */
13416 /* RcvTidFlowTable */
13417 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13418 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13419 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13420 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13421 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13425 for (i = 0; i < dd->chip_rcv_array_count; i++)
13426 write_csr(dd, RCV_ARRAY + (8 * i),
13427 RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
13429 /* RcvQPMapTable */
13430 for (i = 0; i < 32; i++)
13431 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13435 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13437 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13440 unsigned long timeout;
13443 /* is the condition present? */
13444 reg = read_csr(dd, CCE_STATUS);
13445 if ((reg & status_bits) == 0)
13448 /* clear the condition */
13449 write_csr(dd, CCE_CTRL, ctrl_bits);
13451 /* wait for the condition to clear */
13452 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13454 reg = read_csr(dd, CCE_STATUS);
13455 if ((reg & status_bits) == 0)
13457 if (time_after(jiffies, timeout)) {
13459 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13460 status_bits, reg & status_bits);
13467 /* set CCE CSRs to chip reset defaults */
13468 static void reset_cce_csrs(struct hfi1_devdata *dd)
13472 /* CCE_REVISION read-only */
13473 /* CCE_REVISION2 read-only */
13474 /* CCE_CTRL - bits clear automatically */
13475 /* CCE_STATUS read-only, use CceCtrl to clear */
13476 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13477 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13478 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13479 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13480 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13481 /* CCE_ERR_STATUS read-only */
13482 write_csr(dd, CCE_ERR_MASK, 0);
13483 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13484 /* CCE_ERR_FORCE leave alone */
13485 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13486 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13487 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13488 /* CCE_PCIE_CTRL leave alone */
13489 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13490 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13491 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13492 CCE_MSIX_TABLE_UPPER_RESETCSR);
13494 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13495 /* CCE_MSIX_PBA read-only */
13496 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13497 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13499 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13500 write_csr(dd, CCE_INT_MAP, 0);
13501 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13502 /* CCE_INT_STATUS read-only */
13503 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13504 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13505 /* CCE_INT_FORCE leave alone */
13506 /* CCE_INT_BLOCKED read-only */
13508 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13509 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13512 /* set MISC CSRs to chip reset defaults */
13513 static void reset_misc_csrs(struct hfi1_devdata *dd)
13517 for (i = 0; i < 32; i++) {
13518 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13519 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13520 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13523 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
* only be written in 128-byte chunks
13526 /* init RSA engine to clear lingering errors */
13527 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13528 write_csr(dd, MISC_CFG_RSA_MU, 0);
13529 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13530 /* MISC_STS_8051_DIGEST read-only */
13531 /* MISC_STS_SBM_DIGEST read-only */
13532 /* MISC_STS_PCIE_DIGEST read-only */
13533 /* MISC_STS_FAB_DIGEST read-only */
13534 /* MISC_ERR_STATUS read-only */
13535 write_csr(dd, MISC_ERR_MASK, 0);
13536 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13537 /* MISC_ERR_FORCE leave alone */
13540 /* set TXE CSRs to chip reset defaults */
13541 static void reset_txe_csrs(struct hfi1_devdata *dd)
13548 write_csr(dd, SEND_CTRL, 0);
13549 __cm_reset(dd, 0); /* reset CM internal state */
13550 /* SEND_CONTEXTS read-only */
13551 /* SEND_DMA_ENGINES read-only */
13552 /* SEND_PIO_MEM_SIZE read-only */
13553 /* SEND_DMA_MEM_SIZE read-only */
13554 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13555 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13556 /* SEND_PIO_ERR_STATUS read-only */
13557 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13558 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13559 /* SEND_PIO_ERR_FORCE leave alone */
13560 /* SEND_DMA_ERR_STATUS read-only */
13561 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13562 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13563 /* SEND_DMA_ERR_FORCE leave alone */
13564 /* SEND_EGRESS_ERR_STATUS read-only */
13565 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13566 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13567 /* SEND_EGRESS_ERR_FORCE leave alone */
13568 write_csr(dd, SEND_BTH_QP, 0);
13569 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13570 write_csr(dd, SEND_SC2VLT0, 0);
13571 write_csr(dd, SEND_SC2VLT1, 0);
13572 write_csr(dd, SEND_SC2VLT2, 0);
13573 write_csr(dd, SEND_SC2VLT3, 0);
13574 write_csr(dd, SEND_LEN_CHECK0, 0);
13575 write_csr(dd, SEND_LEN_CHECK1, 0);
13576 /* SEND_ERR_STATUS read-only */
13577 write_csr(dd, SEND_ERR_MASK, 0);
13578 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13579 /* SEND_ERR_FORCE read-only */
13580 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13581 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13582 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13583 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13584 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13585 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13586 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13587 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13588 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13589 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13590 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13591 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13592 /* SEND_CM_CREDIT_USED_STATUS read-only */
13593 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13594 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13595 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13596 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13597 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13598 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13599 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13600 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13601 /* SEND_CM_CREDIT_USED_VL read-only */
13602 /* SEND_CM_CREDIT_USED_VL15 read-only */
13603 /* SEND_EGRESS_CTXT_STATUS read-only */
13604 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13605 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13606 /* SEND_EGRESS_ERR_INFO read-only */
13607 /* SEND_EGRESS_ERR_SOURCE read-only */
13610 * TXE Per-Context CSRs
13612 for (i = 0; i < dd->chip_send_contexts; i++) {
13613 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13614 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13615 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13616 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13617 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13618 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13619 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13620 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13621 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13622 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13623 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13624 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13628 * TXE Per-SDMA CSRs
13630 for (i = 0; i < dd->chip_sdma_engines; i++) {
13631 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13632 /* SEND_DMA_STATUS read-only */
13633 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13634 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13635 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13636 /* SEND_DMA_HEAD read-only */
13637 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13638 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13639 /* SEND_DMA_IDLE_CNT read-only */
13640 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13641 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13642 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13643 /* SEND_DMA_ENG_ERR_STATUS read-only */
13644 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13645 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13646 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13647 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13648 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13649 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13650 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13651 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13652 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13653 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13659 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13661 static void init_rbufs(struct hfi1_devdata *dd)
13667 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13672 reg = read_csr(dd, RCV_STATUS);
13673 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13674 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13677 * Give up after 1ms - maximum wait time.
* RBuf size is 136 KiB. The slowest possible rate is PCIe Gen1 x1 at
* 250 MB/s bandwidth. Lower the rate to 66% for overhead to get:
* 136 KiB / (66% * 250 MB/s) = 844 us
13683 if (count++ > 500) {
13685 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13689 udelay(2); /* do not busy-wait the CSR */
13692 /* start the init - expect RcvCtrl to be 0 */
13693 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
* Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13697 * period after the write before RcvStatus.RxRbufInitDone is valid.
13698 * The delay in the first run through the loop below is sufficient and
* required before the first read of RcvStatus.RxRbufInitDone.
13701 read_csr(dd, RCV_CTRL);
13703 /* wait for the init to finish */
13706 /* delay is required first time through - see above */
13707 udelay(2); /* do not busy-wait the CSR */
13708 reg = read_csr(dd, RCV_STATUS);
13709 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13712 /* give up after 100us - slowest possible at 33MHz is 73us */
13713 if (count++ > 50) {
13715 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13722 /* set RXE CSRs to chip reset defaults */
13723 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13730 write_csr(dd, RCV_CTRL, 0);
13732 /* RCV_STATUS read-only */
13733 /* RCV_CONTEXTS read-only */
13734 /* RCV_ARRAY_CNT read-only */
13735 /* RCV_BUF_SIZE read-only */
13736 write_csr(dd, RCV_BTH_QP, 0);
13737 write_csr(dd, RCV_MULTICAST, 0);
13738 write_csr(dd, RCV_BYPASS, 0);
13739 write_csr(dd, RCV_VL15, 0);
13740 /* this is a clear-down */
13741 write_csr(dd, RCV_ERR_INFO,
13742 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13743 /* RCV_ERR_STATUS read-only */
13744 write_csr(dd, RCV_ERR_MASK, 0);
13745 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13746 /* RCV_ERR_FORCE leave alone */
13747 for (i = 0; i < 32; i++)
13748 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13749 for (i = 0; i < 4; i++)
13750 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13751 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13752 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13753 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13754 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13755 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13756 clear_rsm_rule(dd, i);
13757 for (i = 0; i < 32; i++)
13758 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13761 * RXE Kernel and User Per-Context CSRs
13763 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13765 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13766 /* RCV_CTXT_STATUS read-only */
13767 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13768 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13769 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13770 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13771 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13772 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13773 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13774 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13775 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13776 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13779 /* RCV_HDR_TAIL read-only */
13780 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13781 /* RCV_EGR_INDEX_TAIL read-only */
13782 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13783 /* RCV_EGR_OFFSET_TAIL read-only */
13784 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13785 write_uctxt_csr(dd, i,
13786 RCV_TID_FLOW_TABLE + (8 * j), 0);
13792 * Set sc2vl tables.
13794 * They power on to zeros, so to avoid send context errors
13795 * they need to be set:
13797 * SC 0-7 -> VL 0-7 (respectively)
13802 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13805 /* init per architecture spec, constrained by hardware capability */
13807 /* HFI maps sent packets */
13808 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13814 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13820 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13826 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13833 /* DC maps received packets */
13834 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13836 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13837 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13838 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13840 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13841 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13843 /* initialize the cached sc2vl values consistently with h/w */
13844 for (i = 0; i < 32; i++) {
		if (i < 8 || i == 15)
			*((u8 *)(dd->sc2vl) + i) = (u8)i;
		else
			*((u8 *)(dd->sc2vl) + i) = 0;
	}
}
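
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): with the defaults written above, a cached lookup
 * behaves as SC 0-7 -> VL 0-7, SC 15 -> VL 15, everything else -> VL 0.
 */
static inline u8 example_cached_sc_to_vl(struct hfi1_devdata *dd, u8 sc)
{
	/* dd->sc2vl caches 32 one-byte entries, mirroring the CSRs */
	return *((u8 *)(dd->sc2vl) + (sc & 31));
}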
/*
 * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13854 * depend on the chip going through a power-on reset - a driver may be loaded
13855 * and unloaded many times.
13857 * Do not write any CSR values to the chip in this routine - there may be
 * a reset following the (possible) FLR in this routine.
 */
static void init_chip(struct hfi1_devdata *dd)
{
	int i;

	/*
	 * Put the HFI CSRs in a known state.
13867 * Combine this with a DC reset.
13869 * Stop the device from doing anything while we do a
13870 * reset. We know there are no other active users of
13871 * the device since we are now in charge. Turn off
	 * all outbound and inbound traffic and make sure
	 * the device does not generate any interrupts.
	 */
13876 /* disable send contexts and SDMA engines */
13877 write_csr(dd, SEND_CTRL, 0);
13878 for (i = 0; i < dd->chip_send_contexts; i++)
13879 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13880 for (i = 0; i < dd->chip_sdma_engines; i++)
13881 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13882 /* disable port (turn off RXE inbound traffic) and contexts */
13883 write_csr(dd, RCV_CTRL, 0);
13884 for (i = 0; i < dd->chip_rcv_contexts; i++)
		write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13886 /* mask all interrupt sources */
13887 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13888 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
	/*
	 * DC Reset: do a full DC reset before the register clear.
13892 * A recommended length of time to hold is one CSR read,
13893 * so reread the CceDcCtrl. Then, hold the DC in reset
	 * across the clear.
	 */
13896 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13897 (void)read_csr(dd, CCE_DC_CTRL);
13901 * A FLR will reset the SPC core and part of the PCIe.
	 * The parts that need to be restored have already been saved.
13905 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13907 /* do the FLR, the DC reset will remain */
13908 pcie_flr(dd->pcidev);
13910 /* restore command and BARs */
13911 restore_pci_variables(dd);
		if (is_ax(dd)) {
			dd_dev_info(dd, "Resetting CSRs with FLR\n");
			pcie_flr(dd->pcidev);
			restore_pci_variables(dd);
		}
13919 dd_dev_info(dd, "Resetting CSRs with writes\n");
13920 reset_cce_csrs(dd);
13921 reset_txe_csrs(dd);
13922 reset_rxe_csrs(dd);
13923 reset_misc_csrs(dd);
13925 /* clear the DC reset */
13926 write_csr(dd, CCE_DC_CTRL, 0);
13928 /* Set the LED off */
	/*
	 * Clear the QSFP reset.
13933 * An FLR enforces a 0 on all out pins. The driver does not touch
13934 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
	 * anything plugged in held in reset, if it pays attention
	 * to RESET_N.
13937 * Prime examples of this are optical cables. Set all pins high.
13938 * I2CCLK and I2CDAT will change per direction, and INT_N and
	 * MODPRS_N are input only and their value is ignored.
	 */
13941 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13942 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
	init_chip_resources(dd);
}
static void init_early_variables(struct hfi1_devdata *dd)
{
	int i;

	/* assign link credit variables */
	dd->vau = CM_VAU;
13952 dd->link_credits = CM_GLOBAL_CREDITS;
	if (is_ax(dd))
		dd->link_credits--;
13955 dd->vcu = cu_to_vcu(hfi1_cu);
13956 /* enough room for 8 MAD packets plus header - 17K */
13957 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13958 if (dd->vl15_init > dd->link_credits)
13959 dd->vl15_init = dd->link_credits;
13961 write_uninitialized_csrs_and_memories(dd);
13963 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13964 for (i = 0; i < dd->num_pports; i++) {
13965 struct hfi1_pportdata *ppd = &dd->pport[i];
13967 set_partition_keys(ppd);
	init_sc2vl_tables(dd);
}
static void init_kdeth_qp(struct hfi1_devdata *dd)
{
13974 /* user changed the KDETH_QP */
13975 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13976 /* out of range or illegal value */
		dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
		kdeth_qp = 0;
	}
13980 if (kdeth_qp == 0) /* not set, or failed range check */
13981 kdeth_qp = DEFAULT_KDETH_QP;
13983 write_csr(dd, SEND_BTH_QP,
13984 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13985 SEND_BTH_QP_KDETH_QP_SHIFT);
13987 write_csr(dd, RCV_BTH_QP,
13988 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
		  RCV_BTH_QP_KDETH_QP_SHIFT);
}
/**
 * init_qpmap_table
 * @dd - device data
 * @first_ctxt - first context
 * @last_ctxt - last context
 * This routine sets the qpn mapping table that
13999 * is indexed by qpn[8:1].
14001 * The routine will round robin the 256 settings
14002 * from first_ctxt to last_ctxt.
14004 * The first/last looks ahead to having specialized
14005 * receive contexts for mgmt and bypass. Normal
 * verbs traffic is assumed to be on a range
 * of receive contexts.
 */
static void init_qpmap_table(struct hfi1_devdata *dd,
			     u32 first_ctxt, u32 last_ctxt)
{
	u64 reg = 0;
	u64 regno = RCV_QP_MAP_TABLE;
	u64 ctxt = first_ctxt;
	int i;

	for (i = 0; i < 256; i++) {
		reg |= ctxt << (8 * (i % 8));
		ctxt++;
		if (ctxt > last_ctxt)
			ctxt = first_ctxt;
		/* eight one-byte entries fill each 64-bit map register */
		if (i % 8 == 7) {
			write_csr(dd, regno, reg);
			reg = 0;
			regno += 8;
		}
	}

	add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK |
		    RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
}
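
/*
 * Illustrative sketch (not driver code; helper and use are
 * hypothetical): given the round-robin fill in init_qpmap_table(),
 * the context servicing a QP can be predicted from qpn[8:1].
 */
static inline u32 example_qpn_to_ctxt(u32 qpn, u32 first_ctxt, u32 last_ctxt)
{
	u32 nctxts = last_ctxt - first_ctxt + 1;
	u32 idx = (qpn >> 1) & 0xff;	/* table is indexed by qpn[8:1] */

	return first_ctxt + (idx % nctxts);
}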
struct rsm_map_table {
	u64 map[NUM_MAP_REGS];
	unsigned int used;
};

struct rsm_rule_data {
	u8 offset;
	u8 pkt_type;
	u32 field1_off;
	u32 field2_off;
	u32 index1_off;
	u32 index1_width;
	u32 index2_off;
	u32 index2_width;
	u32 mask1;
	u32 value1;
	u32 mask2;
	u32 value2;
};
/*
 * Return an initialized RMT map table for users to fill in.  OK if it
 * returns NULL, indicating no table.
 */
14058 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;
14061 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
	rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
	if (rmt) {
		memset(rmt->map, rxcontext, sizeof(rmt->map));
		rmt->used = 0;
	}

	return rmt;
}
/*
 * Write the final RMT map table to the chip.  OK if the table is NULL,
 * meaning no table; the caller owns and frees the table.
 */
14076 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14077 struct rsm_map_table *rmt)
{
	int i;

	if (rmt) {
		/* write table to chip */
		for (i = 0; i < NUM_MAP_REGS; i++)
			write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i),
				  rmt->map[i]);

		/* enable RSM */
		add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
	}
}
/*
 * Add a receive side mapping rule.
 */
14094 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14095 struct rsm_rule_data *rrd)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14098 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14099 1ull << rule_index | /* enable bit */
14100 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14101 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14102 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14103 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14104 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14105 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14106 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14107 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14108 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14109 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14110 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14111 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
		  (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
}
/*
 * Clear a receive side mapping rule.
 */
14118 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
{
	write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14121 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
	write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
}
14125 /* return the number of RSM map table entries that will be used for QOS */
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np)
{
	int i;
	unsigned int m, n;
	u8 max_by_vl = 0;
14133 /* is QOS active at all? */
	if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
	    num_vls == 1 ||
	    krcvqsset <= 1)
		goto no_qos;
14139 /* determine bits for qpn */
14140 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14141 if (krcvqs[i] > max_by_vl)
14142 max_by_vl = krcvqs[i];
	if (max_by_vl > 32)
		goto no_qos;
14145 m = ilog2(__roundup_pow_of_two(max_by_vl));
14147 /* determine bits for vl */
14148 n = ilog2(__roundup_pow_of_two(num_vls));
	/* reject if too much is used */
	if ((m + n) > 7)
		goto no_qos;

	*mp = m;
	*np = n;
	return 1 << (m + n);

no_qos:
	*mp = 0;
	*np = 0;
	return 0;
}
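
/*
 * Worked example (hypothetical parameters, not driver code): with
 * krcvqs[] = {3, 3, 3, 3} and num_vls = 4, max_by_vl = 3, so
 * m = ilog2(roundup_pow_of_two(3)) = 2 and n = ilog2(4) = 2, and
 * qos_rmt_entries() reports 1 << (2 + 2) = 16 RSM map entries.
 */
static inline unsigned int example_qos_entries(unsigned int max_by_vl,
					       unsigned int nvls)
{
	unsigned int m = ilog2(__roundup_pow_of_two(max_by_vl));
	unsigned int n = ilog2(__roundup_pow_of_two(nvls));

	return 1 << (m + n);
}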
/**
 * init_qos - init RX qos
14171 * @dd - device data
14172 * @rmt - RSM map table
14174 * This routine initializes Rule 0 and the RSM map table to implement
14175 * quality of service (qos).
14177 * If all of the limit tests succeed, qos is applied based on the array
14178 * interpretation of krcvqs where entry 0 is VL0.
14180 * The number of vl bits (n) and the number of qpn bits (m) are computed to
 * feed both the RSM map table and the single rule.
 */
14183 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
14186 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
	unsigned int rmt_entries;
	u64 reg;

	if (!rmt)
		goto bail;
	rmt_entries = qos_rmt_entries(dd, &m, &n);
	if (rmt_entries == 0)
		goto bail;
14195 qpns_per_vl = 1 << m;
14197 /* enough room in the map table? */
14198 rmt_entries = 1 << (m + n);
	if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
		goto bail;
	/* add qos entries to the RSM map table */
14203 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14206 for (qpn = 0, tctxt = ctxt;
14207 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14208 unsigned idx, regoff, regidx;
14210 /* generate the index the hardware will produce */
14211 idx = rmt->used + ((qpn << n) ^ i);
			regoff = (idx % 8) * 8;
			regidx = idx / 8;
14214 /* replace default with context number */
14215 reg = rmt->map[regidx];
			reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK <<
				 regoff);
14218 reg |= (u64)(tctxt++) << regoff;
14219 rmt->map[regidx] = reg;
			if (tctxt == ctxt + krcvqs[i])
				tctxt = ctxt;
		}
		ctxt += krcvqs[i];
	}
	rrd.offset = rmt->used;
	rrd.pkt_type = 2;
14228 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14229 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14230 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14231 rrd.index1_width = n;
14232 rrd.index2_off = QPN_SELECT_OFFSET;
14233 rrd.index2_width = m + n;
14234 rrd.mask1 = LRH_BTH_MASK;
14235 rrd.value1 = LRH_BTH_VALUE;
14236 rrd.mask2 = LRH_SC_MASK;
14237 rrd.value2 = LRH_SC_VALUE;
14240 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14242 /* mark RSM map entries as used */
14243 rmt->used += rmt_entries;
14244 /* map everything else to the mcast/err/vl15 context */
14245 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
	dd->qos_shift = n + 1;
	return;
bail:
	dd->qos_shift = 1;
	init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
}
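
/*
 * Illustrative sketch (not driver code; names are hypothetical): the
 * RSM map index rule 0 produces for a verbs packet, mirroring the
 * fill loop in init_qos(): idx = rmt->used + ((qpn << n) ^ vl).
 */
static inline unsigned int example_qos_rsm_index(unsigned int used,
						 unsigned int vl_bits,
						 unsigned int qpn_sel,
						 unsigned int vl)
{
	return used + ((qpn_sel << vl_bits) ^ vl);
}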
14253 static void init_user_fecn_handling(struct hfi1_devdata *dd,
14254 struct rsm_map_table *rmt)
{
	struct rsm_rule_data rrd;
	u64 reg;
	int i, idx, regoff, regidx;
	u8 offset;
14261 /* there needs to be enough room in the map table */
14262 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
		dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
		return;
	}

	/*
14268 * RSM will extract the destination context as an index into the
14269 * map table. The destination contexts are a sequential block
14270 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14271 * Map entries are accessed as offset + extracted value. Adjust
14272 * the added offset so this sequence can be placed anywhere in
14273 * the table - as long as the entries themselves do not wrap.
14274 * There are only enough bits in offset for the table size, so
	 * start with that to allow for a "negative" offset.
	 */
14277 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14278 (int)dd->first_dyn_alloc_ctxt);
14280 for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14281 i < dd->num_rcv_contexts; i++, idx++) {
14282 /* replace with identity mapping */
		regoff = (idx % 8) * 8;
		regidx = idx / 8;
14285 reg = rmt->map[regidx];
14286 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14287 reg |= (u64)i << regoff;
14288 rmt->map[regidx] = reg;
	/*
	 * For RSM intercept of Expected FECN packets:
14293 * o packet type 0 - expected
14294 * o match on F (bit 95), using select/match 1, and
14295 * o match on SH (bit 133), using select/match 2.
14297 * Use index 1 to extract the 8-bit receive context from DestQP
	 * (start at bit 64).  Use that as the RSM map table index.
	 */
	rrd.offset = offset;
	rrd.pkt_type = 0;
14302 rrd.field1_off = 95;
14303 rrd.field2_off = 133;
14304 rrd.index1_off = 64;
14305 rrd.index1_width = 8;
14306 rrd.index2_off = 0;
	rrd.index2_width = 0;
	rrd.mask1 = 1;
	rrd.value1 = 1;
	rrd.mask2 = 1;
	rrd.value2 = 1;
14314 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
	rmt->used += dd->num_user_contexts;
}
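
/*
 * Worked example (not driver code, hypothetical numbers, assuming
 * NUM_MAP_ENTRIES == 256 so the u8 cast wraps correctly): with
 * rmt->used = 20 and first_dyn_alloc_ctxt = 8, the offset is
 * (256 + 20 - 8) & 0xff = 12, so extracted context 8 lands on map
 * entry 8 + 12 = 20, the first entry the routine above filled.
 */
static inline u8 example_fecn_map_entry(unsigned int used,
					unsigned int first_ctxt,
					unsigned int dest_ctxt)
{
	u8 off = (u8)(NUM_MAP_ENTRIES + used - first_ctxt);

	return (u8)(dest_ctxt + off);
}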
14319 /* Initialize RSM for VNIC */
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
{
	u8 i, j, ctx_id = 0;
	u64 reg, regoff;
	struct rsm_rule_data rrd;
14328 if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14329 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
			   dd->vnic.rmt_start);
		return;
	}
14334 dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14335 dd->vnic.rmt_start,
14336 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14338 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14339 regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14340 reg = read_csr(dd, regoff);
14341 for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14342 /* Update map register with vnic context */
14343 j = (dd->vnic.rmt_start + i) % 8;
14344 reg &= ~(0xffllu << (j * 8));
14345 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14346 /* Wrap up vnic ctx index */
14347 ctx_id %= dd->vnic.num_ctxt;
14348 /* Write back map register */
14349 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14350 dev_dbg(&(dd)->pcidev->dev,
14351 "Vnic rsm map reg[%d] =0x%llx\n",
14352 regoff - RCV_RSM_MAP_TABLE, reg);
			write_csr(dd, regoff, reg);
			regoff += 8;
14356 if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14357 reg = read_csr(dd, regoff);
14361 /* Add rule for vnic */
	rrd.offset = dd->vnic.rmt_start;
	rrd.pkt_type = 4;
14364 /* Match 16B packets */
14365 rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14366 rrd.mask1 = L2_TYPE_MASK;
14367 rrd.value1 = L2_16B_VALUE;
14368 /* Match ETH L4 packets */
14369 rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14370 rrd.mask2 = L4_16B_TYPE_MASK;
14371 rrd.value2 = L4_16B_ETH_VALUE;
14372 /* Calc context from veswid and entropy */
14373 rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14374 rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14375 rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14376 rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14377 add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14379 /* Enable RSM if not already enabled */
	add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
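
/*
 * Note (illustrative, not driver code): both index widths above are
 * ilog2(NUM_VNIC_MAP_ENTRIES) bits, so the extracted veswid and
 * entropy fields together select among the NUM_VNIC_MAP_ENTRIES map
 * entries that begin at rrd.offset = dd->vnic.rmt_start.
 */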
14383 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
{
	clear_rsm_rule(dd, RSM_INS_VNIC);
14387 /* Disable RSM if used only by vnic */
	if (dd->vnic.rmt_start == 0)
		clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
14392 static void init_rxe(struct hfi1_devdata *dd)
{
	struct rsm_map_table *rmt;
14396 /* enable all receive errors */
14397 write_csr(dd, RCV_ERR_MASK, ~0ull);
14399 rmt = alloc_rsm_map_table(dd);
	/* set up QOS, including the QPN map table */
	init_qos(dd, rmt);
	init_user_fecn_handling(dd, rmt);
14403 complete_rsm_map_table(dd, rmt);
14404 /* record number of used rsm map entries for vnic */
	if (rmt) {
		dd->vnic.rmt_start = rmt->used;
		kfree(rmt);
	}
	/*
	 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14410 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14411 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14412 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14413 * Max_PayLoad_Size set to its minimum of 128.
14415 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14416 * (64 bytes). Max_Payload_Size is possibly modified upward in
	 * tune_pcie_caps() which is called after this routine.
	 */
}
static void init_other(struct hfi1_devdata *dd)
{
14423 /* enable all CCE errors */
14424 write_csr(dd, CCE_ERR_MASK, ~0ull);
14425 /* enable *some* Misc errors */
14426 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14427 /* enable all DC errors, except LCB */
14428 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
	write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
}
/*
 * Fill out the given AU table using the given CU.  A CU is defined in
 * terms of AUs.  The table is an encoding: given the index, how many
 * AUs does that index represent?
 *
 * NOTE: Assumes that the register layout is the same for the
 * local and remote tables.
 */
14440 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14441 u32 csr0to3, u32 csr4to7)
{
	write_csr(dd, csr0to3,
		  0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
		  1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
		  2ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
		  4ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
	write_csr(dd, csr4to7,
		  8ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
		  16ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
		  32ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
		  64ull * cu <<
		  SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
}
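
/*
 * Illustrative sketch (not driver code; helper is hypothetical): the
 * doubling encoding written above, i.e. entries 0 and 1 are fixed at
 * 0 and 1 AU, and entry i >= 2 represents (1 << (i - 1)) * CU AUs.
 */
static inline u32 example_au_table_entry(u32 cu, unsigned int i)
{
	if (i == 0)
		return 0;
	if (i == 1)
		return 1;
	return (1u << (i - 1)) * cu;	/* 2*cu, 4*cu, ..., 64*cu */
}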
14461 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
			   SEND_CM_LOCAL_AU_TABLE4_TO7);
}
14467 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
{
	assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
			   SEND_CM_REMOTE_AU_TABLE4_TO7);
}
static void init_txe(struct hfi1_devdata *dd)
{
	int i;

14477 /* enable all PIO, SDMA, general, and Egress errors */
14478 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14479 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14480 write_csr(dd, SEND_ERR_MASK, ~0ull);
14481 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14483 /* enable all per-context and per-SDMA engine errors */
14484 for (i = 0; i < dd->chip_send_contexts; i++)
14485 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14486 for (i = 0; i < dd->chip_sdma_engines; i++)
14487 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14489 /* set the local CU to AU mapping */
14490 assign_local_cm_au_table(dd, dd->vcu);
14493 * Set reasonable default for Credit Return Timer
14494 * Don't set on Simulator - causes it to choke.
14496 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
		write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
}
14500 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
{
	struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
	unsigned int sctxt;
	u64 reg;
	int ret = 0;
	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
14512 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14513 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14514 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14515 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14516 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14517 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14518 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14520 * Enable send-side J_KEY integrity check, unless this is A0 h/w
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
		reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}
14528 /* Enable J_KEY check on receive context. */
14529 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14530 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14531 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
	write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
done:
	return ret;
}
14537 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
{
	struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
	unsigned int sctxt;
	u64 reg;
	int ret = 0;
	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
	sctxt = rcd->sc->hw_context;
14549 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
	/*
	 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
	 * This check would not have been enabled for A0 h/w; see
	 * set_ctxt_jkey().
	 */
	if (!is_ax(dd)) {
		reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
		reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
		write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
	}
14560 /* Turn off the J_KEY on the receive side */
	write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
done:
	return ret;
}
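
/*
 * Illustrative usage sketch (not driver code; context number and key
 * are hypothetical): a context typically has its J_KEY programmed at
 * setup and cleared at teardown.
 */
static inline void example_jkey_lifetime(struct hfi1_devdata *dd)
{
	u16 jkey = 0x1234;	/* hypothetical job key */
	unsigned int ctxt = 3;	/* hypothetical context number */

	if (!hfi1_set_ctxt_jkey(dd, ctxt, jkey)) {
		/* ... context is used here ... */
		hfi1_clear_ctxt_jkey(dd, ctxt);
	}
}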
14566 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
{
	struct hfi1_ctxtdata *rcd;
	unsigned int sctxt;
	u64 reg;
	int ret = 0;
	if (ctxt < dd->num_rcv_contexts) {
		rcd = dd->rcd[ctxt];
	} else {
		ret = -EINVAL;
		goto done;
	}
	if (!rcd || !rcd->sc) {
		ret = -EINVAL;
		goto done;
	}
14583 sctxt = rcd->sc->hw_context;
14584 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14585 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14586 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14587 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14588 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14589 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
	write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
done:
	return ret;
}
int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
{
	u8 hw_ctxt;
	u64 reg;
	if (!ctxt || !ctxt->sc)
		return -EINVAL;

	if (ctxt->ctxt >= dd->num_rcv_contexts)
		return -EINVAL;
14606 hw_ctxt = ctxt->sc->hw_context;
14607 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14608 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14609 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
	write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);

	return 0;
}
/*
 * Start doing the clean up of the chip.  Our clean up happens in multiple
 * stages and this is just the first.
 */
14619 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14624 clean_up_interrupts(dd);
14625 finish_chip_resources(dd);
14628 #define HFI_BASE_GUID(dev) \
14629 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
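
/*
 * Illustrative sketch (not driver code; helper is hypothetical): two
 * HFIs on one ASIC differ only in the GUID bit masked off by
 * HFI_BASE_GUID(), so peers compare equal after masking.
 */
static inline bool example_same_asic(struct hfi1_devdata *a,
				     struct hfi1_devdata *b)
{
	return HFI_BASE_GUID(a) == HFI_BASE_GUID(b) && a->unit != b->unit;
}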
/*
 * Information can be shared between the two HFIs on the same ASIC
14633 * in the same OS. This function finds the peer device and sets
 * up a shared structure.
 */
14636 static int init_asic_data(struct hfi1_devdata *dd)
14638 unsigned long flags;
14639 struct hfi1_devdata *tmp, *peer = NULL;
	struct hfi1_asic_data *asic_data;
	int ret = 0;
14643 /* pre-allocate the asic structure in case we are the first device */
	asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
	if (!asic_data)
		return -ENOMEM;
14648 spin_lock_irqsave(&hfi1_devs_lock, flags);
14649 /* Find our peer device */
14650 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14651 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
		    dd->unit != tmp->unit) {
			peer = tmp;
			break;
		}
	}
	if (peer) {
		/* use already allocated structure */
		dd->asic_data = peer->asic_data;
		kfree(asic_data);
	} else {
		dd->asic_data = asic_data;
		mutex_init(&dd->asic_data->asic_resource_mutex);
	}
14666 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14667 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14669 /* first one through - set up i2c devices */
	if (!peer)
		ret = set_up_i2c(dd, dd->asic_data);

	return ret;
}
/*
 * Set dd->boardname.  Use a generic name if a name is not returned from
14678 * EFI variable space.
 * Return 0 on success, -ENOMEM if space could not be allocated.
 */
14682 static int obtain_boardname(struct hfi1_devdata *dd)
14684 /* generic board description */
14685 const char generic[] =
14686 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
	unsigned long size;
	int ret;
	ret = read_hfi1_efi_var(dd, "description", &size,
				(void **)&dd->boardname);
	if (ret) {
		dd_dev_info(dd, "Board description not found\n");
		/* use generic description */
		dd->boardname = kstrdup(generic, GFP_KERNEL);
		if (!dd->boardname)
			return -ENOMEM;
	}

	return 0;
}
/*
 * Check the interrupt registers to make sure that they are mapped
 * correctly.  It is intended to help the user identify any mis-mapping
 * by the VMM when the driver is running in a VM.  This function should
 * only be called before interrupts are set up properly.
 *
 * Return 0 on success, -EINVAL on failure.
 */
14710 static int check_int_registers(struct hfi1_devdata *dd)
{
	u64 reg;
	u64 all_bits = ~(u64)0;
	u64 mask;
14716 /* Clear CceIntMask[0] to avoid raising any interrupts */
14717 mask = read_csr(dd, CCE_INT_MASK);
14718 write_csr(dd, CCE_INT_MASK, 0ull);
	reg = read_csr(dd, CCE_INT_MASK);
	if (reg)
		goto err_exit;
14723 /* Clear all interrupt status bits */
14724 write_csr(dd, CCE_INT_CLEAR, all_bits);
	reg = read_csr(dd, CCE_INT_STATUS);
	if (reg)
		goto err_exit;
14729 /* Set all interrupt status bits */
14730 write_csr(dd, CCE_INT_FORCE, all_bits);
14731 reg = read_csr(dd, CCE_INT_STATUS);
	if (reg != all_bits)
		goto err_exit;
14735 /* Restore the interrupt mask */
14736 write_csr(dd, CCE_INT_CLEAR, all_bits);
	write_csr(dd, CCE_INT_MASK, mask);

	return 0;

err_exit:
	write_csr(dd, CCE_INT_MASK, mask);
	dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
	return -EINVAL;
}
/**
 * Allocate and initialize the device structure for the hfi.
14748 * @dev: the pci_dev for hfi1_ib device
14749 * @ent: pci_device_id struct for this dev
 * Also allocates, initializes, and returns the devdata struct for this
 * device.
14754 * This is global, and is called directly at init to set up the
 * chip-specific function pointers for later use.
 */
14757 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14758 const struct pci_device_id *ent)
{
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	u64 reg;
	int i, ret;
14764 static const char * const inames[] = { /* implementation names */
		"RTL silicon",
		"RTL VCS simulation",
14767 "RTL FPGA emulation",
14768 "Functional simulator"
14770 struct pci_dev *parent = pdev->bus->self;
	dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
				sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd))
		goto bail;
	ppd = dd->pport;
	for (i = 0; i < dd->num_pports; i++, ppd++) {
		int vl;

		/* init common fields */
		hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14781 /* DC supports 4 link widths */
14782 ppd->link_width_supported =
14783 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14784 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14785 ppd->link_width_downgrade_supported =
14786 ppd->link_width_supported;
14787 /* start out enabling only 4X */
14788 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14789 ppd->link_width_downgrade_enabled =
14790 ppd->link_width_downgrade_supported;
14791 /* link width active is 0 when link is down */
14792 /* link width downgrade active is 0 when link is down */
14794 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14795 num_vls > HFI1_MAX_VLS_SUPPORTED) {
14796 hfi1_early_err(&pdev->dev,
14797 "Invalid num_vls %u, using %u VLs\n",
14798 num_vls, HFI1_MAX_VLS_SUPPORTED);
14799 num_vls = HFI1_MAX_VLS_SUPPORTED;
14801 ppd->vls_supported = num_vls;
14802 ppd->vls_operational = ppd->vls_supported;
14803 ppd->actual_vls_operational = ppd->vls_supported;
14804 /* Set the default MTU. */
14805 for (vl = 0; vl < num_vls; vl++)
14806 dd->vld[vl].mtu = hfi1_max_mtu;
14807 dd->vld[15].mtu = MAX_MAD_PACKET;
14809 * Set the initial values to reasonable default, will be set
14810 * for real when link is up.
14812 ppd->lstate = IB_PORT_DOWN;
14813 ppd->overrun_threshold = 0x4;
14814 ppd->phy_error_threshold = 0xf;
14815 ppd->port_crc_mode_enabled = link_crc_mask;
14816 /* initialize supported LTP CRC mode */
14817 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14818 /* initialize enabled LTP CRC mode */
14819 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14820 /* start in offline */
14821 ppd->host_link_state = HLS_DN_OFFLINE;
14822 init_vl_arb_caches(ppd);
		ppd->pstate = PLS_OFFLINE;
	}
14826 dd->link_default = HLS_DN_POLL;
14829 * Do remaining PCIe setup and save PCIe values in dd.
14830 * Any error printing is already done by the init code.
14831 * On return, we have the chip mapped.
14833 ret = hfi1_pcie_ddinit(dd, pdev);
14837 /* verify that reads actually work, save revision for reset check */
14838 dd->revision = read_csr(dd, CCE_REVISION);
14839 if (dd->revision == ~(u64)0) {
		dd_dev_err(dd, "cannot read chip CSRs\n");
		ret = -EINVAL;
		goto bail_cleanup;
	}
14844 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14845 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14846 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14847 & CCE_REVISION_CHIP_REV_MINOR_MASK;
	/*
	 * Check interrupt registers mapping if the driver has no access to
14851 * the upstream component. In this case, it is likely that the driver
	 * is running in a VM.
	 */
	if (!parent) {
		ret = check_int_registers(dd);
		if (ret)
			goto bail_cleanup;
	}
14861 * obtain the hardware ID - NOT related to unit, which is a
14862 * software enumeration
14864 reg = read_csr(dd, CCE_REVISION2);
14865 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14866 & CCE_REVISION2_HFI_ID_MASK;
14867 /* the variable size will remove unwanted bits */
14868 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14869 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14870 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14871 dd->icode < ARRAY_SIZE(inames) ?
14872 inames[dd->icode] : "unknown", (int)dd->irev);
14874 /* speeds the hardware can support */
14875 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14876 /* speeds allowed to run at */
14877 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14878 /* give a reasonable active value, will be set on link up */
14879 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14881 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14882 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14883 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14884 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14885 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14886 /* fix up link widths for emulation _p */
14888 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14889 ppd->link_width_supported =
14890 ppd->link_width_enabled =
14891 ppd->link_width_downgrade_supported =
			ppd->link_width_downgrade_enabled =
				OPA_LINK_WIDTH_1X;
	}

	/* ensure num_vls isn't larger than number of SDMA engines */
14896 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14897 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14898 num_vls, dd->chip_sdma_engines);
14899 num_vls = dd->chip_sdma_engines;
14900 ppd->vls_supported = dd->chip_sdma_engines;
14901 ppd->vls_operational = ppd->vls_supported;
	/*
	 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14906 * Limit the max if larger than the field holds. If timeout is
14907 * non-zero, then the calculated field will be at least 1.
14909 * Must be after icode is set up - the cclock rate depends
	 * on knowing the hardware being used.
	 */
14912 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14913 if (dd->rcv_intr_timeout_csr >
14914 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14915 dd->rcv_intr_timeout_csr =
14916 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14917 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14918 dd->rcv_intr_timeout_csr = 1;
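
	/*
	 * Worked example (hypothetical cclock period, for illustration
	 * only): a 1000ns timeout at a 1.25ns cclock would be 800
	 * cclocks, so the CSR field gets 800 / 64 = 12; any non-zero
	 * request that computes to 0 is rounded up to 1 so interrupt
	 * mitigation stays active.
	 */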
	/* needs to be done before we look for the peer device */
	read_guid(dd);
14923 /* set up shared ASIC data with peer device */
	ret = init_asic_data(dd);
	if (ret)
		goto bail_cleanup;
	/* obtain chip sizes, reset chip CSRs */
	init_chip(dd);
14931 /* read in the PCIe link speed information */
	ret = pcie_speeds(dd);
	if (ret)
		goto bail_cleanup;
14936 /* call before get_platform_config(), after init_chip_resources() */
14937 ret = eprom_init(dd);
14939 goto bail_free_rcverr;
14941 /* Needs to be called before hfi1_firmware_init */
14942 get_platform_config(dd);
14944 /* read in firmware */
	ret = hfi1_firmware_init(dd);
	if (ret)
		goto bail_cleanup;
	/*
	 * In general, the PCIe Gen3 transition must occur after the
14951 * chip has been idled (so it won't initiate any PCIe transactions
14952 * e.g. an interrupt) and before the driver changes any registers
14953 * (the transition will reset the registers).
14955 * In particular, place this call after:
14956 * - init_chip() - the chip will not initiate any PCIe transactions
14957 * - pcie_speeds() - reads the current link speed
	 * - hfi1_firmware_init() - the needed firmware is ready to be
	 *   downloaded
	 */
	ret = do_pcie_gen3_transition(dd);
	if (ret)
		goto bail_cleanup;
14965 /* start setting dd values and adjusting CSRs */
14966 init_early_variables(dd);
14968 parse_platform_config(dd);
	ret = obtain_boardname(dd);
	if (ret)
		goto bail_cleanup;
14974 snprintf(dd->boardversion, BOARD_VERS_MAX,
14975 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
		 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
		 dd->majrev, dd->minrev,
14979 (dd->revision >> CCE_REVISION_SW_SHIFT)
14980 & CCE_REVISION_SW_MASK);
	ret = set_up_context_variables(dd);
	if (ret)
		goto bail_cleanup;
	/* set initial RXE CSRs */
	init_rxe(dd);
	/* set initial TXE CSRs */
	init_txe(dd);
	/* set initial non-RXE, non-TXE CSRs */
	init_other(dd);
	/* set up KDETH QP prefix in both RX and TX CSRs */
	init_kdeth_qp(dd);
	ret = hfi1_dev_affinity_init(dd);
	if (ret)
		goto bail_cleanup;
14999 /* send contexts must be set up before receive contexts */
	ret = init_send_contexts(dd);
	if (ret)
		goto bail_cleanup;
	ret = hfi1_create_ctxts(dd);
	if (ret)
		goto bail_cleanup;
15008 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
15010 * rcd[0] is guaranteed to be valid by this point. Also, all
	 * contexts are using the same value, as per the module parameter.
15013 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
	ret = init_pervl_scs(dd);
	if (ret)
		goto bail_cleanup;
15020 for (i = 0; i < dd->num_pports; ++i) {
		ret = sdma_init(dd, i);
		if (ret)
			goto bail_cleanup;
	}
15026 /* use contexts created by hfi1_create_ctxts */
	ret = set_up_interrupts(dd);
	if (ret)
		goto bail_cleanup;
15031 /* set up LCB access - must be after set_up_interrupts() */
15032 init_lcb_access(dd);
	/*
	 * Serial number is created from the base guid:
15036 * [27:24] = base guid [38:35]
	 * [23: 0] = base guid [23: 0]
	 */
15039 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15040 (dd->base_guid & 0xFFFFFF) |
15041 ((dd->base_guid >> 11) & 0xF000000));
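
	/*
	 * Worked check (illustration only, hypothetical GUID): shifting
	 * base_guid right by 11 moves guid bit 35 to bit 24, so the
	 * 0xF000000 mask captures guid bits [38:35] as serial bits
	 * [27:24]; guid bits [23:0] pass straight through, matching the
	 * layout described above.
	 */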
15043 dd->oui1 = dd->base_guid >> 56 & 0xFF;
15044 dd->oui2 = dd->base_guid >> 48 & 0xFF;
15045 dd->oui3 = dd->base_guid >> 40 & 0xFF;
15047 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15049 goto bail_clear_intr;
15053 ret = init_cntrs(dd);
15055 goto bail_clear_intr;
15057 ret = init_rcverr(dd);
15059 goto bail_free_cntrs;
15061 init_completion(&dd->user_comp);
	/* The user refcount starts with one to indicate an active device */
15064 atomic_set(&dd->user_refcount, 1);
	goto done;

bail_clear_intr:
	clean_up_interrupts(dd);
bail_cleanup:
	hfi1_pcie_ddcleanup(dd);
bail_free:
	hfi1_free_devdata(dd);
	dd = ERR_PTR(ret);
bail:
	return dd;

done:
	return dd;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
			u32 dw_len)
{
	u32 delta_cycles;
	u32 current_egress_rate = ppd->current_egress_rate;
15088 /* rates here are in units of 10^6 bits/sec */
15090 if (desired_egress_rate == -1)
15091 return 0; /* shouldn't happen */
15093 if (desired_egress_rate >= current_egress_rate)
		return 0; /* we can't make it go faster, only slower */
15096 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15097 egress_cycles(dw_len * 4, current_egress_rate);
	return (u16)delta_cycles;
}
/**
 * create_pbc - build a pbc for transmission
15104 * @flags: special case flags or-ed in built pbc
15105 * @srate: static rate
15107 * @dwlen: dword length (header words + data words + pbc words)
15109 * Create a PBC with the given flags, rate, VL, and length.
15111 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15112 * for verbs, which does not use this PSM feature. The lone other caller
15113 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
	       u32 dw_len)
{
15119 u64 pbc, delay = 0;
15121 if (unlikely(srate_mbs))
15122 delay = delay_cycles(ppd, srate_mbs, dw_len);
	pbc = flags
		| (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15126 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15127 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
		| (dw_len & PBC_LENGTH_DWS_MASK)
			<< PBC_LENGTH_DWS_SHIFT;

	return pbc;
}
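
/*
 * Illustrative usage sketch (not driver code; values are
 * hypothetical): build a PBC for a 16-dword send on VL 0 with no
 * static-rate throttling (srate_mbs == 0 computes no delay).
 */
static inline u64 example_build_pbc(struct hfi1_pportdata *ppd)
{
	return create_pbc(ppd, 0 /* flags */, 0 /* srate_mbs */,
			  0 /* vl */, 16 /* dw_len */);
}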
15134 #define SBUS_THERMAL 0x4f
15135 #define SBUS_THERM_MONITOR_MODE 0x1
#define THERM_FAILURE(dev, ret, reason) \
	dd_dev_err((dev), \
		   "Thermal sensor initialization failed: %s (%d)\n", \
		   (reason), (ret))
/*
 * Initialize the thermal sensor.
15145 * After initialization, enable polling of thermal sensor through
15146 * SBus interface. In order for this to work, the SBus Master
15147 * firmware has to be loaded due to the fact that the HW polling
15148 * logic uses SBus interrupts, which are not supported with
15149 * default firmware. Otherwise, no data will be returned through
 * the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
	int ret = 0;

	if (dd->icode != ICODE_RTL_SILICON ||
	    check_chip_resource(dd, CR_THERM_INIT, NULL))
		return ret;
15160 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		THERM_FAILURE(dd, ret, "Acquire SBus");
		return ret;
	}
15166 dd_dev_info(dd, "Initializing thermal sensor\n");
15167 /* Disable polling of thermal readings */
15168 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15170 /* Thermal Sensor Initialization */
15171 /* Step 1: Reset the Thermal SBus Receiver */
15172 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15173 RESET_SBUS_RECEIVER, 0);
	if (ret) {
		THERM_FAILURE(dd, ret, "Bus Reset");
		goto done;
	}
15178 /* Step 2: Set Reset bit in Thermal block */
15179 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15180 WRITE_SBUS_RECEIVER, 0x1);
	if (ret) {
		THERM_FAILURE(dd, ret, "Therm Block Reset");
		goto done;
	}
15185 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
15186 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15187 WRITE_SBUS_RECEIVER, 0x32);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Clock Div");
		goto done;
	}
15192 /* Step 4: Select temperature mode */
15193 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15194 WRITE_SBUS_RECEIVER,
15195 SBUS_THERM_MONITOR_MODE);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Mode Sel");
		goto done;
	}
15200 /* Step 5: De-assert block reset and start conversion */
15201 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15202 WRITE_SBUS_RECEIVER, 0x2);
	if (ret) {
		THERM_FAILURE(dd, ret, "Write Reset Deassert");
		goto done;
	}
	/* Step 5.1: Wait for first conversion (21.5ms per spec) */
	msleep(22);
15210 /* Enable polling of thermal readings */
15211 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15213 /* Set initialized flag */
15214 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
	if (ret)
		THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
done:
	release_chip_resource(dd, CR_SBUS);
	return ret;
}
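
/*
 * Illustrative sketch (not driver code; helper is hypothetical): once
 * polling is enabled above, temperature data is returned through the
 * ASIC_STS_THERM CSR; this is a raw read, with no field decoding.
 */
static inline u64 example_read_therm_status(struct hfi1_devdata *dd)
{
	return read_csr(dd, ASIC_STS_THERM);
}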
15223 static void handle_temp_err(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = &dd->pport[0];

	/*
	 * Thermal Critical Interrupt
15228 * Put the device into forced freeze mode, take link down to
	 * offline, and put DC into reset.
	 */
	dd_dev_emerg(dd,
		     "Critical temperature reached! Forcing device into freeze mode!\n");
15233 dd->flags |= HFI1_FORCED_FREEZE;
15234 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
	/*
	 * Shut DC down as much and as quickly as possible.
	 *
15238 * Step 1: Take the link down to OFFLINE. This will cause the
15239 * 8051 to put the Serdes in reset. However, we don't want to
15240 * go through the entire link state machine since we want to
15241 * shutdown ASAP. Furthermore, this is not a graceful shutdown
15242 * but rather an attempt to save the chip.
15243 * Code below is almost the same as quiet_serdes() but avoids
	 * all the extra work and the sleeps.
	 */
15246 ppd->driver_link_ready = 0;
15247 ppd->link_enabled = 0;
15248 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
	/*
	 * Step 2: Shutdown LCB and 8051
	 * After shutdown, do not restore DC_CFG_RESET value.
	 */