1 /*
2  * Copyright(c) 2015 - 2018 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 /*
49  * This file contains all of the code that is specific to the HFI chip
50  */
51
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56
57 #include "hfi.h"
58 #include "trace.h"
59 #include "mad.h"
60 #include "pio.h"
61 #include "sdma.h"
62 #include "eprom.h"
63 #include "efivar.h"
64 #include "platform.h"
65 #include "aspm.h"
66 #include "affinity.h"
67 #include "debugfs.h"
68 #include "fault.h"
69
70 uint kdeth_qp;
71 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75 module_param(num_vls, uint, S_IRUGO);
76 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78 /*
79  * Default time to aggregate two 10K packets from the idle state
80  * (timer not running). The timer starts at the end of the first packet,
81  * so only the time for one 10K packet and header plus a bit extra is needed.
82  * 10 * 1024 + 64 header bytes = 10304 bytes
83  * 10304 bytes / 12.5 GB/s = 824.32 ns
84  */
85 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86 module_param(rcv_intr_timeout, uint, S_IRUGO);
87 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
88
89 uint rcv_intr_count = 16; /* same as qib */
90 module_param(rcv_intr_count, uint, S_IRUGO);
91 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93 ushort link_crc_mask = SUPPORTED_CRCS;
94 module_param(link_crc_mask, ushort, S_IRUGO);
95 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97 uint loopback;
98 module_param_named(loopback, loopback, uint, S_IRUGO);
99 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100
101 /* Other driver tunables */
102 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
103 static ushort crc_14b_sideband = 1;
104 static uint use_flr = 1;
105 uint quick_linkup; /* skip LNI */
106
107 struct flag_table {
108         u64 flag;       /* the flag */
109         char *str;      /* description string */
110         u16 extra;      /* extra information */
111         u16 unused0;
112         u32 unused1;
113 };
114
115 /* str must be a string constant */
116 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
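/*
 * Illustrative expansion: given the struct flag_table layout above,
 *   FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * expands to the initializer
 *   { CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 * i.e. .flag = the status bit mask, .str = the name logged for it, and
 * .extra = 0.  FLAG_ENTRY() instead carries consequence bits such as
 * SEC_SPC_FREEZE in .extra.
 */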
118
119 /* Send Error Consequences */
120 #define SEC_WRITE_DROPPED       0x1
121 #define SEC_PACKET_DROPPED      0x2
122 #define SEC_SC_HALTED           0x4     /* per-context only */
123 #define SEC_SPC_FREEZE          0x8     /* per-HFI only */
124
125 #define DEFAULT_KRCVQS            2
126 #define MIN_KERNEL_KCTXTS         2
127 #define FIRST_KERNEL_KCTXT        1
128
129 /*
130  * RSM instance allocation
131  *   0 - Verbs
132  *   1 - User Fecn Handling
133  *   2 - Vnic
134  */
135 #define RSM_INS_VERBS             0
136 #define RSM_INS_FECN              1
137 #define RSM_INS_VNIC              2
138
139 /* Bit offset into the GUID which carries HFI id information */
140 #define GUID_HFI_INDEX_SHIFT     39
141
142 /* extract the emulation revision */
143 #define emulator_rev(dd) ((dd)->irev >> 8)
144 /* parallel and serial emulation versions are 3 and 4 respectively */
145 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
146 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
147
148 /* RSM fields for Verbs */
149 /* packet type */
150 #define IB_PACKET_TYPE         2ull
151 #define QW_SHIFT               6ull
152 /* QPN[7..1] */
153 #define QPN_WIDTH              7ull
154
155 /* LRH.BTH: QW 0, OFFSET 48 - for match */
156 #define LRH_BTH_QW             0ull
157 #define LRH_BTH_BIT_OFFSET     48ull
158 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
159 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
160 #define LRH_BTH_SELECT_OFFSET  LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
161 #define LRH_BTH_MASK           3ull
162 #define LRH_BTH_VALUE          2ull
163
164 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
165 #define LRH_SC_QW              0ull
166 #define LRH_SC_BIT_OFFSET      56ull
167 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
168 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
169 #define LRH_SC_MASK            128ull
170 #define LRH_SC_VALUE           0ull
171
172 /* SC[n..0] QW 0, OFFSET 60 - for select */
173 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
174
175 /* QPN[m+n:1] QW 1, OFFSET 1 */
176 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
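/*
 * Worked example of the match/select offset encoding: the bits above
 * QW_SHIFT select which 64-bit quadword of the packet header is examined
 * and the low bits give the bit offset within that quadword, e.g.
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48   (QW 0, bit 48)
 *   LRH_SC_MATCH_OFFSET  = (0 << 6) | 56 = 56   (QW 0, bit 56)
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65   (QW 1, bit 1)
 */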
177
178 /* RSM fields for Vnic */
179 /* L2_TYPE: QW 0, OFFSET 61 - for match */
180 #define L2_TYPE_QW             0ull
181 #define L2_TYPE_BIT_OFFSET     61ull
182 #define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
183 #define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
184 #define L2_TYPE_MASK           3ull
185 #define L2_16B_VALUE           2ull
186
187 /* L4_TYPE QW 1, OFFSET 0 - for match */
188 #define L4_TYPE_QW              1ull
189 #define L4_TYPE_BIT_OFFSET      0ull
190 #define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
191 #define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
192 #define L4_16B_TYPE_MASK        0xFFull
193 #define L4_16B_ETH_VALUE        0x78ull
194
195 /* 16B VESWID - for select */
196 #define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
197 /* 16B ENTROPY - for select */
198 #define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
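/*
 * The 16B/VNIC fields use the same encoding, e.g.
 *   L4_TYPE_MATCH_OFFSET     = (1 << 6) |  0 =  64   (QW 1, bit 0)
 *   L4_16B_HDR_VESWID_OFFSET = (2 << 6) | 16 = 144   (QW 2, bit 16)
 *   L2_16B_ENTROPY_OFFSET    = (1 << 6) | 32 =  96   (QW 1, bit 32)
 */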
199
200 /* defines to build the power-on SC2VL table */
201 #define SC2VL_VAL( \
202         num, \
203         sc0, sc0val, \
204         sc1, sc1val, \
205         sc2, sc2val, \
206         sc3, sc3val, \
207         sc4, sc4val, \
208         sc5, sc5val, \
209         sc6, sc6val, \
210         sc7, sc7val) \
211 ( \
212         ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
213         ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
214         ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
215         ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
216         ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
217         ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
218         ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
219         ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
220 )
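/*
 * Usage sketch (values are illustrative, not the driver's actual power-on
 * mapping): a call such as
 *   SC2VL_VAL(0, 0, 0, 1, 0, 2, 1, 3, 1, 4, 2, 5, 2, 6, 3, 7, 3)
 * ORs each SC's VL into its SEND_SC2VLT0_SC<n>_SHIFT position, yielding a
 * single 64-bit value suitable for the matching SEND_SC2VLT<num> CSR.
 * The DC_SC_VL_VAL macro below packs the DCC_CFG_SC_VL_TABLE_* registers
 * the same way, 16 entries per range.
 */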
221
222 #define DC_SC_VL_VAL( \
223         range, \
224         e0, e0val, \
225         e1, e1val, \
226         e2, e2val, \
227         e3, e3val, \
228         e4, e4val, \
229         e5, e5val, \
230         e6, e6val, \
231         e7, e7val, \
232         e8, e8val, \
233         e9, e9val, \
234         e10, e10val, \
235         e11, e11val, \
236         e12, e12val, \
237         e13, e13val, \
238         e14, e14val, \
239         e15, e15val) \
240 ( \
241         ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
242         ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
243         ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
244         ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
245         ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
246         ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
247         ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
248         ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
249         ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
250         ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
251         ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
252         ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
253         ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
254         ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
255         ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
256         ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
257 )
258
259 /* all CceStatus sub-block freeze bits */
260 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
261                         | CCE_STATUS_RXE_FROZE_SMASK \
262                         | CCE_STATUS_TXE_FROZE_SMASK \
263                         | CCE_STATUS_TXE_PIO_FROZE_SMASK)
264 /* all CceStatus sub-block TXE pause bits */
265 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
266                         | CCE_STATUS_TXE_PAUSED_SMASK \
267                         | CCE_STATUS_SDMA_PAUSED_SMASK)
268 /* all CceStatus sub-block RXE pause bits */
269 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
270
271 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
272 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
273
274 /*
275  * CCE Error flags.
276  */
277 static struct flag_table cce_err_status_flags[] = {
278 /* 0*/  FLAG_ENTRY0("CceCsrParityErr",
279                 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
280 /* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
281                 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
282 /* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
283                 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
284 /* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
285                 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
286 /* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
287                 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
288 /* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
289                 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
290 /* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
291                 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
292 /* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
293                 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
294 /* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
295                 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
296 /* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
297             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
298 /*10*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
299             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
300 /*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
301             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
302 /*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
303                 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
304 /*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
305                 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
306 /*14*/  FLAG_ENTRY0("PcicRetryMemCorErr",
307                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
308 /*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
309                 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
310 /*16*/  FLAG_ENTRY0("PcicPostHdQCorErr",
311                 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
312 /*17*/  FLAG_ENTRY0("PcicPostHdQCorErr",
313                 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
314 /*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
315                 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
316 /*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
317                 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
318 /*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
319                 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
320 /*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
321                 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
322 /*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
323                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
324 /*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
325                 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
326 /*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
327                 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
328 /*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
329                 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
330 /*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
331                 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
332 /*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
333                 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
334 /*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
335                 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
336 /*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
337                 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
338 /*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
339                 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
340 /*31*/  FLAG_ENTRY0("LATriggered",
341                 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
342 /*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
343                 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
344 /*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
345                 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
346 /*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
347                 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
348 /*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
349                 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
350 /*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
351                 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
352 /*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
353                 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
354 /*38*/  FLAG_ENTRY0("CceIntMapCorErr",
355                 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
356 /*39*/  FLAG_ENTRY0("CceIntMapUncErr",
357                 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
358 /*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
359                 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
360 /*41-63 reserved*/
361 };
362
363 /*
364  * Misc Error flags
365  */
366 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
367 static struct flag_table misc_err_status_flags[] = {
368 /* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
369 /* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
370 /* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
371 /* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
372 /* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
373 /* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
374 /* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
375 /* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
376 /* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
377 /* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
378 /*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
379 /*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
380 /*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
381 };
382
383 /*
384  * TXE PIO Error flags and consequences
385  */
386 static struct flag_table pio_err_status_flags[] = {
387 /* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
388         SEC_WRITE_DROPPED,
389         SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
390 /* 1*/  FLAG_ENTRY("PioWriteAddrParity",
391         SEC_SPC_FREEZE,
392         SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
393 /* 2*/  FLAG_ENTRY("PioCsrParity",
394         SEC_SPC_FREEZE,
395         SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
396 /* 3*/  FLAG_ENTRY("PioSbMemFifo0",
397         SEC_SPC_FREEZE,
398         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
399 /* 4*/  FLAG_ENTRY("PioSbMemFifo1",
400         SEC_SPC_FREEZE,
401         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
402 /* 5*/  FLAG_ENTRY("PioPccFifoParity",
403         SEC_SPC_FREEZE,
404         SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
405 /* 6*/  FLAG_ENTRY("PioPecFifoParity",
406         SEC_SPC_FREEZE,
407         SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
408 /* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
409         SEC_SPC_FREEZE,
410         SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
411 /* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
412         SEC_SPC_FREEZE,
413         SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
414 /* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
415         SEC_SPC_FREEZE,
416         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
417 /*10*/  FLAG_ENTRY("PioSmPktResetParity",
418         SEC_SPC_FREEZE,
419         SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
420 /*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
421         SEC_SPC_FREEZE,
422         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
423 /*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
424         SEC_SPC_FREEZE,
425         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
426 /*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
427         0,
428         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
429 /*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
430         0,
431         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
432 /*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
433         SEC_SPC_FREEZE,
434         SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
435 /*16*/  FLAG_ENTRY("PioPpmcPblFifo",
436         SEC_SPC_FREEZE,
437         SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
438 /*17*/  FLAG_ENTRY("PioInitSmIn",
439         0,
440         SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
441 /*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
442         SEC_SPC_FREEZE,
443         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
444 /*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
445         SEC_SPC_FREEZE,
446         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
447 /*20*/  FLAG_ENTRY("PioHostAddrMemCor",
448         0,
449         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
450 /*21*/  FLAG_ENTRY("PioWriteDataParity",
451         SEC_SPC_FREEZE,
452         SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
453 /*22*/  FLAG_ENTRY("PioStateMachine",
454         SEC_SPC_FREEZE,
455         SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
456 /*23*/  FLAG_ENTRY("PioWriteQwValidParity",
457         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
458         SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
459 /*24*/  FLAG_ENTRY("PioBlockQwCountParity",
460         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
461         SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
462 /*25*/  FLAG_ENTRY("PioVlfVlLenParity",
463         SEC_SPC_FREEZE,
464         SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
465 /*26*/  FLAG_ENTRY("PioVlfSopParity",
466         SEC_SPC_FREEZE,
467         SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
468 /*27*/  FLAG_ENTRY("PioVlFifoParity",
469         SEC_SPC_FREEZE,
470         SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
471 /*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
472         SEC_SPC_FREEZE,
473         SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
474 /*29*/  FLAG_ENTRY("PioPpmcSopLen",
475         SEC_SPC_FREEZE,
476         SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
477 /*30-31 reserved*/
478 /*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
479         SEC_SPC_FREEZE,
480         SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
481 /*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
482         SEC_SPC_FREEZE,
483         SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
484 /*34*/  FLAG_ENTRY("PioPccSopHeadParity",
485         SEC_SPC_FREEZE,
486         SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
487 /*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
488         SEC_SPC_FREEZE,
489         SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
490 /*36-63 reserved*/
491 };
492
493 /* TXE PIO errors that cause an SPC freeze */
494 #define ALL_PIO_FREEZE_ERR \
495         (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
496         | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
497         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
498         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
499         | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
500         | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
501         | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
502         | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
503         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
504         | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
505         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
506         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
507         | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
508         | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
509         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
510         | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
511         | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
512         | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
513         | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
514         | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
515         | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
516         | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
517         | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
518         | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
519         | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
520         | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
521         | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
522         | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
523         | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
524
525 /*
526  * TXE SDMA Error flags
527  */
528 static struct flag_table sdma_err_status_flags[] = {
529 /* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
530                 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
531 /* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
532                 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
533 /* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
534                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
535 /* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
536                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
537 /*04-63 reserved*/
538 };
539
540 /* TXE SDMA errors that cause an SPC freeze */
541 #define ALL_SDMA_FREEZE_ERR  \
542                 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
543                 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
544                 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
545
546 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
547 #define PORT_DISCARD_EGRESS_ERRS \
548         (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
549         | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
550         | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
551
552 /*
553  * TXE Egress Error flags
554  */
555 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
556 static struct flag_table egress_err_status_flags[] = {
557 /* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
558 /* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
559 /* 2 reserved */
560 /* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
561                 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
562 /* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
563 /* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
564 /* 6 reserved */
565 /* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
566                 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
567 /* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
568                 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
569 /* 9-10 reserved */
570 /*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
571                 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
572 /*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
573 /*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
574 /*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
575 /*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
576 /*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
577                 SEES(TX_SDMA0_DISALLOWED_PACKET)),
578 /*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
579                 SEES(TX_SDMA1_DISALLOWED_PACKET)),
580 /*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
581                 SEES(TX_SDMA2_DISALLOWED_PACKET)),
582 /*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
583                 SEES(TX_SDMA3_DISALLOWED_PACKET)),
584 /*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
585                 SEES(TX_SDMA4_DISALLOWED_PACKET)),
586 /*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
587                 SEES(TX_SDMA5_DISALLOWED_PACKET)),
588 /*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
589                 SEES(TX_SDMA6_DISALLOWED_PACKET)),
590 /*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
591                 SEES(TX_SDMA7_DISALLOWED_PACKET)),
592 /*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
593                 SEES(TX_SDMA8_DISALLOWED_PACKET)),
594 /*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
595                 SEES(TX_SDMA9_DISALLOWED_PACKET)),
596 /*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
597                 SEES(TX_SDMA10_DISALLOWED_PACKET)),
598 /*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
599                 SEES(TX_SDMA11_DISALLOWED_PACKET)),
600 /*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
601                 SEES(TX_SDMA12_DISALLOWED_PACKET)),
602 /*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
603                 SEES(TX_SDMA13_DISALLOWED_PACKET)),
604 /*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
605                 SEES(TX_SDMA14_DISALLOWED_PACKET)),
606 /*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
607                 SEES(TX_SDMA15_DISALLOWED_PACKET)),
608 /*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
609                 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
610 /*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
611                 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
612 /*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
613                 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
614 /*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
615                 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
616 /*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
617                 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
618 /*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
619                 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
620 /*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
621                 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
622 /*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
623                 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
624 /*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
625                 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
626 /*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
627 /*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
628 /*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
629 /*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
630 /*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
631 /*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
632 /*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
633 /*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
634 /*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
635 /*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
636 /*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
637 /*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
638 /*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
639 /*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
640 /*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
641 /*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
642 /*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
643 /*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
644 /*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
645 /*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
646 /*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
647 /*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
648                 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
649 /*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
650                 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
651 };
652
653 /*
654  * TXE Egress Error Info flags
655  */
656 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
657 static struct flag_table egress_err_info_flags[] = {
658 /* 0*/  FLAG_ENTRY0("Reserved", 0ull),
659 /* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
660 /* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
661 /* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
662 /* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
663 /* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
664 /* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
665 /* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
666 /* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
667 /* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
668 /*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
669 /*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
670 /*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
671 /*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
672 /*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
673 /*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
674 /*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
675 /*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
676 /*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
677 /*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
678 /*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
679 /*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
680 };
681
682 /* TXE Egress errors that cause an SPC freeze */
683 #define ALL_TXE_EGRESS_FREEZE_ERR \
684         (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
685         | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
686         | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
687         | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
688         | SEES(TX_LAUNCH_CSR_PARITY) \
689         | SEES(TX_SBRD_CTL_CSR_PARITY) \
690         | SEES(TX_CONFIG_PARITY) \
691         | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
692         | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
693         | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
694         | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
695         | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
696         | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
697         | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
698         | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
699         | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
700         | SEES(TX_CREDIT_RETURN_PARITY))
701
702 /*
703  * TXE Send error flags
704  */
705 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
706 static struct flag_table send_err_status_flags[] = {
707 /* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
708 /* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
709 /* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
710 };
711
712 /*
713  * TXE Send Context Error flags and consequences
714  */
715 static struct flag_table sc_err_status_flags[] = {
716 /* 0*/  FLAG_ENTRY("InconsistentSop",
717                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
718                 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
719 /* 1*/  FLAG_ENTRY("DisallowedPacket",
720                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
721                 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
722 /* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
723                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
724                 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
725 /* 3*/  FLAG_ENTRY("WriteOverflow",
726                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
727                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
728 /* 4*/  FLAG_ENTRY("WriteOutOfBounds",
729                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
730                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
731 /* 5-63 reserved*/
732 };
733
734 /*
735  * RXE Receive Error flags
736  */
737 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
738 static struct flag_table rxe_err_status_flags[] = {
739 /* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
740 /* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
741 /* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
742 /* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
743 /* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
744 /* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
745 /* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
746 /* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
747 /* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
748 /* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
749 /*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
750 /*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
751 /*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
752 /*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
753 /*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
754 /*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
755 /*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
756                 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
757 /*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
758 /*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
759 /*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
760                 RXES(RBUF_BLOCK_LIST_READ_UNC)),
761 /*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
762                 RXES(RBUF_BLOCK_LIST_READ_COR)),
763 /*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
764                 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
765 /*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
766                 RXES(RBUF_CSR_QENT_CNT_PARITY)),
767 /*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
768                 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
769 /*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
770                 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
771 /*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
772 /*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
773 /*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
774                 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
775 /*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
776 /*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
777 /*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
778 /*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
779 /*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
780 /*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
781 /*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
782 /*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
783                 RXES(RBUF_FL_INITDONE_PARITY)),
784 /*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
785                 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
786 /*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
787 /*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
788 /*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
789 /*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
790                 RXES(LOOKUP_DES_PART1_UNC_COR)),
791 /*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
792                 RXES(LOOKUP_DES_PART2_PARITY)),
793 /*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
794 /*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
795 /*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
796 /*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
797 /*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
798 /*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
799 /*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
800 /*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
801 /*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
802 /*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
803 /*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
804 /*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
805 /*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
806 /*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
807 /*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
808 /*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
809 /*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
810 /*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
811 /*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
812 /*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
813 /*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
814 /*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
815 };
816
817 /* RXE errors that will trigger an SPC freeze */
818 #define ALL_RXE_FREEZE_ERR  \
819         (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
820         | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
821         | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
822         | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
823         | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
824         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
825         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
826         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
827         | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
828         | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
829         | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
830         | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
831         | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
832         | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
833         | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
834         | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
835         | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
836         | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
837         | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
838         | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
839         | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
840         | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
841         | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
842         | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
843         | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
844         | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
845         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
846         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
847         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
848         | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
849         | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
850         | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
851         | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
852         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
853         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
854         | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
855         | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
856         | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
857         | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
858         | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
859         | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
860         | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
861         | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
862         | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
863
864 #define RXE_FREEZE_ABORT_MASK \
865         (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
866         RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
867         RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
868
869 /*
870  * DCC Error Flags
871  */
872 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
873 static struct flag_table dcc_err_flags[] = {
874         FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
875         FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
876         FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
877         FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
878         FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
879         FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
880         FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
881         FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
882         FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
883         FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
884         FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
885         FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
886         FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
887         FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
888         FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
889         FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
890         FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
891         FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
892         FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
893         FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
894         FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
895         FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
896         FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
897         FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
898         FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
899         FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
900         FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
901         FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
902         FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
903         FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
904         FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
905         FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
906         FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
907         FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
908         FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
909         FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
910         FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
911         FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
912         FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
913         FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
914         FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
915         FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
916         FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
917         FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
918         FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
919         FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
920 };
921
922 /*
923  * LCB error flags
924  */
925 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
926 static struct flag_table lcb_err_flags[] = {
927 /* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
928 /* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
929 /* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
930 /* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
931                 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
932 /* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
933 /* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
934 /* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
935 /* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
936 /* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
937 /* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
938 /*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
939 /*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
940 /*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
941 /*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
942                 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
943 /*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
944 /*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
945 /*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
946 /*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
947 /*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
948 /*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
949                 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
950 /*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
951 /*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
952 /*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
953 /*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
954 /*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
955 /*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
956 /*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
957                 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
958 /*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
959 /*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
960                 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
961 /*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
962                 LCBE(REDUNDANT_FLIT_PARITY_ERR))
963 };
964
965 /*
966  * DC8051 Error Flags
967  */
968 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
969 static struct flag_table dc8051_err_flags[] = {
970         FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
971         FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
972         FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
973         FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
974         FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
975         FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
976         FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
977         FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
978         FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
979                     D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
980         FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
981 };
982
983 /*
984  * DC8051 Information Error flags
985  *
986  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
987  */
988 static struct flag_table dc8051_info_err_flags[] = {
989         FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
990         FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
991         FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
992         FLAG_ENTRY0("Serdes internal loopback failure",
993                     FAILED_SERDES_INTERNAL_LOOPBACK),
994         FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
995         FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
996         FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
997         FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
998         FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
999         FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1000         FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1001         FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
1002         FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
1003         FLAG_ENTRY0("External Device Request Timeout",
1004                     EXTERNAL_DEVICE_REQ_TIMEOUT),
1005 };
1006
1007 /*
1008  * DC8051 Information Host Information flags
1009  *
1010  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1011  */
1012 static struct flag_table dc8051_info_host_msg_flags[] = {
1013         FLAG_ENTRY0("Host request done", 0x0001),
1014         FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1015         FLAG_ENTRY0("BC SMA message", 0x0004),
1016         FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1017         FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1018         FLAG_ENTRY0("External device config request", 0x0020),
1019         FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1020         FLAG_ENTRY0("LinkUp achieved", 0x0080),
1021         FLAG_ENTRY0("Link going down", 0x0100),
1022         FLAG_ENTRY0("Link width downgraded", 0x0200),
1023 };
1024
1025 static u32 encoded_size(u32 size);
1026 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1027 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1028 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1029                                u8 *continuous);
1030 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1031                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1032 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1033                                       u8 *remote_tx_rate, u16 *link_widths);
1034 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1035                                     u8 *flag_bits, u16 *link_widths);
1036 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1037                                   u8 *device_rev);
1038 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1039 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1040                             u8 *tx_polarity_inversion,
1041                             u8 *rx_polarity_inversion, u8 *max_rate);
1042 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1043                                 unsigned int context, u64 err_status);
1044 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1045 static void handle_dcc_err(struct hfi1_devdata *dd,
1046                            unsigned int context, u64 err_status);
1047 static void handle_lcb_err(struct hfi1_devdata *dd,
1048                            unsigned int context, u64 err_status);
1049 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1050 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1051 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057 static void set_partition_keys(struct hfi1_pportdata *ppd);
1058 static const char *link_state_name(u32 state);
1059 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1060                                           u32 state);
1061 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1062                            u64 *out_data);
1063 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1064 static int thermal_init(struct hfi1_devdata *dd);
1065
1066 static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1067 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1068                                             int msecs);
1069 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1070                                   int msecs);
1071 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1072 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1073 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1074                                    int msecs);
1075 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
1076                                          int msecs);
1077 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1078 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1079 static void handle_temp_err(struct hfi1_devdata *dd);
1080 static void dc_shutdown(struct hfi1_devdata *dd);
1081 static void dc_start(struct hfi1_devdata *dd);
1082 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1083                            unsigned int *np);
1084 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1085 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1086 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1087 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
1088
1089 /*
1090  * Error interrupt table entry.  This is used as input to the interrupt
1091  * "clear down" routine used for all second tier error interrupt register.
1092  * Second tier interrupt registers have a single bit representing them
1093  * in the top-level CceIntStatus.
1094  */
1095 struct err_reg_info {
1096         u32 status;             /* status CSR offset */
1097         u32 clear;              /* clear CSR offset */
1098         u32 mask;               /* mask CSR offset */
1099         void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1100         const char *desc;
1101 };
1102
1103 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
1104 #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
1105 #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
1106
1107 /*
1108  * Helpers for building HFI and DC error interrupt table entries.  Different
1109  * helpers are needed because of inconsistent register names.
1110  */
1111 #define EE(reg, handler, desc) \
1112         { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1113                 handler, desc }
1114 #define DC_EE1(reg, handler, desc) \
1115         { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1116 #define DC_EE2(reg, handler, desc) \
1117         { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
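/*
 * Purely illustrative (assumes the usual *_STATUS/_CLEAR/_MASK register
 * macros from the chip register headers): an entry such as
 *
 *      EE(CCE_ERR, handle_cce_err, "CceErr")
 *
 * expands to
 *
 *      { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *        handle_cce_err, "CceErr" }
 *
 * DC_EE1/DC_EE2 do the same for DC blocks whose registers use the
 * _FLG/_FLG_CLR/_FLG_EN and _FLG/_CLR/_EN naming, respectively.
 */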
1118
1119 /*
1120  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1121  * another register containing more information.
1122  */
1123 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1124 /* 0*/  EE(CCE_ERR,         handle_cce_err,    "CceErr"),
1125 /* 1*/  EE(RCV_ERR,         handle_rxe_err,    "RxeErr"),
1126 /* 2*/  EE(MISC_ERR,        handle_misc_err,   "MiscErr"),
1127 /* 3*/  { 0, 0, 0, NULL }, /* reserved */
1128 /* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1129 /* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1130 /* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1131 /* 7*/  EE(SEND_ERR,        handle_txe_err,    "TxeErr")
1132         /* the rest are reserved */
1133 };
1134
1135 /*
1136  * Index into the Various section of the interrupt sources
1137  * corresponding to the Critical Temperature interrupt.
1138  */
1139 #define TCRIT_INT_SOURCE 4
1140
1141 /*
1142  * SDMA error interrupt entry - refers to another register containing more
1143  * information.
1144  */
1145 static const struct err_reg_info sdma_eng_err =
1146         EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1147
1148 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1149 /* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1150 /* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1151 /* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1152 /* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1153 /* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1154         /* rest are reserved */
1155 };
1156
1157 /*
1158  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1159  * register cannot be derived from the MTU value because 10K is not
1160  * a power of 2. Therefore, we need a constant. Everything else can
1161  * be calculated.
1162  */
1163 #define DCC_CFG_PORT_MTU_CAP_10240 7
1164
1165 /*
1166  * Table of the DC grouping of error interrupts.  Each entry refers to
1167  * another register containing more information.
1168  */
1169 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1170 /* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1171 /* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1172 /* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1173 /* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1174         /* the rest are reserved */
1175 };
1176
1177 struct cntr_entry {
1178         /*
1179          * counter name
1180          */
1181         char *name;
1182
1183         /*
1184          * csr to read for name (if applicable)
1185          */
1186         u64 csr;
1187
1188         /*
1189          * offset into dd or ppd to store the counter's value
1190          */
1191         int offset;
1192
1193         /*
1194          * flags
1195          */
1196         u8 flags;
1197
1198         /*
1199          * accessor for stat element, context either dd or ppd
1200          */
1201         u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1202                        int mode, u64 data);
1203 };
1204
1205 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1206 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1207
1208 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1209 { \
1210         name, \
1211         csr, \
1212         offset, \
1213         flags, \
1214         accessor \
1215 }
1216
1217 /* 32bit RXE */
1218 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1219 CNTR_ELEM(#name, \
1220           (counter * 8 + RCV_COUNTER_ARRAY32), \
1221           0, flags | CNTR_32BIT, \
1222           port_access_u32_csr)
1223
1224 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1225 CNTR_ELEM(#name, \
1226           (counter * 8 + RCV_COUNTER_ARRAY32), \
1227           0, flags | CNTR_32BIT, \
1228           dev_access_u32_csr)
1229
1230 /* 64bit RXE */
1231 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1232 CNTR_ELEM(#name, \
1233           (counter * 8 + RCV_COUNTER_ARRAY64), \
1234           0, flags, \
1235           port_access_u64_csr)
1236
1237 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1238 CNTR_ELEM(#name, \
1239           (counter * 8 + RCV_COUNTER_ARRAY64), \
1240           0, flags, \
1241           dev_access_u64_csr)
1242
1243 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1244 #define OVR_ELM(ctx) \
1245 CNTR_ELEM("RcvHdrOvr" #ctx, \
1246           (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1247           0, CNTR_NORMAL, port_access_u64_csr)
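/*
 * Rough illustration of how these helpers compose: OVR_ELM(0) expands
 * via CNTR_ELEM to an initializer along the lines of
 *
 *      { "RcvHdrOvr0", RCV_HDR_OVFL_CNT + 0 * 0x100,
 *        0, CNTR_NORMAL, port_access_u64_csr }
 *
 * i.e. one overflow counter per receive context, with the per-context
 * CSR copies spaced 0x100 bytes apart.
 */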
1248
1249 /* 32bit TXE */
1250 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1251 CNTR_ELEM(#name, \
1252           (counter * 8 + SEND_COUNTER_ARRAY32), \
1253           0, flags | CNTR_32BIT, \
1254           port_access_u32_csr)
1255
1256 /* 64bit TXE */
1257 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1258 CNTR_ELEM(#name, \
1259           (counter * 8 + SEND_COUNTER_ARRAY64), \
1260           0, flags, \
1261           port_access_u64_csr)
1262
1263 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1264 CNTR_ELEM(#name, \
1265           (counter * 8 + SEND_COUNTER_ARRAY64), \
1266           0, \
1267           flags, \
1268           dev_access_u64_csr)
1269
1270 /* CCE */
1271 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1272 CNTR_ELEM(#name, \
1273           (counter * 8 + CCE_COUNTER_ARRAY32), \
1274           0, flags | CNTR_32BIT, \
1275           dev_access_u32_csr)
1276
1277 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1278 CNTR_ELEM(#name, \
1279           (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1280           0, flags | CNTR_32BIT, \
1281           dev_access_u32_csr)
1282
1283 /* DC */
1284 #define DC_PERF_CNTR(name, counter, flags) \
1285 CNTR_ELEM(#name, \
1286           counter, \
1287           0, \
1288           flags, \
1289           dev_access_u64_csr)
1290
1291 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1292 CNTR_ELEM(#name, \
1293           counter, \
1294           0, \
1295           flags, \
1296           dc_access_lcb_cntr)
1297
1298 /* ibp counters */
1299 #define SW_IBP_CNTR(name, cntr) \
1300 CNTR_ELEM(#name, \
1301           0, \
1302           0, \
1303           CNTR_SYNTH, \
1304           access_ibp_##cntr)
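/*
 * Example only: an invocation such as SW_IBP_CNTR(RcResend, rc_resends)
 * would expand to
 *
 *      { "RcResend", 0, 0, CNTR_SYNTH, access_ibp_rc_resends }
 *
 * i.e. a purely software-backed counter whose value comes from the
 * pasted access_ibp_* accessor rather than from a CSR (csr == 0).
 */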
1305
1306 /**
1307  * hfi1_addr_from_offset - return addr for readq/writeq
1308  * @dd - the dd device
1309  * @offset - the offset of the CSR within bar0
1310  *
1311  * This routine selects the appropriate base address
1312  * based on the indicated offset.
1313  */
1314 static inline void __iomem *hfi1_addr_from_offset(
1315         const struct hfi1_devdata *dd,
1316         u32 offset)
1317 {
1318         if (offset >= dd->base2_start)
1319                 return dd->kregbase2 + (offset - dd->base2_start);
1320         return dd->kregbase1 + offset;
1321 }
1322
1323 /**
1324  * read_csr - read CSR at the indicated offset
1325  * @dd - the dd device
1326  * @offset - the offset of the CSR within bar0
1327  *
1328  * Return: the value read or all FF's if there
1329  * is no mapping
1330  */
1331 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1332 {
1333         if (dd->flags & HFI1_PRESENT)
1334                 return readq(hfi1_addr_from_offset(dd, offset));
1335         return -1;
1336 }
1337
1338 /**
1339  * write_csr - write CSR at the indicated offset
1340  * @dd - the dd device
1341  * @offset - the offset of the CSR within bar0
1342  * @value - value to write
1343  */
1344 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1345 {
1346         if (dd->flags & HFI1_PRESENT) {
1347                 void __iomem *base = hfi1_addr_from_offset(dd, offset);
1348
1349                 /* avoid write to RcvArray */
1350                 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1351                         return;
1352                 writeq(value, base);
1353         }
1354 }
1355
1356 /**
1357  * get_csr_addr - return the iomem address for offset
1358  * @dd - the dd device
1359  * @offset - the offset of the CSR within bar0
1360  *
1361  * Return: The iomem address to use in subsequent
1362  * writeq/readq operations.
1363  */
1364 void __iomem *get_csr_addr(
1365         const struct hfi1_devdata *dd,
1366         u32 offset)
1367 {
1368         if (dd->flags & HFI1_PRESENT)
1369                 return hfi1_addr_from_offset(dd, offset);
1370         return NULL;
1371 }
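/*
 * Illustrative use of the accessors above (the offsets shown are the
 * usual bar0 register macros; exact names depend on the caller):
 *
 *      u64 rev = read_csr(dd, CCE_REVISION);
 *      write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
 *
 * where i selects one of the 64-bit interrupt mask groups.
 * get_csr_addr() is intended for hot paths that cache the mapped
 * address and then issue readq()/writeq() directly.
 */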
1372
1373 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1374                                  int mode, u64 value)
1375 {
1376         u64 ret;
1377
1378         if (mode == CNTR_MODE_R) {
1379                 ret = read_csr(dd, csr);
1380         } else if (mode == CNTR_MODE_W) {
1381                 write_csr(dd, csr, value);
1382                 ret = value;
1383         } else {
1384                 dd_dev_err(dd, "Invalid cntr register access mode");
1385                 return 0;
1386         }
1387
1388         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1389         return ret;
1390 }
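/*
 * The rw_cntr accessors below all funnel through this helper.  A read
 * of a device-wide counter ends up looking roughly like
 *
 *      entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 *
 * and zeroing it like
 *
 *      entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_W, 0);
 *
 * with a real VL (or engine) index passed instead of CNTR_INVALID_VL
 * for the per-VL/per-engine flavors.
 */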
1391
1392 /* Dev Access */
1393 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1394                               void *context, int vl, int mode, u64 data)
1395 {
1396         struct hfi1_devdata *dd = context;
1397         u64 csr = entry->csr;
1398
1399         if (entry->flags & CNTR_SDMA) {
1400                 if (vl == CNTR_INVALID_VL)
1401                         return 0;
1402                 csr += 0x100 * vl;
1403         } else {
1404                 if (vl != CNTR_INVALID_VL)
1405                         return 0;
1406         }
1407         return read_write_csr(dd, csr, mode, data);
1408 }
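/*
 * Note: for CNTR_SDMA counters the vl argument carries the SDMA engine
 * index, and the per-engine CSR copies sit 0x100 bytes apart, so e.g.
 * engine 3 of such a counter is read at csr + 3 * 0x100.
 */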
1409
1410 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1411                               void *context, int idx, int mode, u64 data)
1412 {
1413         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1414
1415         if (dd->per_sdma && idx < dd->num_sdma)
1416                 return dd->per_sdma[idx].err_cnt;
1417         return 0;
1418 }
1419
1420 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1421                               void *context, int idx, int mode, u64 data)
1422 {
1423         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1424
1425         if (dd->per_sdma && idx < dd->num_sdma)
1426                 return dd->per_sdma[idx].sdma_int_cnt;
1427         return 0;
1428 }
1429
1430 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1431                                    void *context, int idx, int mode, u64 data)
1432 {
1433         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1434
1435         if (dd->per_sdma && idx < dd->num_sdma)
1436                 return dd->per_sdma[idx].idle_int_cnt;
1437         return 0;
1438 }
1439
1440 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1441                                        void *context, int idx, int mode,
1442                                        u64 data)
1443 {
1444         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1445
1446         if (dd->per_sdma && idx < dd->num_sdma)
1447                 return dd->per_sdma[idx].progress_int_cnt;
1448         return 0;
1449 }
1450
1451 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1452                               int vl, int mode, u64 data)
1453 {
1454         struct hfi1_devdata *dd = context;
1455
1456         u64 val = 0;
1457         u64 csr = entry->csr;
1458
1459         if (entry->flags & CNTR_VL) {
1460                 if (vl == CNTR_INVALID_VL)
1461                         return 0;
1462                 csr += 8 * vl;
1463         } else {
1464                 if (vl != CNTR_INVALID_VL)
1465                         return 0;
1466         }
1467
1468         val = read_write_csr(dd, csr, mode, data);
1469         return val;
1470 }
1471
1472 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1473                               int vl, int mode, u64 data)
1474 {
1475         struct hfi1_devdata *dd = context;
1476         u32 csr = entry->csr;
1477         int ret = 0;
1478
1479         if (vl != CNTR_INVALID_VL)
1480                 return 0;
1481         if (mode == CNTR_MODE_R)
1482                 ret = read_lcb_csr(dd, csr, &data);
1483         else if (mode == CNTR_MODE_W)
1484                 ret = write_lcb_csr(dd, csr, data);
1485
1486         if (ret) {
1487                 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1488                 return 0;
1489         }
1490
1491         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1492         return data;
1493 }
1494
1495 /* Port Access */
1496 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1497                                int vl, int mode, u64 data)
1498 {
1499         struct hfi1_pportdata *ppd = context;
1500
1501         if (vl != CNTR_INVALID_VL)
1502                 return 0;
1503         return read_write_csr(ppd->dd, entry->csr, mode, data);
1504 }
1505
1506 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1507                                void *context, int vl, int mode, u64 data)
1508 {
1509         struct hfi1_pportdata *ppd = context;
1510         u64 val;
1511         u64 csr = entry->csr;
1512
1513         if (entry->flags & CNTR_VL) {
1514                 if (vl == CNTR_INVALID_VL)
1515                         return 0;
1516                 csr += 8 * vl;
1517         } else {
1518                 if (vl != CNTR_INVALID_VL)
1519                         return 0;
1520         }
1521         val = read_write_csr(ppd->dd, csr, mode, data);
1522         return val;
1523 }
1524
1525 /* Software defined */
1526 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1527                                 u64 data)
1528 {
1529         u64 ret;
1530
1531         if (mode == CNTR_MODE_R) {
1532                 ret = *cntr;
1533         } else if (mode == CNTR_MODE_W) {
1534                 *cntr = data;
1535                 ret = data;
1536         } else {
1537                 dd_dev_err(dd, "Invalid cntr sw access mode");
1538                 return 0;
1539         }
1540
1541         hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1542
1543         return ret;
1544 }
1545
1546 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1547                                  int vl, int mode, u64 data)
1548 {
1549         struct hfi1_pportdata *ppd = context;
1550
1551         if (vl != CNTR_INVALID_VL)
1552                 return 0;
1553         return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1554 }
1555
1556 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1557                                  int vl, int mode, u64 data)
1558 {
1559         struct hfi1_pportdata *ppd = context;
1560
1561         if (vl != CNTR_INVALID_VL)
1562                 return 0;
1563         return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1564 }
1565
1566 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1567                                        void *context, int vl, int mode,
1568                                        u64 data)
1569 {
1570         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1571
1572         if (vl != CNTR_INVALID_VL)
1573                 return 0;
1574         return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1575 }
1576
1577 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1578                                    void *context, int vl, int mode, u64 data)
1579 {
1580         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1581         u64 zero = 0;
1582         u64 *counter;
1583
1584         if (vl == CNTR_INVALID_VL)
1585                 counter = &ppd->port_xmit_discards;
1586         else if (vl >= 0 && vl < C_VL_COUNT)
1587                 counter = &ppd->port_xmit_discards_vl[vl];
1588         else
1589                 counter = &zero;
1590
1591         return read_write_sw(ppd->dd, counter, mode, data);
1592 }
1593
1594 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1595                                        void *context, int vl, int mode,
1596                                        u64 data)
1597 {
1598         struct hfi1_pportdata *ppd = context;
1599
1600         if (vl != CNTR_INVALID_VL)
1601                 return 0;
1602
1603         return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1604                              mode, data);
1605 }
1606
1607 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1608                                       void *context, int vl, int mode, u64 data)
1609 {
1610         struct hfi1_pportdata *ppd = context;
1611
1612         if (vl != CNTR_INVALID_VL)
1613                 return 0;
1614
1615         return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1616                              mode, data);
1617 }
1618
1619 u64 get_all_cpu_total(u64 __percpu *cntr)
1620 {
1621         int cpu;
1622         u64 counter = 0;
1623
1624         for_each_possible_cpu(cpu)
1625                 counter += *per_cpu_ptr(cntr, cpu);
1626         return counter;
1627 }
1628
1629 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1630                           u64 __percpu *cntr,
1631                           int vl, int mode, u64 data)
1632 {
1633         u64 ret = 0;
1634
1635         if (vl != CNTR_INVALID_VL)
1636                 return 0;
1637
1638         if (mode == CNTR_MODE_R) {
1639                 ret = get_all_cpu_total(cntr) - *z_val;
1640         } else if (mode == CNTR_MODE_W) {
1641                 /* A write can only zero the counter */
1642                 if (data == 0)
1643                         *z_val = get_all_cpu_total(cntr);
1644                 else
1645                         dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1646         } else {
1647                 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1648                 return 0;
1649         }
1650
1651         return ret;
1652 }
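/*
 * Example of the zero-baseline scheme above: if the summed per-CPU
 * total is 1000 and a write of 0 is issued, z_val becomes 1000 and
 * subsequent reads report total - z_val, so the counter appears to
 * restart at 0 without the per-CPU data ever being modified.
 */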
1653
1654 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1655                               void *context, int vl, int mode, u64 data)
1656 {
1657         struct hfi1_devdata *dd = context;
1658
1659         return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1660                               mode, data);
1661 }
1662
1663 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1664                                    void *context, int vl, int mode, u64 data)
1665 {
1666         struct hfi1_devdata *dd = context;
1667
1668         return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1669                               mode, data);
1670 }
1671
1672 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1673                               void *context, int vl, int mode, u64 data)
1674 {
1675         struct hfi1_devdata *dd = context;
1676
1677         return dd->verbs_dev.n_piowait;
1678 }
1679
1680 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1681                                void *context, int vl, int mode, u64 data)
1682 {
1683         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684
1685         return dd->verbs_dev.n_piodrain;
1686 }
1687
1688 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1689                               void *context, int vl, int mode, u64 data)
1690 {
1691         struct hfi1_devdata *dd = context;
1692
1693         return dd->verbs_dev.n_txwait;
1694 }
1695
1696 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1697                                void *context, int vl, int mode, u64 data)
1698 {
1699         struct hfi1_devdata *dd = context;
1700
1701         return dd->verbs_dev.n_kmem_wait;
1702 }
1703
1704 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1705                                    void *context, int vl, int mode, u64 data)
1706 {
1707         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1708
1709         return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1710                               mode, data);
1711 }
1712
1713 /* Software counters for the error status bits within MISC_ERR_STATUS */
1714 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1715                                              void *context, int vl, int mode,
1716                                              u64 data)
1717 {
1718         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1719
1720         return dd->misc_err_status_cnt[12];
1721 }
1722
1723 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1724                                           void *context, int vl, int mode,
1725                                           u64 data)
1726 {
1727         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728
1729         return dd->misc_err_status_cnt[11];
1730 }
1731
1732 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1733                                                void *context, int vl, int mode,
1734                                                u64 data)
1735 {
1736         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1737
1738         return dd->misc_err_status_cnt[10];
1739 }
1740
1741 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1742                                                  void *context, int vl,
1743                                                  int mode, u64 data)
1744 {
1745         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1746
1747         return dd->misc_err_status_cnt[9];
1748 }
1749
1750 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1751                                            void *context, int vl, int mode,
1752                                            u64 data)
1753 {
1754         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1755
1756         return dd->misc_err_status_cnt[8];
1757 }
1758
1759 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1760                                 const struct cntr_entry *entry,
1761                                 void *context, int vl, int mode, u64 data)
1762 {
1763         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1764
1765         return dd->misc_err_status_cnt[7];
1766 }
1767
1768 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1769                                                 void *context, int vl,
1770                                                 int mode, u64 data)
1771 {
1772         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1773
1774         return dd->misc_err_status_cnt[6];
1775 }
1776
1777 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1778                                               void *context, int vl, int mode,
1779                                               u64 data)
1780 {
1781         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1782
1783         return dd->misc_err_status_cnt[5];
1784 }
1785
1786 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1787                                             void *context, int vl, int mode,
1788                                             u64 data)
1789 {
1790         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1791
1792         return dd->misc_err_status_cnt[4];
1793 }
1794
1795 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1796                                                  void *context, int vl,
1797                                                  int mode, u64 data)
1798 {
1799         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800
1801         return dd->misc_err_status_cnt[3];
1802 }
1803
1804 static u64 access_misc_csr_write_bad_addr_err_cnt(
1805                                 const struct cntr_entry *entry,
1806                                 void *context, int vl, int mode, u64 data)
1807 {
1808         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809
1810         return dd->misc_err_status_cnt[2];
1811 }
1812
1813 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1814                                                  void *context, int vl,
1815                                                  int mode, u64 data)
1816 {
1817         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818
1819         return dd->misc_err_status_cnt[1];
1820 }
1821
1822 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1823                                           void *context, int vl, int mode,
1824                                           u64 data)
1825 {
1826         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827
1828         return dd->misc_err_status_cnt[0];
1829 }
1830
1831 /*
1832  * Software counter for the aggregate of
1833  * individual CceErrStatus counters
1834  */
1835 static u64 access_sw_cce_err_status_aggregated_cnt(
1836                                 const struct cntr_entry *entry,
1837                                 void *context, int vl, int mode, u64 data)
1838 {
1839         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840
1841         return dd->sw_cce_err_status_aggregate;
1842 }
1843
1844 /*
1845  * Software counters corresponding to each of the
1846  * error status bits within CceErrStatus
1847  */
1848 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1849                                               void *context, int vl, int mode,
1850                                               u64 data)
1851 {
1852         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1853
1854         return dd->cce_err_status_cnt[40];
1855 }
1856
1857 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1858                                           void *context, int vl, int mode,
1859                                           u64 data)
1860 {
1861         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1862
1863         return dd->cce_err_status_cnt[39];
1864 }
1865
1866 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1867                                           void *context, int vl, int mode,
1868                                           u64 data)
1869 {
1870         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1871
1872         return dd->cce_err_status_cnt[38];
1873 }
1874
1875 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1876                                              void *context, int vl, int mode,
1877                                              u64 data)
1878 {
1879         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1880
1881         return dd->cce_err_status_cnt[37];
1882 }
1883
1884 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1885                                              void *context, int vl, int mode,
1886                                              u64 data)
1887 {
1888         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1889
1890         return dd->cce_err_status_cnt[36];
1891 }
1892
1893 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1894                                 const struct cntr_entry *entry,
1895                                 void *context, int vl, int mode, u64 data)
1896 {
1897         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1898
1899         return dd->cce_err_status_cnt[35];
1900 }
1901
1902 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1903                                 const struct cntr_entry *entry,
1904                                 void *context, int vl, int mode, u64 data)
1905 {
1906         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1907
1908         return dd->cce_err_status_cnt[34];
1909 }
1910
1911 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1912                                                  void *context, int vl,
1913                                                  int mode, u64 data)
1914 {
1915         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1916
1917         return dd->cce_err_status_cnt[33];
1918 }
1919
1920 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1921                                                 void *context, int vl, int mode,
1922                                                 u64 data)
1923 {
1924         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1925
1926         return dd->cce_err_status_cnt[32];
1927 }
1928
1929 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1930                                    void *context, int vl, int mode, u64 data)
1931 {
1932         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933
1934         return dd->cce_err_status_cnt[31];
1935 }
1936
1937 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1938                                                void *context, int vl, int mode,
1939                                                u64 data)
1940 {
1941         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1942
1943         return dd->cce_err_status_cnt[30];
1944 }
1945
1946 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1947                                               void *context, int vl, int mode,
1948                                               u64 data)
1949 {
1950         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951
1952         return dd->cce_err_status_cnt[29];
1953 }
1954
1955 static u64 access_pcic_transmit_back_parity_err_cnt(
1956                                 const struct cntr_entry *entry,
1957                                 void *context, int vl, int mode, u64 data)
1958 {
1959         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960
1961         return dd->cce_err_status_cnt[28];
1962 }
1963
1964 static u64 access_pcic_transmit_front_parity_err_cnt(
1965                                 const struct cntr_entry *entry,
1966                                 void *context, int vl, int mode, u64 data)
1967 {
1968         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969
1970         return dd->cce_err_status_cnt[27];
1971 }
1972
1973 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1974                                              void *context, int vl, int mode,
1975                                              u64 data)
1976 {
1977         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978
1979         return dd->cce_err_status_cnt[26];
1980 }
1981
1982 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1983                                             void *context, int vl, int mode,
1984                                             u64 data)
1985 {
1986         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987
1988         return dd->cce_err_status_cnt[25];
1989 }
1990
1991 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1992                                               void *context, int vl, int mode,
1993                                               u64 data)
1994 {
1995         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996
1997         return dd->cce_err_status_cnt[24];
1998 }
1999
2000 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2001                                              void *context, int vl, int mode,
2002                                              u64 data)
2003 {
2004         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005
2006         return dd->cce_err_status_cnt[23];
2007 }
2008
2009 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2010                                                  void *context, int vl,
2011                                                  int mode, u64 data)
2012 {
2013         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014
2015         return dd->cce_err_status_cnt[22];
2016 }
2017
2018 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2019                                          void *context, int vl, int mode,
2020                                          u64 data)
2021 {
2022         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023
2024         return dd->cce_err_status_cnt[21];
2025 }
2026
2027 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2028                                 const struct cntr_entry *entry,
2029                                 void *context, int vl, int mode, u64 data)
2030 {
2031         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032
2033         return dd->cce_err_status_cnt[20];
2034 }
2035
2036 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2037                                                  void *context, int vl,
2038                                                  int mode, u64 data)
2039 {
2040         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041
2042         return dd->cce_err_status_cnt[19];
2043 }
2044
2045 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2046                                              void *context, int vl, int mode,
2047                                              u64 data)
2048 {
2049         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2050
2051         return dd->cce_err_status_cnt[18];
2052 }
2053
2054 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2055                                             void *context, int vl, int mode,
2056                                             u64 data)
2057 {
2058         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2059
2060         return dd->cce_err_status_cnt[17];
2061 }
2062
2063 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2064                                               void *context, int vl, int mode,
2065                                               u64 data)
2066 {
2067         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2068
2069         return dd->cce_err_status_cnt[16];
2070 }
2071
2072 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2073                                              void *context, int vl, int mode,
2074                                              u64 data)
2075 {
2076         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2077
2078         return dd->cce_err_status_cnt[15];
2079 }
2080
2081 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2082                                                  void *context, int vl,
2083                                                  int mode, u64 data)
2084 {
2085         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2086
2087         return dd->cce_err_status_cnt[14];
2088 }
2089
2090 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2091                                              void *context, int vl, int mode,
2092                                              u64 data)
2093 {
2094         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2095
2096         return dd->cce_err_status_cnt[13];
2097 }
2098
2099 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2100                                 const struct cntr_entry *entry,
2101                                 void *context, int vl, int mode, u64 data)
2102 {
2103         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2104
2105         return dd->cce_err_status_cnt[12];
2106 }
2107
2108 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2109                                 const struct cntr_entry *entry,
2110                                 void *context, int vl, int mode, u64 data)
2111 {
2112         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113
2114         return dd->cce_err_status_cnt[11];
2115 }
2116
2117 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2118                                 const struct cntr_entry *entry,
2119                                 void *context, int vl, int mode, u64 data)
2120 {
2121         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2122
2123         return dd->cce_err_status_cnt[10];
2124 }
2125
2126 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2127                                 const struct cntr_entry *entry,
2128                                 void *context, int vl, int mode, u64 data)
2129 {
2130         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131
2132         return dd->cce_err_status_cnt[9];
2133 }
2134
2135 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2136                                 const struct cntr_entry *entry,
2137                                 void *context, int vl, int mode, u64 data)
2138 {
2139         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141         return dd->cce_err_status_cnt[8];
2142 }
2143
2144 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2145                                                  void *context, int vl,
2146                                                  int mode, u64 data)
2147 {
2148         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150         return dd->cce_err_status_cnt[7];
2151 }
2152
2153 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2154                                 const struct cntr_entry *entry,
2155                                 void *context, int vl, int mode, u64 data)
2156 {
2157         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159         return dd->cce_err_status_cnt[6];
2160 }
2161
2162 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2163                                                void *context, int vl, int mode,
2164                                                u64 data)
2165 {
2166         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168         return dd->cce_err_status_cnt[5];
2169 }
2170
2171 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2172                                           void *context, int vl, int mode,
2173                                           u64 data)
2174 {
2175         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177         return dd->cce_err_status_cnt[4];
2178 }
2179
2180 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2181                                 const struct cntr_entry *entry,
2182                                 void *context, int vl, int mode, u64 data)
2183 {
2184         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186         return dd->cce_err_status_cnt[3];
2187 }
2188
2189 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2190                                                  void *context, int vl,
2191                                                  int mode, u64 data)
2192 {
2193         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195         return dd->cce_err_status_cnt[2];
2196 }
2197
2198 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2199                                                 void *context, int vl,
2200                                                 int mode, u64 data)
2201 {
2202         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204         return dd->cce_err_status_cnt[1];
2205 }
2206
2207 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2208                                          void *context, int vl, int mode,
2209                                          u64 data)
2210 {
2211         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213         return dd->cce_err_status_cnt[0];
2214 }
2215
2216 /*
2217  * Software counters corresponding to each of the
2218  * error status bits within RcvErrStatus
2219  */
2220 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2221                                         void *context, int vl, int mode,
2222                                         u64 data)
2223 {
2224         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2225
2226         return dd->rcv_err_status_cnt[63];
2227 }
2228
2229 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2230                                                 void *context, int vl,
2231                                                 int mode, u64 data)
2232 {
2233         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234
2235         return dd->rcv_err_status_cnt[62];
2236 }
2237
2238 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2239                                                void *context, int vl, int mode,
2240                                                u64 data)
2241 {
2242         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243
2244         return dd->rcv_err_status_cnt[61];
2245 }
2246
2247 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2248                                          void *context, int vl, int mode,
2249                                          u64 data)
2250 {
2251         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252
2253         return dd->rcv_err_status_cnt[60];
2254 }
2255
2256 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2257                                                  void *context, int vl,
2258                                                  int mode, u64 data)
2259 {
2260         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261
2262         return dd->rcv_err_status_cnt[59];
2263 }
2264
2265 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2266                                                  void *context, int vl,
2267                                                  int mode, u64 data)
2268 {
2269         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270
2271         return dd->rcv_err_status_cnt[58];
2272 }
2273
2274 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2275                                             void *context, int vl, int mode,
2276                                             u64 data)
2277 {
2278         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279
2280         return dd->rcv_err_status_cnt[57];
2281 }
2282
2283 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2284                                            void *context, int vl, int mode,
2285                                            u64 data)
2286 {
2287         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288
2289         return dd->rcv_err_status_cnt[56];
2290 }
2291
2292 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2293                                            void *context, int vl, int mode,
2294                                            u64 data)
2295 {
2296         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297
2298         return dd->rcv_err_status_cnt[55];
2299 }
2300
2301 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2302                                 const struct cntr_entry *entry,
2303                                 void *context, int vl, int mode, u64 data)
2304 {
2305         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306
2307         return dd->rcv_err_status_cnt[54];
2308 }
2309
2310 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2311                                 const struct cntr_entry *entry,
2312                                 void *context, int vl, int mode, u64 data)
2313 {
2314         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315
2316         return dd->rcv_err_status_cnt[53];
2317 }
2318
2319 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2320                                                  void *context, int vl,
2321                                                  int mode, u64 data)
2322 {
2323         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324
2325         return dd->rcv_err_status_cnt[52];
2326 }
2327
2328 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2329                                                  void *context, int vl,
2330                                                  int mode, u64 data)
2331 {
2332         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333
2334         return dd->rcv_err_status_cnt[51];
2335 }
2336
2337 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2338                                                  void *context, int vl,
2339                                                  int mode, u64 data)
2340 {
2341         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342
2343         return dd->rcv_err_status_cnt[50];
2344 }
2345
2346 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2347                                                  void *context, int vl,
2348                                                  int mode, u64 data)
2349 {
2350         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351
2352         return dd->rcv_err_status_cnt[49];
2353 }
2354
2355 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2356                                                  void *context, int vl,
2357                                                  int mode, u64 data)
2358 {
2359         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360
2361         return dd->rcv_err_status_cnt[48];
2362 }
2363
2364 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2365                                                  void *context, int vl,
2366                                                  int mode, u64 data)
2367 {
2368         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369
2370         return dd->rcv_err_status_cnt[47];
2371 }
2372
2373 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2374                                          void *context, int vl, int mode,
2375                                          u64 data)
2376 {
2377         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378
2379         return dd->rcv_err_status_cnt[46];
2380 }
2381
2382 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2383                                 const struct cntr_entry *entry,
2384                                 void *context, int vl, int mode, u64 data)
2385 {
2386         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387
2388         return dd->rcv_err_status_cnt[45];
2389 }
2390
2391 static u64 access_rx_lookup_csr_parity_err_cnt(
2392                                 const struct cntr_entry *entry,
2393                                 void *context, int vl, int mode, u64 data)
2394 {
2395         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396
2397         return dd->rcv_err_status_cnt[44];
2398 }
2399
2400 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2401                                 const struct cntr_entry *entry,
2402                                 void *context, int vl, int mode, u64 data)
2403 {
2404         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405
2406         return dd->rcv_err_status_cnt[43];
2407 }
2408
2409 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2410                                 const struct cntr_entry *entry,
2411                                 void *context, int vl, int mode, u64 data)
2412 {
2413         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414
2415         return dd->rcv_err_status_cnt[42];
2416 }
2417
2418 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2419                                 const struct cntr_entry *entry,
2420                                 void *context, int vl, int mode, u64 data)
2421 {
2422         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423
2424         return dd->rcv_err_status_cnt[41];
2425 }
2426
2427 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2428                                 const struct cntr_entry *entry,
2429                                 void *context, int vl, int mode, u64 data)
2430 {
2431         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432
2433         return dd->rcv_err_status_cnt[40];
2434 }
2435
2436 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2437                                 const struct cntr_entry *entry,
2438                                 void *context, int vl, int mode, u64 data)
2439 {
2440         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441
2442         return dd->rcv_err_status_cnt[39];
2443 }
2444
2445 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2446                                 const struct cntr_entry *entry,
2447                                 void *context, int vl, int mode, u64 data)
2448 {
2449         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450
2451         return dd->rcv_err_status_cnt[38];
2452 }
2453
2454 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2455                                 const struct cntr_entry *entry,
2456                                 void *context, int vl, int mode, u64 data)
2457 {
2458         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459
2460         return dd->rcv_err_status_cnt[37];
2461 }
2462
2463 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2464                                 const struct cntr_entry *entry,
2465                                 void *context, int vl, int mode, u64 data)
2466 {
2467         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468
2469         return dd->rcv_err_status_cnt[36];
2470 }
2471
2472 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2473                                 const struct cntr_entry *entry,
2474                                 void *context, int vl, int mode, u64 data)
2475 {
2476         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477
2478         return dd->rcv_err_status_cnt[35];
2479 }
2480
2481 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2482                                 const struct cntr_entry *entry,
2483                                 void *context, int vl, int mode, u64 data)
2484 {
2485         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486
2487         return dd->rcv_err_status_cnt[34];
2488 }
2489
2490 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2491                                 const struct cntr_entry *entry,
2492                                 void *context, int vl, int mode, u64 data)
2493 {
2494         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495
2496         return dd->rcv_err_status_cnt[33];
2497 }
2498
2499 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2500                                         void *context, int vl, int mode,
2501                                         u64 data)
2502 {
2503         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504
2505         return dd->rcv_err_status_cnt[32];
2506 }
2507
2508 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2509                                        void *context, int vl, int mode,
2510                                        u64 data)
2511 {
2512         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513
2514         return dd->rcv_err_status_cnt[31];
2515 }
2516
2517 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2518                                           void *context, int vl, int mode,
2519                                           u64 data)
2520 {
2521         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522
2523         return dd->rcv_err_status_cnt[30];
2524 }
2525
2526 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2527                                              void *context, int vl, int mode,
2528                                              u64 data)
2529 {
2530         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531
2532         return dd->rcv_err_status_cnt[29];
2533 }
2534
2535 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2536                                                  void *context, int vl,
2537                                                  int mode, u64 data)
2538 {
2539         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540
2541         return dd->rcv_err_status_cnt[28];
2542 }
2543
2544 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2545                                 const struct cntr_entry *entry,
2546                                 void *context, int vl, int mode, u64 data)
2547 {
2548         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549
2550         return dd->rcv_err_status_cnt[27];
2551 }
2552
2553 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2554                                 const struct cntr_entry *entry,
2555                                 void *context, int vl, int mode, u64 data)
2556 {
2557         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558
2559         return dd->rcv_err_status_cnt[26];
2560 }
2561
2562 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2563                                 const struct cntr_entry *entry,
2564                                 void *context, int vl, int mode, u64 data)
2565 {
2566         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567
2568         return dd->rcv_err_status_cnt[25];
2569 }
2570
2571 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2572                                 const struct cntr_entry *entry,
2573                                 void *context, int vl, int mode, u64 data)
2574 {
2575         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576
2577         return dd->rcv_err_status_cnt[24];
2578 }
2579
2580 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2581                                 const struct cntr_entry *entry,
2582                                 void *context, int vl, int mode, u64 data)
2583 {
2584         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585
2586         return dd->rcv_err_status_cnt[23];
2587 }
2588
2589 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2590                                 const struct cntr_entry *entry,
2591                                 void *context, int vl, int mode, u64 data)
2592 {
2593         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594
2595         return dd->rcv_err_status_cnt[22];
2596 }
2597
2598 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2599                                 const struct cntr_entry *entry,
2600                                 void *context, int vl, int mode, u64 data)
2601 {
2602         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603
2604         return dd->rcv_err_status_cnt[21];
2605 }
2606
2607 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2608                                 const struct cntr_entry *entry,
2609                                 void *context, int vl, int mode, u64 data)
2610 {
2611         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612
2613         return dd->rcv_err_status_cnt[20];
2614 }
2615
2616 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2617                                 const struct cntr_entry *entry,
2618                                 void *context, int vl, int mode, u64 data)
2619 {
2620         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621
2622         return dd->rcv_err_status_cnt[19];
2623 }
2624
2625 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2626                                                  void *context, int vl,
2627                                                  int mode, u64 data)
2628 {
2629         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2630
2631         return dd->rcv_err_status_cnt[18];
2632 }
2633
2634 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2635                                                  void *context, int vl,
2636                                                  int mode, u64 data)
2637 {
2638         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2639
2640         return dd->rcv_err_status_cnt[17];
2641 }
2642
2643 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2644                                 const struct cntr_entry *entry,
2645                                 void *context, int vl, int mode, u64 data)
2646 {
2647         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2648
2649         return dd->rcv_err_status_cnt[16];
2650 }
2651
2652 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2653                                 const struct cntr_entry *entry,
2654                                 void *context, int vl, int mode, u64 data)
2655 {
2656         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2657
2658         return dd->rcv_err_status_cnt[15];
2659 }
2660
2661 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2662                                                 void *context, int vl,
2663                                                 int mode, u64 data)
2664 {
2665         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2666
2667         return dd->rcv_err_status_cnt[14];
2668 }
2669
2670 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2671                                                 void *context, int vl,
2672                                                 int mode, u64 data)
2673 {
2674         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2675
2676         return dd->rcv_err_status_cnt[13];
2677 }
2678
2679 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2680                                               void *context, int vl, int mode,
2681                                               u64 data)
2682 {
2683         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2684
2685         return dd->rcv_err_status_cnt[12];
2686 }
2687
2688 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2689                                           void *context, int vl, int mode,
2690                                           u64 data)
2691 {
2692         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693
2694         return dd->rcv_err_status_cnt[11];
2695 }
2696
2697 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2698                                           void *context, int vl, int mode,
2699                                           u64 data)
2700 {
2701         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2702
2703         return dd->rcv_err_status_cnt[10];
2704 }
2705
2706 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2707                                                void *context, int vl, int mode,
2708                                                u64 data)
2709 {
2710         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711
2712         return dd->rcv_err_status_cnt[9];
2713 }
2714
2715 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2716                                             void *context, int vl, int mode,
2717                                             u64 data)
2718 {
2719         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721         return dd->rcv_err_status_cnt[8];
2722 }
2723
2724 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2725                                 const struct cntr_entry *entry,
2726                                 void *context, int vl, int mode, u64 data)
2727 {
2728         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730         return dd->rcv_err_status_cnt[7];
2731 }
2732
2733 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2734                                 const struct cntr_entry *entry,
2735                                 void *context, int vl, int mode, u64 data)
2736 {
2737         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739         return dd->rcv_err_status_cnt[6];
2740 }
2741
2742 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2743                                           void *context, int vl, int mode,
2744                                           u64 data)
2745 {
2746         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748         return dd->rcv_err_status_cnt[5];
2749 }
2750
2751 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2752                                           void *context, int vl, int mode,
2753                                           u64 data)
2754 {
2755         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757         return dd->rcv_err_status_cnt[4];
2758 }
2759
2760 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2761                                          void *context, int vl, int mode,
2762                                          u64 data)
2763 {
2764         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766         return dd->rcv_err_status_cnt[3];
2767 }
2768
2769 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2770                                          void *context, int vl, int mode,
2771                                          u64 data)
2772 {
2773         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775         return dd->rcv_err_status_cnt[2];
2776 }
2777
2778 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2779                                             void *context, int vl, int mode,
2780                                             u64 data)
2781 {
2782         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784         return dd->rcv_err_status_cnt[1];
2785 }
2786
2787 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2788                                          void *context, int vl, int mode,
2789                                          u64 data)
2790 {
2791         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793         return dd->rcv_err_status_cnt[0];
2794 }
2795
2796 /*
2797  * Software counters corresponding to each of the
2798  * error status bits within SendPioErrStatus
2799  */
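/*
 * Each accessor in the groups below simply returns one slot of a per-device
 * software counter array; the array index corresponds to the bit position of
 * the error in the hardware error-status register (so, e.g., the slot
 * send_pio_err_status_cnt[35] tracks bit 35 of SendPioErrStatus).
 *
 * Further down in the driver each accessor is hooked into the device counter
 * table.  Illustrative sketch only -- the exact index names and flags are
 * defined elsewhere in the driver:
 *
 *   [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0, CNTR_NORMAL,
 *                                    access_rx_rbuf_full_err_cnt),
 */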
2800 static u64 access_pio_pec_sop_head_parity_err_cnt(
2801                                 const struct cntr_entry *entry,
2802                                 void *context, int vl, int mode, u64 data)
2803 {
2804         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2805
2806         return dd->send_pio_err_status_cnt[35];
2807 }
2808
2809 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2810                                 const struct cntr_entry *entry,
2811                                 void *context, int vl, int mode, u64 data)
2812 {
2813         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814
2815         return dd->send_pio_err_status_cnt[34];
2816 }
2817
2818 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2819                                 const struct cntr_entry *entry,
2820                                 void *context, int vl, int mode, u64 data)
2821 {
2822         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823
2824         return dd->send_pio_err_status_cnt[33];
2825 }
2826
2827 static u64 access_pio_current_free_cnt_parity_err_cnt(
2828                                 const struct cntr_entry *entry,
2829                                 void *context, int vl, int mode, u64 data)
2830 {
2831         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832
2833         return dd->send_pio_err_status_cnt[32];
2834 }
2835
2836 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2837                                           void *context, int vl, int mode,
2838                                           u64 data)
2839 {
2840         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841
2842         return dd->send_pio_err_status_cnt[31];
2843 }
2844
2845 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2846                                           void *context, int vl, int mode,
2847                                           u64 data)
2848 {
2849         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850
2851         return dd->send_pio_err_status_cnt[30];
2852 }
2853
2854 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2855                                            void *context, int vl, int mode,
2856                                            u64 data)
2857 {
2858         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859
2860         return dd->send_pio_err_status_cnt[29];
2861 }
2862
2863 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2864                                 const struct cntr_entry *entry,
2865                                 void *context, int vl, int mode, u64 data)
2866 {
2867         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868
2869         return dd->send_pio_err_status_cnt[28];
2870 }
2871
2872 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2873                                              void *context, int vl, int mode,
2874                                              u64 data)
2875 {
2876         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877
2878         return dd->send_pio_err_status_cnt[27];
2879 }
2880
2881 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2882                                              void *context, int vl, int mode,
2883                                              u64 data)
2884 {
2885         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886
2887         return dd->send_pio_err_status_cnt[26];
2888 }
2889
2890 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2891                                                 void *context, int vl,
2892                                                 int mode, u64 data)
2893 {
2894         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895
2896         return dd->send_pio_err_status_cnt[25];
2897 }
2898
2899 static u64 access_pio_block_qw_count_parity_err_cnt(
2900                                 const struct cntr_entry *entry,
2901                                 void *context, int vl, int mode, u64 data)
2902 {
2903         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904
2905         return dd->send_pio_err_status_cnt[24];
2906 }
2907
2908 static u64 access_pio_write_qw_valid_parity_err_cnt(
2909                                 const struct cntr_entry *entry,
2910                                 void *context, int vl, int mode, u64 data)
2911 {
2912         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913
2914         return dd->send_pio_err_status_cnt[23];
2915 }
2916
2917 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2918                                             void *context, int vl, int mode,
2919                                             u64 data)
2920 {
2921         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922
2923         return dd->send_pio_err_status_cnt[22];
2924 }
2925
2926 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2927                                                 void *context, int vl,
2928                                                 int mode, u64 data)
2929 {
2930         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931
2932         return dd->send_pio_err_status_cnt[21];
2933 }
2934
2935 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2936                                                 void *context, int vl,
2937                                                 int mode, u64 data)
2938 {
2939         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940
2941         return dd->send_pio_err_status_cnt[20];
2942 }
2943
2944 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2945                                                 void *context, int vl,
2946                                                 int mode, u64 data)
2947 {
2948         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949
2950         return dd->send_pio_err_status_cnt[19];
2951 }
2952
2953 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2954                                 const struct cntr_entry *entry,
2955                                 void *context, int vl, int mode, u64 data)
2956 {
2957         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2958
2959         return dd->send_pio_err_status_cnt[18];
2960 }
2961
2962 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2963                                          void *context, int vl, int mode,
2964                                          u64 data)
2965 {
2966         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2967
2968         return dd->send_pio_err_status_cnt[17];
2969 }
2970
2971 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2972                                             void *context, int vl, int mode,
2973                                             u64 data)
2974 {
2975         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2976
2977         return dd->send_pio_err_status_cnt[16];
2978 }
2979
2980 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2981                                 const struct cntr_entry *entry,
2982                                 void *context, int vl, int mode, u64 data)
2983 {
2984         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2985
2986         return dd->send_pio_err_status_cnt[15];
2987 }
2988
2989 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2990                                 const struct cntr_entry *entry,
2991                                 void *context, int vl, int mode, u64 data)
2992 {
2993         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2994
2995         return dd->send_pio_err_status_cnt[14];
2996 }
2997
2998 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2999                                 const struct cntr_entry *entry,
3000                                 void *context, int vl, int mode, u64 data)
3001 {
3002         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3003
3004         return dd->send_pio_err_status_cnt[13];
3005 }
3006
3007 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3008                                 const struct cntr_entry *entry,
3009                                 void *context, int vl, int mode, u64 data)
3010 {
3011         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3012
3013         return dd->send_pio_err_status_cnt[12];
3014 }
3015
3016 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3017                                 const struct cntr_entry *entry,
3018                                 void *context, int vl, int mode, u64 data)
3019 {
3020         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021
3022         return dd->send_pio_err_status_cnt[11];
3023 }
3024
3025 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3026                                 const struct cntr_entry *entry,
3027                                 void *context, int vl, int mode, u64 data)
3028 {
3029         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3030
3031         return dd->send_pio_err_status_cnt[10];
3032 }
3033
3034 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3035                                 const struct cntr_entry *entry,
3036                                 void *context, int vl, int mode, u64 data)
3037 {
3038         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039
3040         return dd->send_pio_err_status_cnt[9];
3041 }
3042
3043 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3044                                 const struct cntr_entry *entry,
3045                                 void *context, int vl, int mode, u64 data)
3046 {
3047         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049         return dd->send_pio_err_status_cnt[8];
3050 }
3051
3052 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3053                                 const struct cntr_entry *entry,
3054                                 void *context, int vl, int mode, u64 data)
3055 {
3056         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057
3058         return dd->send_pio_err_status_cnt[7];
3059 }
3060
3061 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3062                                               void *context, int vl, int mode,
3063                                               u64 data)
3064 {
3065         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066
3067         return dd->send_pio_err_status_cnt[6];
3068 }
3069
3070 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3071                                               void *context, int vl, int mode,
3072                                               u64 data)
3073 {
3074         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3075
3076         return dd->send_pio_err_status_cnt[5];
3077 }
3078
3079 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3080                                            void *context, int vl, int mode,
3081                                            u64 data)
3082 {
3083         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3084
3085         return dd->send_pio_err_status_cnt[4];
3086 }
3087
3088 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3089                                            void *context, int vl, int mode,
3090                                            u64 data)
3091 {
3092         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3093
3094         return dd->send_pio_err_status_cnt[3];
3095 }
3096
3097 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3098                                          void *context, int vl, int mode,
3099                                          u64 data)
3100 {
3101         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3102
3103         return dd->send_pio_err_status_cnt[2];
3104 }
3105
3106 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3107                                                 void *context, int vl,
3108                                                 int mode, u64 data)
3109 {
3110         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3111
3112         return dd->send_pio_err_status_cnt[1];
3113 }
3114
3115 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3116                                              void *context, int vl, int mode,
3117                                              u64 data)
3118 {
3119         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120
3121         return dd->send_pio_err_status_cnt[0];
3122 }
3123
3124 /*
3125  * Software counters corresponding to each of the
3126  * error status bits within SendDmaErrStatus
3127  */
3128 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3129                                 const struct cntr_entry *entry,
3130                                 void *context, int vl, int mode, u64 data)
3131 {
3132         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134         return dd->send_dma_err_status_cnt[3];
3135 }
3136
3137 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3138                                 const struct cntr_entry *entry,
3139                                 void *context, int vl, int mode, u64 data)
3140 {
3141         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143         return dd->send_dma_err_status_cnt[2];
3144 }
3145
3146 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3147                                           void *context, int vl, int mode,
3148                                           u64 data)
3149 {
3150         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152         return dd->send_dma_err_status_cnt[1];
3153 }
3154
3155 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3156                                        void *context, int vl, int mode,
3157                                        u64 data)
3158 {
3159         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161         return dd->send_dma_err_status_cnt[0];
3162 }
3163
3164 /*
3165  * Software counters corresponding to each of the
3166  * error status bits within SendEgressErrStatus
3167  */
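/*
 * SendEgressErrStatus is a full 64-bit register; every bit position has an
 * accessor below (indices 63..0), including the reserved bits (see the
 * access_egress_reserved_*_err_cnt helpers), which keeps the accessor list
 * in one-to-one correspondence with the register's bit layout.
 */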
3168 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3169                                 const struct cntr_entry *entry,
3170                                 void *context, int vl, int mode, u64 data)
3171 {
3172         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3173
3174         return dd->send_egress_err_status_cnt[63];
3175 }
3176
3177 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3178                                 const struct cntr_entry *entry,
3179                                 void *context, int vl, int mode, u64 data)
3180 {
3181         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182
3183         return dd->send_egress_err_status_cnt[62];
3184 }
3185
3186 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3187                                              void *context, int vl, int mode,
3188                                              u64 data)
3189 {
3190         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191
3192         return dd->send_egress_err_status_cnt[61];
3193 }
3194
3195 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3196                                                  void *context, int vl,
3197                                                  int mode, u64 data)
3198 {
3199         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200
3201         return dd->send_egress_err_status_cnt[60];
3202 }
3203
3204 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3205                                 const struct cntr_entry *entry,
3206                                 void *context, int vl, int mode, u64 data)
3207 {
3208         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209
3210         return dd->send_egress_err_status_cnt[59];
3211 }
3212
3213 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3214                                         void *context, int vl, int mode,
3215                                         u64 data)
3216 {
3217         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218
3219         return dd->send_egress_err_status_cnt[58];
3220 }
3221
3222 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3223                                             void *context, int vl, int mode,
3224                                             u64 data)
3225 {
3226         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227
3228         return dd->send_egress_err_status_cnt[57];
3229 }
3230
3231 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3232                                               void *context, int vl, int mode,
3233                                               u64 data)
3234 {
3235         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236
3237         return dd->send_egress_err_status_cnt[56];
3238 }
3239
3240 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3241                                               void *context, int vl, int mode,
3242                                               u64 data)
3243 {
3244         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245
3246         return dd->send_egress_err_status_cnt[55];
3247 }
3248
3249 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3250                                               void *context, int vl, int mode,
3251                                               u64 data)
3252 {
3253         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254
3255         return dd->send_egress_err_status_cnt[54];
3256 }
3257
3258 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3259                                               void *context, int vl, int mode,
3260                                               u64 data)
3261 {
3262         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263
3264         return dd->send_egress_err_status_cnt[53];
3265 }
3266
3267 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3268                                               void *context, int vl, int mode,
3269                                               u64 data)
3270 {
3271         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272
3273         return dd->send_egress_err_status_cnt[52];
3274 }
3275
3276 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3277                                               void *context, int vl, int mode,
3278                                               u64 data)
3279 {
3280         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281
3282         return dd->send_egress_err_status_cnt[51];
3283 }
3284
3285 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3286                                               void *context, int vl, int mode,
3287                                               u64 data)
3288 {
3289         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290
3291         return dd->send_egress_err_status_cnt[50];
3292 }
3293
3294 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3295                                               void *context, int vl, int mode,
3296                                               u64 data)
3297 {
3298         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299
3300         return dd->send_egress_err_status_cnt[49];
3301 }
3302
3303 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3304                                               void *context, int vl, int mode,
3305                                               u64 data)
3306 {
3307         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308
3309         return dd->send_egress_err_status_cnt[48];
3310 }
3311
3312 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3313                                               void *context, int vl, int mode,
3314                                               u64 data)
3315 {
3316         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317
3318         return dd->send_egress_err_status_cnt[47];
3319 }
3320
3321 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3322                                             void *context, int vl, int mode,
3323                                             u64 data)
3324 {
3325         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326
3327         return dd->send_egress_err_status_cnt[46];
3328 }
3329
3330 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3331                                              void *context, int vl, int mode,
3332                                              u64 data)
3333 {
3334         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335
3336         return dd->send_egress_err_status_cnt[45];
3337 }
3338
3339 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3340                                                  void *context, int vl,
3341                                                  int mode, u64 data)
3342 {
3343         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344
3345         return dd->send_egress_err_status_cnt[44];
3346 }
3347
3348 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3349                                 const struct cntr_entry *entry,
3350                                 void *context, int vl, int mode, u64 data)
3351 {
3352         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353
3354         return dd->send_egress_err_status_cnt[43];
3355 }
3356
3357 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3358                                         void *context, int vl, int mode,
3359                                         u64 data)
3360 {
3361         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362
3363         return dd->send_egress_err_status_cnt[42];
3364 }
3365
3366 static u64 access_tx_credit_return_partiy_err_cnt(
3367                                 const struct cntr_entry *entry,
3368                                 void *context, int vl, int mode, u64 data)
3369 {
3370         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371
3372         return dd->send_egress_err_status_cnt[41];
3373 }
3374
3375 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3376                                 const struct cntr_entry *entry,
3377                                 void *context, int vl, int mode, u64 data)
3378 {
3379         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380
3381         return dd->send_egress_err_status_cnt[40];
3382 }
3383
3384 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3385                                 const struct cntr_entry *entry,
3386                                 void *context, int vl, int mode, u64 data)
3387 {
3388         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389
3390         return dd->send_egress_err_status_cnt[39];
3391 }
3392
3393 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3394                                 const struct cntr_entry *entry,
3395                                 void *context, int vl, int mode, u64 data)
3396 {
3397         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398
3399         return dd->send_egress_err_status_cnt[38];
3400 }
3401
3402 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3403                                 const struct cntr_entry *entry,
3404                                 void *context, int vl, int mode, u64 data)
3405 {
3406         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407
3408         return dd->send_egress_err_status_cnt[37];
3409 }
3410
3411 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3412                                 const struct cntr_entry *entry,
3413                                 void *context, int vl, int mode, u64 data)
3414 {
3415         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416
3417         return dd->send_egress_err_status_cnt[36];
3418 }
3419
3420 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3421                                 const struct cntr_entry *entry,
3422                                 void *context, int vl, int mode, u64 data)
3423 {
3424         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425
3426         return dd->send_egress_err_status_cnt[35];
3427 }
3428
3429 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3430                                 const struct cntr_entry *entry,
3431                                 void *context, int vl, int mode, u64 data)
3432 {
3433         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434
3435         return dd->send_egress_err_status_cnt[34];
3436 }
3437
3438 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3439                                 const struct cntr_entry *entry,
3440                                 void *context, int vl, int mode, u64 data)
3441 {
3442         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443
3444         return dd->send_egress_err_status_cnt[33];
3445 }
3446
3447 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3448                                 const struct cntr_entry *entry,
3449                                 void *context, int vl, int mode, u64 data)
3450 {
3451         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452
3453         return dd->send_egress_err_status_cnt[32];
3454 }
3455
3456 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3457                                 const struct cntr_entry *entry,
3458                                 void *context, int vl, int mode, u64 data)
3459 {
3460         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461
3462         return dd->send_egress_err_status_cnt[31];
3463 }
3464
3465 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3466                                 const struct cntr_entry *entry,
3467                                 void *context, int vl, int mode, u64 data)
3468 {
3469         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470
3471         return dd->send_egress_err_status_cnt[30];
3472 }
3473
3474 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3475                                 const struct cntr_entry *entry,
3476                                 void *context, int vl, int mode, u64 data)
3477 {
3478         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479
3480         return dd->send_egress_err_status_cnt[29];
3481 }
3482
3483 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3484                                 const struct cntr_entry *entry,
3485                                 void *context, int vl, int mode, u64 data)
3486 {
3487         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488
3489         return dd->send_egress_err_status_cnt[28];
3490 }
3491
3492 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3493                                 const struct cntr_entry *entry,
3494                                 void *context, int vl, int mode, u64 data)
3495 {
3496         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497
3498         return dd->send_egress_err_status_cnt[27];
3499 }
3500
3501 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3502                                 const struct cntr_entry *entry,
3503                                 void *context, int vl, int mode, u64 data)
3504 {
3505         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506
3507         return dd->send_egress_err_status_cnt[26];
3508 }
3509
3510 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3511                                 const struct cntr_entry *entry,
3512                                 void *context, int vl, int mode, u64 data)
3513 {
3514         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515
3516         return dd->send_egress_err_status_cnt[25];
3517 }
3518
3519 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3520                                 const struct cntr_entry *entry,
3521                                 void *context, int vl, int mode, u64 data)
3522 {
3523         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524
3525         return dd->send_egress_err_status_cnt[24];
3526 }
3527
3528 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3529                                 const struct cntr_entry *entry,
3530                                 void *context, int vl, int mode, u64 data)
3531 {
3532         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533
3534         return dd->send_egress_err_status_cnt[23];
3535 }
3536
3537 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3538                                 const struct cntr_entry *entry,
3539                                 void *context, int vl, int mode, u64 data)
3540 {
3541         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542
3543         return dd->send_egress_err_status_cnt[22];
3544 }
3545
3546 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3547                                 const struct cntr_entry *entry,
3548                                 void *context, int vl, int mode, u64 data)
3549 {
3550         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551
3552         return dd->send_egress_err_status_cnt[21];
3553 }
3554
3555 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3556                                 const struct cntr_entry *entry,
3557                                 void *context, int vl, int mode, u64 data)
3558 {
3559         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560
3561         return dd->send_egress_err_status_cnt[20];
3562 }
3563
3564 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3565                                 const struct cntr_entry *entry,
3566                                 void *context, int vl, int mode, u64 data)
3567 {
3568         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569
3570         return dd->send_egress_err_status_cnt[19];
3571 }
3572
3573 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3574                                 const struct cntr_entry *entry,
3575                                 void *context, int vl, int mode, u64 data)
3576 {
3577         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3578
3579         return dd->send_egress_err_status_cnt[18];
3580 }
3581
3582 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3583                                 const struct cntr_entry *entry,
3584                                 void *context, int vl, int mode, u64 data)
3585 {
3586         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3587
3588         return dd->send_egress_err_status_cnt[17];
3589 }
3590
3591 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3592                                 const struct cntr_entry *entry,
3593                                 void *context, int vl, int mode, u64 data)
3594 {
3595         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3596
3597         return dd->send_egress_err_status_cnt[16];
3598 }
3599
3600 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3601                                            void *context, int vl, int mode,
3602                                            u64 data)
3603 {
3604         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3605
3606         return dd->send_egress_err_status_cnt[15];
3607 }
3608
3609 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3610                                                  void *context, int vl,
3611                                                  int mode, u64 data)
3612 {
3613         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3614
3615         return dd->send_egress_err_status_cnt[14];
3616 }
3617
3618 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3619                                                void *context, int vl, int mode,
3620                                                u64 data)
3621 {
3622         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3623
3624         return dd->send_egress_err_status_cnt[13];
3625 }
3626
3627 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3628                                         void *context, int vl, int mode,
3629                                         u64 data)
3630 {
3631         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3632
3633         return dd->send_egress_err_status_cnt[12];
3634 }
3635
3636 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3637                                 const struct cntr_entry *entry,
3638                                 void *context, int vl, int mode, u64 data)
3639 {
3640         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641
3642         return dd->send_egress_err_status_cnt[11];
3643 }
3644
3645 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3646                                              void *context, int vl, int mode,
3647                                              u64 data)
3648 {
3649         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3650
3651         return dd->send_egress_err_status_cnt[10];
3652 }
3653
3654 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3655                                             void *context, int vl, int mode,
3656                                             u64 data)
3657 {
3658         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659
3660         return dd->send_egress_err_status_cnt[9];
3661 }
3662
3663 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3664                                 const struct cntr_entry *entry,
3665                                 void *context, int vl, int mode, u64 data)
3666 {
3667         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668
3669         return dd->send_egress_err_status_cnt[8];
3670 }
3671
3672 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3673                                 const struct cntr_entry *entry,
3674                                 void *context, int vl, int mode, u64 data)
3675 {
3676         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677
3678         return dd->send_egress_err_status_cnt[7];
3679 }
3680
3681 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3682                                             void *context, int vl, int mode,
3683                                             u64 data)
3684 {
3685         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3686
3687         return dd->send_egress_err_status_cnt[6];
3688 }
3689
3690 static u64 access_tx_incorrect_link_state_err_cnt(
3691                                 const struct cntr_entry *entry,
3692                                 void *context, int vl, int mode, u64 data)
3693 {
3694         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3695
3696         return dd->send_egress_err_status_cnt[5];
3697 }
3698
3699 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3700                                       void *context, int vl, int mode,
3701                                       u64 data)
3702 {
3703         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3704
3705         return dd->send_egress_err_status_cnt[4];
3706 }
3707
3708 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3709                                 const struct cntr_entry *entry,
3710                                 void *context, int vl, int mode, u64 data)
3711 {
3712         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3713
3714         return dd->send_egress_err_status_cnt[3];
3715 }
3716
3717 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3718                                             void *context, int vl, int mode,
3719                                             u64 data)
3720 {
3721         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722
3723         return dd->send_egress_err_status_cnt[2];
3724 }
3725
3726 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3727                                 const struct cntr_entry *entry,
3728                                 void *context, int vl, int mode, u64 data)
3729 {
3730         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731
3732         return dd->send_egress_err_status_cnt[1];
3733 }
3734
3735 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3736                                 const struct cntr_entry *entry,
3737                                 void *context, int vl, int mode, u64 data)
3738 {
3739         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741         return dd->send_egress_err_status_cnt[0];
3742 }
3743
3744 /*
3745  * Software counters corresponding to each of the
3746  * error status bits within SendErrStatus
3747  */
3748 static u64 access_send_csr_write_bad_addr_err_cnt(
3749                                 const struct cntr_entry *entry,
3750                                 void *context, int vl, int mode, u64 data)
3751 {
3752         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3753
3754         return dd->send_err_status_cnt[2];
3755 }
3756
3757 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3758                                                  void *context, int vl,
3759                                                  int mode, u64 data)
3760 {
3761         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3762
3763         return dd->send_err_status_cnt[1];
3764 }
3765
3766 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3767                                       void *context, int vl, int mode,
3768                                       u64 data)
3769 {
3770         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3771
3772         return dd->send_err_status_cnt[0];
3773 }
3774
3775 /*
3776  * Software counters corresponding to each of the
3777  * error status bits within SendCtxtErrStatus
3778  */
3779 static u64 access_pio_write_out_of_bounds_err_cnt(
3780                                 const struct cntr_entry *entry,
3781                                 void *context, int vl, int mode, u64 data)
3782 {
3783         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785         return dd->sw_ctxt_err_status_cnt[4];
3786 }
3787
3788 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3789                                              void *context, int vl, int mode,
3790                                              u64 data)
3791 {
3792         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794         return dd->sw_ctxt_err_status_cnt[3];
3795 }
3796
3797 static u64 access_pio_write_crosses_boundary_err_cnt(
3798                                 const struct cntr_entry *entry,
3799                                 void *context, int vl, int mode, u64 data)
3800 {
3801         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803         return dd->sw_ctxt_err_status_cnt[2];
3804 }
3805
3806 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3807                                                 void *context, int vl,
3808                                                 int mode, u64 data)
3809 {
3810         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812         return dd->sw_ctxt_err_status_cnt[1];
3813 }
3814
3815 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3816                                                void *context, int vl, int mode,
3817                                                u64 data)
3818 {
3819         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821         return dd->sw_ctxt_err_status_cnt[0];
3822 }
3823
3824 /*
3825  * Software counters corresponding to each of the
3826  * error status bits within SendDmaEngErrStatus
3827  */
3828 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3829                                 const struct cntr_entry *entry,
3830                                 void *context, int vl, int mode, u64 data)
3831 {
3832         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3833
3834         return dd->sw_send_dma_eng_err_status_cnt[23];
3835 }
3836
3837 static u64 access_sdma_header_storage_cor_err_cnt(
3838                                 const struct cntr_entry *entry,
3839                                 void *context, int vl, int mode, u64 data)
3840 {
3841         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3842
3843         return dd->sw_send_dma_eng_err_status_cnt[22];
3844 }
3845
3846 static u64 access_sdma_packet_tracking_cor_err_cnt(
3847                                 const struct cntr_entry *entry,
3848                                 void *context, int vl, int mode, u64 data)
3849 {
3850         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3851
3852         return dd->sw_send_dma_eng_err_status_cnt[21];
3853 }
3854
3855 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3856                                             void *context, int vl, int mode,
3857                                             u64 data)
3858 {
3859         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3860
3861         return dd->sw_send_dma_eng_err_status_cnt[20];
3862 }
3863
3864 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3865                                               void *context, int vl, int mode,
3866                                               u64 data)
3867 {
3868         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3869
3870         return dd->sw_send_dma_eng_err_status_cnt[19];
3871 }
3872
3873 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3874                                 const struct cntr_entry *entry,
3875                                 void *context, int vl, int mode, u64 data)
3876 {
3877         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3878
3879         return dd->sw_send_dma_eng_err_status_cnt[18];
3880 }
3881
3882 static u64 access_sdma_header_storage_unc_err_cnt(
3883                                 const struct cntr_entry *entry,
3884                                 void *context, int vl, int mode, u64 data)
3885 {
3886         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3887
3888         return dd->sw_send_dma_eng_err_status_cnt[17];
3889 }
3890
3891 static u64 access_sdma_packet_tracking_unc_err_cnt(
3892                                 const struct cntr_entry *entry,
3893                                 void *context, int vl, int mode, u64 data)
3894 {
3895         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3896
3897         return dd->sw_send_dma_eng_err_status_cnt[16];
3898 }
3899
3900 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3901                                             void *context, int vl, int mode,
3902                                             u64 data)
3903 {
3904         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3905
3906         return dd->sw_send_dma_eng_err_status_cnt[15];
3907 }
3908
3909 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3910                                               void *context, int vl, int mode,
3911                                               u64 data)
3912 {
3913         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3914
3915         return dd->sw_send_dma_eng_err_status_cnt[14];
3916 }
3917
3918 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3919                                        void *context, int vl, int mode,
3920                                        u64 data)
3921 {
3922         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3923
3924         return dd->sw_send_dma_eng_err_status_cnt[13];
3925 }
3926
3927 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3928                                              void *context, int vl, int mode,
3929                                              u64 data)
3930 {
3931         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932
3933         return dd->sw_send_dma_eng_err_status_cnt[12];
3934 }
3935
3936 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3937                                               void *context, int vl, int mode,
3938                                               u64 data)
3939 {
3940         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941
3942         return dd->sw_send_dma_eng_err_status_cnt[11];
3943 }
3944
3945 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3946                                              void *context, int vl, int mode,
3947                                              u64 data)
3948 {
3949         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3950
3951         return dd->sw_send_dma_eng_err_status_cnt[10];
3952 }
3953
3954 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3955                                           void *context, int vl, int mode,
3956                                           u64 data)
3957 {
3958         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3959
3960         return dd->sw_send_dma_eng_err_status_cnt[9];
3961 }
3962
3963 static u64 access_sdma_packet_desc_overflow_err_cnt(
3964                                 const struct cntr_entry *entry,
3965                                 void *context, int vl, int mode, u64 data)
3966 {
3967         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3968
3969         return dd->sw_send_dma_eng_err_status_cnt[8];
3970 }
3971
3972 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3973                                                void *context, int vl,
3974                                                int mode, u64 data)
3975 {
3976         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3977
3978         return dd->sw_send_dma_eng_err_status_cnt[7];
3979 }
3980
3981 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3982                                     void *context, int vl, int mode, u64 data)
3983 {
3984         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3985
3986         return dd->sw_send_dma_eng_err_status_cnt[6];
3987 }
3988
3989 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3990                                         void *context, int vl, int mode,
3991                                         u64 data)
3992 {
3993         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3994
3995         return dd->sw_send_dma_eng_err_status_cnt[5];
3996 }
3997
3998 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3999                                           void *context, int vl, int mode,
4000                                           u64 data)
4001 {
4002         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4003
4004         return dd->sw_send_dma_eng_err_status_cnt[4];
4005 }
4006
4007 static u64 access_sdma_tail_out_of_bounds_err_cnt(
4008                                 const struct cntr_entry *entry,
4009                                 void *context, int vl, int mode, u64 data)
4010 {
4011         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4012
4013         return dd->sw_send_dma_eng_err_status_cnt[3];
4014 }
4015
4016 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4017                                         void *context, int vl, int mode,
4018                                         u64 data)
4019 {
4020         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4021
4022         return dd->sw_send_dma_eng_err_status_cnt[2];
4023 }
4024
4025 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4026                                             void *context, int vl, int mode,
4027                                             u64 data)
4028 {
4029         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4030
4031         return dd->sw_send_dma_eng_err_status_cnt[1];
4032 }
4033
4034 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4035                                         void *context, int vl, int mode,
4036                                         u64 data)
4037 {
4038         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4039
4040         return dd->sw_send_dma_eng_err_status_cnt[0];
4041 }
4042
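/*
 * DcRecvErr: read the DCC port-receive error CSR and fold in the software
 * count of bad bypass packets, saturating at CNTR_MAX.  A counter write
 * (clear) also resets the software count.
 */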
4043 static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4044                                  void *context, int vl, int mode,
4045                                  u64 data)
4046 {
4047         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4048
4049         u64 val = 0;
4050         u64 csr = entry->csr;
4051
4052         val = read_write_csr(dd, csr, mode, data);
4053         if (mode == CNTR_MODE_R) {
4054                 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4055                         CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4056         } else if (mode == CNTR_MODE_W) {
4057                 dd->sw_rcv_bypass_packet_errors = 0;
4058         } else {
4059                 dd_dev_err(dd, "Invalid cntr register access mode\n");
4060                 return 0;
4061         }
4062         return val;
4063 }
4064
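/*
 * Generate accessors for the per-CPU software counters kept in the port's
 * ibport_data: rvp.<cntr> is the running per-CPU counter and rvp.z_<cntr>
 * is the matching baseline value handed to read_write_cpu().
 */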
4065 #define def_access_sw_cpu(cntr) \
4066 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
4067                               void *context, int vl, int mode, u64 data)      \
4068 {                                                                             \
4069         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4070         return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
4071                               ppd->ibport_data.rvp.cntr, vl,                  \
4072                               mode, data);                                    \
4073 }
4074
4075 def_access_sw_cpu(rc_acks);
4076 def_access_sw_cpu(rc_qacks);
4077 def_access_sw_cpu(rc_delayed_comp);
4078
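/*
 * Generate accessors for simple per-port software counters stored in
 * ibport_data.rvp.n_<cntr>.  These counters are not kept per VL, so a
 * per-VL query returns 0.
 */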
4079 #define def_access_ibp_counter(cntr) \
4080 static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
4081                                 void *context, int vl, int mode, u64 data)    \
4082 {                                                                             \
4083         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4084                                                                               \
4085         if (vl != CNTR_INVALID_VL)                                            \
4086                 return 0;                                                     \
4087                                                                               \
4088         return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
4089                              mode, data);                                     \
4090 }
4091
4092 def_access_ibp_counter(loop_pkts);
4093 def_access_ibp_counter(rc_resends);
4094 def_access_ibp_counter(rnr_naks);
4095 def_access_ibp_counter(other_naks);
4096 def_access_ibp_counter(rc_timeouts);
4097 def_access_ibp_counter(pkt_drops);
4098 def_access_ibp_counter(dmawait);
4099 def_access_ibp_counter(rc_seqnak);
4100 def_access_ibp_counter(rc_dupreq);
4101 def_access_ibp_counter(rdma_seq);
4102 def_access_ibp_counter(unaligned);
4103 def_access_ibp_counter(seq_naks);
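/*
 * For reference, def_access_ibp_counter(loop_pkts) above expands to
 * roughly the following accessor (sketch only; the real definition is
 * generated by the preprocessor):
 *
 * static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *                                 void *context, int vl, int mode, u64 data)
 * {
 *         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *         if (vl != CNTR_INVALID_VL)
 *                 return 0;
 *
 *         return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_loop_pkts,
 *                              mode, data);
 * }
 */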
4104
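/*
 * Device counter table, indexed by the C_* device counter values and
 * bounded by DEV_CNTR_LAST.  Entries are either hardware counters read
 * through a CSR address or software counters serviced by one of the
 * access_* helpers above; the CNTR_* flags (CNTR_SYNTH, CNTR_VL,
 * CNTR_SDMA, CNTR_32BIT, ...) describe how each counter is maintained.
 */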
4105 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4106 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4107 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4108                         CNTR_NORMAL),
4109 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4110                         CNTR_NORMAL),
4111 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4112                         RCV_TID_FLOW_GEN_MISMATCH_CNT,
4113                         CNTR_NORMAL),
4114 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4115                         CNTR_NORMAL),
4116 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4117                         RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4118 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4119                         CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4120 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4121                         CNTR_NORMAL),
4122 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4123                         CNTR_NORMAL),
4124 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4125                         CNTR_NORMAL),
4126 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4127                         CNTR_NORMAL),
4128 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4129                         CNTR_NORMAL),
4130 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4131                         CNTR_NORMAL),
4132 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4133                         CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4134 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4135                         CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4136 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4137                               CNTR_SYNTH),
4138 [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4139                             access_dc_rcv_err_cnt),
4140 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4141                                  CNTR_SYNTH),
4142 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4143                                   CNTR_SYNTH),
4144 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4145                                   CNTR_SYNTH),
4146 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4147                                    DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4148 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4149                                   DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4150                                   CNTR_SYNTH),
4151 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4152                                 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4153 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4154                                CNTR_SYNTH),
4155 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4156                               CNTR_SYNTH),
4157 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4158                                CNTR_SYNTH),
4159 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4160                                  CNTR_SYNTH),
4161 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4162                                 CNTR_SYNTH),
4163 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4164                                 CNTR_SYNTH),
4165 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4166                                CNTR_SYNTH),
4167 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4168                                  CNTR_SYNTH | CNTR_VL),
4169 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4170                                 CNTR_SYNTH | CNTR_VL),
4171 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4172 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4173                                  CNTR_SYNTH | CNTR_VL),
4174 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4175 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4176                                  CNTR_SYNTH | CNTR_VL),
4177 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4178                               CNTR_SYNTH),
4179 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4180                                  CNTR_SYNTH | CNTR_VL),
4181 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4182                                 CNTR_SYNTH),
4183 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4184                                    CNTR_SYNTH | CNTR_VL),
4185 [C_DC_TOTAL_CRC] =
4186         DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4187                          CNTR_SYNTH),
4188 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4189                                   CNTR_SYNTH),
4190 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4191                                   CNTR_SYNTH),
4192 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4193                                   CNTR_SYNTH),
4194 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4195                                   CNTR_SYNTH),
4196 [C_DC_CRC_MULT_LN] =
4197         DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4198                          CNTR_SYNTH),
4199 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4200                                     CNTR_SYNTH),
4201 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4202                                     CNTR_SYNTH),
4203 [C_DC_SEQ_CRC_CNT] =
4204         DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4205                          CNTR_SYNTH),
4206 [C_DC_ESC0_ONLY_CNT] =
4207         DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4208                          CNTR_SYNTH),
4209 [C_DC_ESC0_PLUS1_CNT] =
4210         DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4211                          CNTR_SYNTH),
4212 [C_DC_ESC0_PLUS2_CNT] =
4213         DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4214                          CNTR_SYNTH),
4215 [C_DC_REINIT_FROM_PEER_CNT] =
4216         DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4217                          CNTR_SYNTH),
4218 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4219                                   CNTR_SYNTH),
4220 [C_DC_MISC_FLG_CNT] =
4221         DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4222                          CNTR_SYNTH),
4223 [C_DC_PRF_GOOD_LTP_CNT] =
4224         DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4225 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4226         DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4227                          CNTR_SYNTH),
4228 [C_DC_PRF_RX_FLIT_CNT] =
4229         DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4230 [C_DC_PRF_TX_FLIT_CNT] =
4231         DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4232 [C_DC_PRF_CLK_CNTR] =
4233         DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4234 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4235         DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4236 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4237         DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4238                          CNTR_SYNTH),
4239 [C_DC_PG_STS_TX_SBE_CNT] =
4240         DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4241 [C_DC_PG_STS_TX_MBE_CNT] =
4242         DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4243                          CNTR_SYNTH),
4244 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4245                             access_sw_cpu_intr),
4246 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4247                             access_sw_cpu_rcv_limit),
4248 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4249                             access_sw_vtx_wait),
4250 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4251                             access_sw_pio_wait),
4252 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4253                             access_sw_pio_drain),
4254 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4255                             access_sw_kmem_wait),
4256 [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
4257                             hfi1_access_sw_tid_wait),
4258 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4259                             access_sw_send_schedule),
4260 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4261                                       SEND_DMA_DESC_FETCHED_CNT, 0,
4262                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4263                                       dev_access_u32_csr),
4264 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4265                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4266                              access_sde_int_cnt),
4267 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4268                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4269                              access_sde_err_cnt),
4270 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4271                                   CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4272                                   access_sde_idle_int_cnt),
4273 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4274                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4275                                       access_sde_progress_int_cnt),
4276 /* MISC_ERR_STATUS */
4277 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4278                                 CNTR_NORMAL,
4279                                 access_misc_pll_lock_fail_err_cnt),
4280 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4281                                 CNTR_NORMAL,
4282                                 access_misc_mbist_fail_err_cnt),
4283 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4284                                 CNTR_NORMAL,
4285                                 access_misc_invalid_eep_cmd_err_cnt),
4286 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4287                                 CNTR_NORMAL,
4288                                 access_misc_efuse_done_parity_err_cnt),
4289 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4290                                 CNTR_NORMAL,
4291                                 access_misc_efuse_write_err_cnt),
4292 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4293                                 0, CNTR_NORMAL,
4294                                 access_misc_efuse_read_bad_addr_err_cnt),
4295 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4296                                 CNTR_NORMAL,
4297                                 access_misc_efuse_csr_parity_err_cnt),
4298 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4299                                 CNTR_NORMAL,
4300                                 access_misc_fw_auth_failed_err_cnt),
4301 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4302                                 CNTR_NORMAL,
4303                                 access_misc_key_mismatch_err_cnt),
4304 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4305                                 CNTR_NORMAL,
4306                                 access_misc_sbus_write_failed_err_cnt),
4307 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4308                                 CNTR_NORMAL,
4309                                 access_misc_csr_write_bad_addr_err_cnt),
4310 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4311                                 CNTR_NORMAL,
4312                                 access_misc_csr_read_bad_addr_err_cnt),
4313 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4314                                 CNTR_NORMAL,
4315                                 access_misc_csr_parity_err_cnt),
4316 /* CceErrStatus */
4317 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4318                                 CNTR_NORMAL,
4319                                 access_sw_cce_err_status_aggregated_cnt),
4320 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4321                                 CNTR_NORMAL,
4322                                 access_cce_msix_csr_parity_err_cnt),
4323 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4324                                 CNTR_NORMAL,
4325                                 access_cce_int_map_unc_err_cnt),
4326 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4327                                 CNTR_NORMAL,
4328                                 access_cce_int_map_cor_err_cnt),
4329 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4330                                 CNTR_NORMAL,
4331                                 access_cce_msix_table_unc_err_cnt),
4332 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4333                                 CNTR_NORMAL,
4334                                 access_cce_msix_table_cor_err_cnt),
4335 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4336                                 0, CNTR_NORMAL,
4337                                 access_cce_rxdma_conv_fifo_parity_err_cnt),
4338 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4339                                 0, CNTR_NORMAL,
4340                                 access_cce_rcpl_async_fifo_parity_err_cnt),
4341 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4342                                 CNTR_NORMAL,
4343                                 access_cce_seg_write_bad_addr_err_cnt),
4344 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4345                                 CNTR_NORMAL,
4346                                 access_cce_seg_read_bad_addr_err_cnt),
4347 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4348                                 CNTR_NORMAL,
4349                                 access_la_triggered_cnt),
4350 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4351                                 CNTR_NORMAL,
4352                                 access_cce_trgt_cpl_timeout_err_cnt),
4353 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4354                                 CNTR_NORMAL,
4355                                 access_pcic_receive_parity_err_cnt),
4356 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4357                                 CNTR_NORMAL,
4358                                 access_pcic_transmit_back_parity_err_cnt),
4359 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4360                                 0, CNTR_NORMAL,
4361                                 access_pcic_transmit_front_parity_err_cnt),
4362 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4363                                 CNTR_NORMAL,
4364                                 access_pcic_cpl_dat_q_unc_err_cnt),
4365 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4366                                 CNTR_NORMAL,
4367                                 access_pcic_cpl_hd_q_unc_err_cnt),
4368 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4369                                 CNTR_NORMAL,
4370                                 access_pcic_post_dat_q_unc_err_cnt),
4371 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4372                                 CNTR_NORMAL,
4373                                 access_pcic_post_hd_q_unc_err_cnt),
4374 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4375                                 CNTR_NORMAL,
4376                                 access_pcic_retry_sot_mem_unc_err_cnt),
4377 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4378                                 CNTR_NORMAL,
4379                                 access_pcic_retry_mem_unc_err),
4380 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4381                                 CNTR_NORMAL,
4382                                 access_pcic_n_post_dat_q_parity_err_cnt),
4383 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4384                                 CNTR_NORMAL,
4385                                 access_pcic_n_post_h_q_parity_err_cnt),
4386 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4387                                 CNTR_NORMAL,
4388                                 access_pcic_cpl_dat_q_cor_err_cnt),
4389 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4390                                 CNTR_NORMAL,
4391                                 access_pcic_cpl_hd_q_cor_err_cnt),
4392 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4393                                 CNTR_NORMAL,
4394                                 access_pcic_post_dat_q_cor_err_cnt),
4395 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4396                                 CNTR_NORMAL,
4397                                 access_pcic_post_hd_q_cor_err_cnt),
4398 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4399                                 CNTR_NORMAL,
4400                                 access_pcic_retry_sot_mem_cor_err_cnt),
4401 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4402                                 CNTR_NORMAL,
4403                                 access_pcic_retry_mem_cor_err_cnt),
4404 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4405                                 "CceCli1AsyncFifoDbgParityError", 0, 0,
4406                                 CNTR_NORMAL,
4407                                 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4408 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4409                                 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4410                                 CNTR_NORMAL,
4411                                 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4412                                 ),
4413 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4414                         "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4415                         CNTR_NORMAL,
4416                         access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4417 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4418                         "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4419                         CNTR_NORMAL,
4420                         access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4421 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4422                         0, CNTR_NORMAL,
4423                         access_cce_cli2_async_fifo_parity_err_cnt),
4424 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4425                         CNTR_NORMAL,
4426                         access_cce_csr_cfg_bus_parity_err_cnt),
4427 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4428                         0, CNTR_NORMAL,
4429                         access_cce_cli0_async_fifo_parity_err_cnt),
4430 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4431                         CNTR_NORMAL,
4432                         access_cce_rspd_data_parity_err_cnt),
4433 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4434                         CNTR_NORMAL,
4435                         access_cce_trgt_access_err_cnt),
4436 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4437                         0, CNTR_NORMAL,
4438                         access_cce_trgt_async_fifo_parity_err_cnt),
4439 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4440                         CNTR_NORMAL,
4441                         access_cce_csr_write_bad_addr_err_cnt),
4442 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4443                         CNTR_NORMAL,
4444                         access_cce_csr_read_bad_addr_err_cnt),
4445 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4446                         CNTR_NORMAL,
4447                         access_ccs_csr_parity_err_cnt),
4448
4449 /* RcvErrStatus */
4450 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4451                         CNTR_NORMAL,
4452                         access_rx_csr_parity_err_cnt),
4453 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4454                         CNTR_NORMAL,
4455                         access_rx_csr_write_bad_addr_err_cnt),
4456 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4457                         CNTR_NORMAL,
4458                         access_rx_csr_read_bad_addr_err_cnt),
4459 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4460                         CNTR_NORMAL,
4461                         access_rx_dma_csr_unc_err_cnt),
4462 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4463                         CNTR_NORMAL,
4464                         access_rx_dma_dq_fsm_encoding_err_cnt),
4465 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4466                         CNTR_NORMAL,
4467                         access_rx_dma_eq_fsm_encoding_err_cnt),
4468 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4469                         CNTR_NORMAL,
4470                         access_rx_dma_csr_parity_err_cnt),
4471 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4472                         CNTR_NORMAL,
4473                         access_rx_rbuf_data_cor_err_cnt),
4474 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4475                         CNTR_NORMAL,
4476                         access_rx_rbuf_data_unc_err_cnt),
4477 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4478                         CNTR_NORMAL,
4479                         access_rx_dma_data_fifo_rd_cor_err_cnt),
4480 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4481                         CNTR_NORMAL,
4482                         access_rx_dma_data_fifo_rd_unc_err_cnt),
4483 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4484                         CNTR_NORMAL,
4485                         access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4486 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4487                         CNTR_NORMAL,
4488                         access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4489 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4490                         CNTR_NORMAL,
4491                         access_rx_rbuf_desc_part2_cor_err_cnt),
4492 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4493                         CNTR_NORMAL,
4494                         access_rx_rbuf_desc_part2_unc_err_cnt),
4495 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4496                         CNTR_NORMAL,
4497                         access_rx_rbuf_desc_part1_cor_err_cnt),
4498 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4499                         CNTR_NORMAL,
4500                         access_rx_rbuf_desc_part1_unc_err_cnt),
4501 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4502                         CNTR_NORMAL,
4503                         access_rx_hq_intr_fsm_err_cnt),
4504 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4505                         CNTR_NORMAL,
4506                         access_rx_hq_intr_csr_parity_err_cnt),
4507 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4508                         CNTR_NORMAL,
4509                         access_rx_lookup_csr_parity_err_cnt),
4510 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4511                         CNTR_NORMAL,
4512                         access_rx_lookup_rcv_array_cor_err_cnt),
4513 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4514                         CNTR_NORMAL,
4515                         access_rx_lookup_rcv_array_unc_err_cnt),
4516 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4517                         0, CNTR_NORMAL,
4518                         access_rx_lookup_des_part2_parity_err_cnt),
4519 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4520                         0, CNTR_NORMAL,
4521                         access_rx_lookup_des_part1_unc_cor_err_cnt),
4522 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4523                         CNTR_NORMAL,
4524                         access_rx_lookup_des_part1_unc_err_cnt),
4525 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4526                         CNTR_NORMAL,
4527                         access_rx_rbuf_next_free_buf_cor_err_cnt),
4528 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4529                         CNTR_NORMAL,
4530                         access_rx_rbuf_next_free_buf_unc_err_cnt),
4531 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4532                         "RxRbufFlInitWrAddrParityErr", 0, 0,
4533                         CNTR_NORMAL,
4534                         access_rbuf_fl_init_wr_addr_parity_err_cnt),
4535 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4536                         0, CNTR_NORMAL,
4537                         access_rx_rbuf_fl_initdone_parity_err_cnt),
4538 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4539                         0, CNTR_NORMAL,
4540                         access_rx_rbuf_fl_write_addr_parity_err_cnt),
4541 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4542                         CNTR_NORMAL,
4543                         access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4544 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4545                         CNTR_NORMAL,
4546                         access_rx_rbuf_empty_err_cnt),
4547 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4548                         CNTR_NORMAL,
4549                         access_rx_rbuf_full_err_cnt),
4550 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4551                         CNTR_NORMAL,
4552                         access_rbuf_bad_lookup_err_cnt),
4553 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4554                         CNTR_NORMAL,
4555                         access_rbuf_ctx_id_parity_err_cnt),
4556 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4557                         CNTR_NORMAL,
4558                         access_rbuf_csr_qeopdw_parity_err_cnt),
4559 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4560                         "RxRbufCsrQNumOfPktParityErr", 0, 0,
4561                         CNTR_NORMAL,
4562                         access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4563 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4564                         "RxRbufCsrQTlPtrParityErr", 0, 0,
4565                         CNTR_NORMAL,
4566                         access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4567 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4568                         0, CNTR_NORMAL,
4569                         access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4570 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4571                         0, CNTR_NORMAL,
4572                         access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4573 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4574                         0, 0, CNTR_NORMAL,
4575                         access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4576 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4577                         0, CNTR_NORMAL,
4578                         access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4579 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4580                         "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4581                         CNTR_NORMAL,
4582                         access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4583 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4584                         0, CNTR_NORMAL,
4585                         access_rx_rbuf_block_list_read_cor_err_cnt),
4586 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4587                         0, CNTR_NORMAL,
4588                         access_rx_rbuf_block_list_read_unc_err_cnt),
4589 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4590                         CNTR_NORMAL,
4591                         access_rx_rbuf_lookup_des_cor_err_cnt),
4592 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4593                         CNTR_NORMAL,
4594                         access_rx_rbuf_lookup_des_unc_err_cnt),
4595 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4596                         "RxRbufLookupDesRegUncCorErr", 0, 0,
4597                         CNTR_NORMAL,
4598                         access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4599 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4600                         CNTR_NORMAL,
4601                         access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4602 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4603                         CNTR_NORMAL,
4604                         access_rx_rbuf_free_list_cor_err_cnt),
4605 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4606                         CNTR_NORMAL,
4607                         access_rx_rbuf_free_list_unc_err_cnt),
4608 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4609                         CNTR_NORMAL,
4610                         access_rx_rcv_fsm_encoding_err_cnt),
4611 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4612                         CNTR_NORMAL,
4613                         access_rx_dma_flag_cor_err_cnt),
4614 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4615                         CNTR_NORMAL,
4616                         access_rx_dma_flag_unc_err_cnt),
4617 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4618                         CNTR_NORMAL,
4619                         access_rx_dc_sop_eop_parity_err_cnt),
4620 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4621                         CNTR_NORMAL,
4622                         access_rx_rcv_csr_parity_err_cnt),
4623 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4624                         CNTR_NORMAL,
4625                         access_rx_rcv_qp_map_table_cor_err_cnt),
4626 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4627                         CNTR_NORMAL,
4628                         access_rx_rcv_qp_map_table_unc_err_cnt),
4629 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4630                         CNTR_NORMAL,
4631                         access_rx_rcv_data_cor_err_cnt),
4632 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4633                         CNTR_NORMAL,
4634                         access_rx_rcv_data_unc_err_cnt),
4635 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4636                         CNTR_NORMAL,
4637                         access_rx_rcv_hdr_cor_err_cnt),
4638 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4639                         CNTR_NORMAL,
4640                         access_rx_rcv_hdr_unc_err_cnt),
4641 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4642                         CNTR_NORMAL,
4643                         access_rx_dc_intf_parity_err_cnt),
4644 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4645                         CNTR_NORMAL,
4646                         access_rx_dma_csr_cor_err_cnt),
4647 /* SendPioErrStatus */
4648 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4649                         CNTR_NORMAL,
4650                         access_pio_pec_sop_head_parity_err_cnt),
4651 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4652                         CNTR_NORMAL,
4653                         access_pio_pcc_sop_head_parity_err_cnt),
4654 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4655                         0, 0, CNTR_NORMAL,
4656                         access_pio_last_returned_cnt_parity_err_cnt),
4657 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4658                         0, CNTR_NORMAL,
4659                         access_pio_current_free_cnt_parity_err_cnt),
4660 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4661                         CNTR_NORMAL,
4662                         access_pio_reserved_31_err_cnt),
4663 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4664                         CNTR_NORMAL,
4665                         access_pio_reserved_30_err_cnt),
4666 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4667                         CNTR_NORMAL,
4668                         access_pio_ppmc_sop_len_err_cnt),
4669 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4670                         CNTR_NORMAL,
4671                         access_pio_ppmc_bqc_mem_parity_err_cnt),
4672 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4673                         CNTR_NORMAL,
4674                         access_pio_vl_fifo_parity_err_cnt),
4675 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4676                         CNTR_NORMAL,
4677                         access_pio_vlf_sop_parity_err_cnt),
4678 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4679                         CNTR_NORMAL,
4680                         access_pio_vlf_v1_len_parity_err_cnt),
4681 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4682                         CNTR_NORMAL,
4683                         access_pio_block_qw_count_parity_err_cnt),
4684 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4685                         CNTR_NORMAL,
4686                         access_pio_write_qw_valid_parity_err_cnt),
4687 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4688                         CNTR_NORMAL,
4689                         access_pio_state_machine_err_cnt),
4690 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4691                         CNTR_NORMAL,
4692                         access_pio_write_data_parity_err_cnt),
4693 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4694                         CNTR_NORMAL,
4695                         access_pio_host_addr_mem_cor_err_cnt),
4696 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4697                         CNTR_NORMAL,
4698                         access_pio_host_addr_mem_unc_err_cnt),
4699 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4700                         CNTR_NORMAL,
4701                         access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4702 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4703                         CNTR_NORMAL,
4704                         access_pio_init_sm_in_err_cnt),
4705 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4706                         CNTR_NORMAL,
4707                         access_pio_ppmc_pbl_fifo_err_cnt),
4708 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4709                         0, CNTR_NORMAL,
4710                         access_pio_credit_ret_fifo_parity_err_cnt),
4711 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4712                         CNTR_NORMAL,
4713                         access_pio_v1_len_mem_bank1_cor_err_cnt),
4714 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4715                         CNTR_NORMAL,
4716                         access_pio_v1_len_mem_bank0_cor_err_cnt),
4717 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4718                         CNTR_NORMAL,
4719                         access_pio_v1_len_mem_bank1_unc_err_cnt),
4720 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4721                         CNTR_NORMAL,
4722                         access_pio_v1_len_mem_bank0_unc_err_cnt),
4723 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4724                         CNTR_NORMAL,
4725                         access_pio_sm_pkt_reset_parity_err_cnt),
4726 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4727                         CNTR_NORMAL,
4728                         access_pio_pkt_evict_fifo_parity_err_cnt),
4729 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4730                         "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4731                         CNTR_NORMAL,
4732                         access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4733 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4734                         CNTR_NORMAL,
4735                         access_pio_sbrdctl_crrel_parity_err_cnt),
4736 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4737                         CNTR_NORMAL,
4738                         access_pio_pec_fifo_parity_err_cnt),
4739 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4740                         CNTR_NORMAL,
4741                         access_pio_pcc_fifo_parity_err_cnt),
4742 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4743                         CNTR_NORMAL,
4744                         access_pio_sb_mem_fifo1_err_cnt),
4745 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4746                         CNTR_NORMAL,
4747                         access_pio_sb_mem_fifo0_err_cnt),
4748 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4749                         CNTR_NORMAL,
4750                         access_pio_csr_parity_err_cnt),
4751 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4752                         CNTR_NORMAL,
4753                         access_pio_write_addr_parity_err_cnt),
4754 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4755                         CNTR_NORMAL,
4756                         access_pio_write_bad_ctxt_err_cnt),
4757 /* SendDmaErrStatus */
4758 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4759                         0, CNTR_NORMAL,
4760                         access_sdma_pcie_req_tracking_cor_err_cnt),
4761 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4762                         0, CNTR_NORMAL,
4763                         access_sdma_pcie_req_tracking_unc_err_cnt),
4764 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4765                         CNTR_NORMAL,
4766                         access_sdma_csr_parity_err_cnt),
4767 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4768                         CNTR_NORMAL,
4769                         access_sdma_rpy_tag_err_cnt),
4770 /* SendEgressErrStatus */
4771 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4772                         CNTR_NORMAL,
4773                         access_tx_read_pio_memory_csr_unc_err_cnt),
4774 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4775                         0, CNTR_NORMAL,
4776                         access_tx_read_sdma_memory_csr_err_cnt),
4777 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4778                         CNTR_NORMAL,
4779                         access_tx_egress_fifo_cor_err_cnt),
4780 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4781                         CNTR_NORMAL,
4782                         access_tx_read_pio_memory_cor_err_cnt),
4783 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4784                         CNTR_NORMAL,
4785                         access_tx_read_sdma_memory_cor_err_cnt),
4786 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4787                         CNTR_NORMAL,
4788                         access_tx_sb_hdr_cor_err_cnt),
4789 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4790                         CNTR_NORMAL,
4791                         access_tx_credit_overrun_err_cnt),
4792 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4793                         CNTR_NORMAL,
4794                         access_tx_launch_fifo8_cor_err_cnt),
4795 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4796                         CNTR_NORMAL,
4797                         access_tx_launch_fifo7_cor_err_cnt),
4798 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4799                         CNTR_NORMAL,
4800                         access_tx_launch_fifo6_cor_err_cnt),
4801 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4802                         CNTR_NORMAL,
4803                         access_tx_launch_fifo5_cor_err_cnt),
4804 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4805                         CNTR_NORMAL,
4806                         access_tx_launch_fifo4_cor_err_cnt),
4807 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4808                         CNTR_NORMAL,
4809                         access_tx_launch_fifo3_cor_err_cnt),
4810 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4811                         CNTR_NORMAL,
4812                         access_tx_launch_fifo2_cor_err_cnt),
4813 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4814                         CNTR_NORMAL,
4815                         access_tx_launch_fifo1_cor_err_cnt),
4816 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4817                         CNTR_NORMAL,
4818                         access_tx_launch_fifo0_cor_err_cnt),
4819 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4820                         CNTR_NORMAL,
4821                         access_tx_credit_return_vl_err_cnt),
4822 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4823                         CNTR_NORMAL,
4824                         access_tx_hcrc_insertion_err_cnt),
4825 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4826                         CNTR_NORMAL,
4827                         access_tx_egress_fifo_unc_err_cnt),
4828 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4829                         CNTR_NORMAL,
4830                         access_tx_read_pio_memory_unc_err_cnt),
4831 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4832                         CNTR_NORMAL,
4833                         access_tx_read_sdma_memory_unc_err_cnt),
4834 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4835                         CNTR_NORMAL,
4836                         access_tx_sb_hdr_unc_err_cnt),
4837 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4838                         CNTR_NORMAL,
4839                         access_tx_credit_return_partiy_err_cnt),
4840 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4841                         0, 0, CNTR_NORMAL,
4842                         access_tx_launch_fifo8_unc_or_parity_err_cnt),
4843 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4844                         0, 0, CNTR_NORMAL,
4845                         access_tx_launch_fifo7_unc_or_parity_err_cnt),
4846 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4847                         0, 0, CNTR_NORMAL,
4848                         access_tx_launch_fifo6_unc_or_parity_err_cnt),
4849 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4850                         0, 0, CNTR_NORMAL,
4851                         access_tx_launch_fifo5_unc_or_parity_err_cnt),
4852 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4853                         0, 0, CNTR_NORMAL,
4854                         access_tx_launch_fifo4_unc_or_parity_err_cnt),
4855 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4856                         0, 0, CNTR_NORMAL,
4857                         access_tx_launch_fifo3_unc_or_parity_err_cnt),
4858 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4859                         0, 0, CNTR_NORMAL,
4860                         access_tx_launch_fifo2_unc_or_parity_err_cnt),
4861 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4862                         0, 0, CNTR_NORMAL,
4863                         access_tx_launch_fifo1_unc_or_parity_err_cnt),
4864 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4865                         0, 0, CNTR_NORMAL,
4866                         access_tx_launch_fifo0_unc_or_parity_err_cnt),
4867 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4868                         0, 0, CNTR_NORMAL,
4869                         access_tx_sdma15_disallowed_packet_err_cnt),
4870 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4871                         0, 0, CNTR_NORMAL,
4872                         access_tx_sdma14_disallowed_packet_err_cnt),
4873 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4874                         0, 0, CNTR_NORMAL,
4875                         access_tx_sdma13_disallowed_packet_err_cnt),
4876 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4877                         0, 0, CNTR_NORMAL,
4878                         access_tx_sdma12_disallowed_packet_err_cnt),
4879 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4880                         0, 0, CNTR_NORMAL,
4881                         access_tx_sdma11_disallowed_packet_err_cnt),
4882 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4883                         0, 0, CNTR_NORMAL,
4884                         access_tx_sdma10_disallowed_packet_err_cnt),
4885 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4886                         0, 0, CNTR_NORMAL,
4887                         access_tx_sdma9_disallowed_packet_err_cnt),
4888 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4889                         0, 0, CNTR_NORMAL,
4890                         access_tx_sdma8_disallowed_packet_err_cnt),
4891 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4892                         0, 0, CNTR_NORMAL,
4893                         access_tx_sdma7_disallowed_packet_err_cnt),
4894 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4895                         0, 0, CNTR_NORMAL,
4896                         access_tx_sdma6_disallowed_packet_err_cnt),
4897 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4898                         0, 0, CNTR_NORMAL,
4899                         access_tx_sdma5_disallowed_packet_err_cnt),
4900 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4901                         0, 0, CNTR_NORMAL,
4902                         access_tx_sdma4_disallowed_packet_err_cnt),
4903 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4904                         0, 0, CNTR_NORMAL,
4905                         access_tx_sdma3_disallowed_packet_err_cnt),
4906 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4907                         0, 0, CNTR_NORMAL,
4908                         access_tx_sdma2_disallowed_packet_err_cnt),
4909 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4910                         0, 0, CNTR_NORMAL,
4911                         access_tx_sdma1_disallowed_packet_err_cnt),
4912 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4913                         0, 0, CNTR_NORMAL,
4914                         access_tx_sdma0_disallowed_packet_err_cnt),
4915 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4916                         CNTR_NORMAL,
4917                         access_tx_config_parity_err_cnt),
4918 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4919                         CNTR_NORMAL,
4920                         access_tx_sbrd_ctl_csr_parity_err_cnt),
4921 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4922                         CNTR_NORMAL,
4923                         access_tx_launch_csr_parity_err_cnt),
4924 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4925                         CNTR_NORMAL,
4926                         access_tx_illegal_vl_err_cnt),
4927 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4928                         "TxSbrdCtlStateMachineParityErr", 0, 0,
4929                         CNTR_NORMAL,
4930                         access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4931 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4932                         CNTR_NORMAL,
4933                         access_egress_reserved_10_err_cnt),
4934 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4935                         CNTR_NORMAL,
4936                         access_egress_reserved_9_err_cnt),
4937 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4938                         0, 0, CNTR_NORMAL,
4939                         access_tx_sdma_launch_intf_parity_err_cnt),
4940 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4941                         CNTR_NORMAL,
4942                         access_tx_pio_launch_intf_parity_err_cnt),
4943 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4944                         CNTR_NORMAL,
4945                         access_egress_reserved_6_err_cnt),
4946 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4947                         CNTR_NORMAL,
4948                         access_tx_incorrect_link_state_err_cnt),
4949 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4950                         CNTR_NORMAL,
4951                         access_tx_linkdown_err_cnt),
4952 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4953                         "EgressFifoUnderrunOrParityErr", 0, 0,
4954                         CNTR_NORMAL,
4955                         access_tx_egress_fifi_underrun_or_parity_err_cnt),
4956 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4957                         CNTR_NORMAL,
4958                         access_egress_reserved_2_err_cnt),
4959 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4960                         CNTR_NORMAL,
4961                         access_tx_pkt_integrity_mem_unc_err_cnt),
4962 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4963                         CNTR_NORMAL,
4964                         access_tx_pkt_integrity_mem_cor_err_cnt),
4965 /* SendErrStatus */
4966 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4967                         CNTR_NORMAL,
4968                         access_send_csr_write_bad_addr_err_cnt),
4969 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4970                         CNTR_NORMAL,
4971                         access_send_csr_read_bad_addr_err_cnt),
4972 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4973                         CNTR_NORMAL,
4974                         access_send_csr_parity_cnt),
4975 /* SendCtxtErrStatus */
4976 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4977                         CNTR_NORMAL,
4978                         access_pio_write_out_of_bounds_err_cnt),
4979 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4980                         CNTR_NORMAL,
4981                         access_pio_write_overflow_err_cnt),
4982 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4983                         0, 0, CNTR_NORMAL,
4984                         access_pio_write_crosses_boundary_err_cnt),
4985 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4986                         CNTR_NORMAL,
4987                         access_pio_disallowed_packet_err_cnt),
4988 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4989                         CNTR_NORMAL,
4990                         access_pio_inconsistent_sop_err_cnt),
4991 /* SendDmaEngErrStatus */
4992 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4993                         0, 0, CNTR_NORMAL,
4994                         access_sdma_header_request_fifo_cor_err_cnt),
4995 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4996                         CNTR_NORMAL,
4997                         access_sdma_header_storage_cor_err_cnt),
4998 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4999                         CNTR_NORMAL,
5000                         access_sdma_packet_tracking_cor_err_cnt),
5001 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
5002                         CNTR_NORMAL,
5003                         access_sdma_assembly_cor_err_cnt),
5004 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5005                         CNTR_NORMAL,
5006                         access_sdma_desc_table_cor_err_cnt),
5007 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5008                         0, 0, CNTR_NORMAL,
5009                         access_sdma_header_request_fifo_unc_err_cnt),
5010 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5011                         CNTR_NORMAL,
5012                         access_sdma_header_storage_unc_err_cnt),
5013 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5014                         CNTR_NORMAL,
5015                         access_sdma_packet_tracking_unc_err_cnt),
5016 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5017                         CNTR_NORMAL,
5018                         access_sdma_assembly_unc_err_cnt),
5019 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5020                         CNTR_NORMAL,
5021                         access_sdma_desc_table_unc_err_cnt),
5022 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5023                         CNTR_NORMAL,
5024                         access_sdma_timeout_err_cnt),
5025 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5026                         CNTR_NORMAL,
5027                         access_sdma_header_length_err_cnt),
5028 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5029                         CNTR_NORMAL,
5030                         access_sdma_header_address_err_cnt),
5031 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5032                         CNTR_NORMAL,
5033                         access_sdma_header_select_err_cnt),
5034 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5035                         CNTR_NORMAL,
5036                         access_sdma_reserved_9_err_cnt),
5037 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5038                         CNTR_NORMAL,
5039                         access_sdma_packet_desc_overflow_err_cnt),
5040 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5041                         CNTR_NORMAL,
5042                         access_sdma_length_mismatch_err_cnt),
5043 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5044                         CNTR_NORMAL,
5045                         access_sdma_halt_err_cnt),
5046 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5047                         CNTR_NORMAL,
5048                         access_sdma_mem_read_err_cnt),
5049 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5050                         CNTR_NORMAL,
5051                         access_sdma_first_desc_err_cnt),
5052 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5053                         CNTR_NORMAL,
5054                         access_sdma_tail_out_of_bounds_err_cnt),
5055 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5056                         CNTR_NORMAL,
5057                         access_sdma_too_long_err_cnt),
5058 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5059                         CNTR_NORMAL,
5060                         access_sdma_gen_mismatch_err_cnt),
5061 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5062                         CNTR_NORMAL,
5063                         access_sdma_wrong_dw_err_cnt),
5064 };
5065
5066 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5067 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5068                         CNTR_NORMAL),
5069 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5070                         CNTR_NORMAL),
5071 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5072                         CNTR_NORMAL),
5073 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5074                         CNTR_NORMAL),
5075 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5076                         CNTR_NORMAL),
5077 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5078                         CNTR_NORMAL),
5079 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5080                         CNTR_NORMAL),
5081 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5082 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5083 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5084 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5085                                       CNTR_SYNTH | CNTR_VL),
5086 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5087                                      CNTR_SYNTH | CNTR_VL),
5088 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5089                                       CNTR_SYNTH | CNTR_VL),
5090 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5091 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5092 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5093                              access_sw_link_dn_cnt),
5094 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5095                            access_sw_link_up_cnt),
5096 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5097                                  access_sw_unknown_frame_cnt),
5098 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5099                              access_sw_xmit_discards),
5100 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5101                                 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5102                                 access_sw_xmit_discards),
5103 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5104                                  access_xmit_constraint_errs),
5105 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5106                                 access_rcv_constraint_errs),
5107 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5108 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5109 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5110 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5111 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5112 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5113 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5114 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5115 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupReq, rc_dupreq),
5116 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5117 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5118 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5119 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5120                                access_sw_cpu_rc_acks),
5121 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5122                                 access_sw_cpu_rc_qacks),
5123 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5124                                        access_sw_cpu_rc_delayed_comp),
5125 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5126 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5127 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5128 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5129 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5130 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5131 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5132 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5133 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5134 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5135 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5136 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5137 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5138 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5139 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5140 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5141 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5142 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5143 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5144 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5145 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5146 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5147 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5148 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5149 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5150 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5151 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5152 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5153 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5154 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5155 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5156 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5157 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5158 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5159 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5160 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5161 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5162 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5163 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5164 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5165 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5166 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5167 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5168 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5169 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5170 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5171 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5172 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5173 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5174 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5175 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5176 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5177 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5178 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5179 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5180 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5181 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5182 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5183 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5184 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5185 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5186 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5187 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5188 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5189 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5190 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5191 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5192 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5193 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5194 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5195 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5196 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5197 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5198 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5199 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5200 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5201 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5202 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5203 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5204 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5205 };
5206
5207 /* ======================================================================== */
5208
5209 /* return true if this is chip revision A */
5210 int is_ax(struct hfi1_devdata *dd)
5211 {
5212         u8 chip_rev_minor =
5213                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5214                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5215         return (chip_rev_minor & 0xf0) == 0;
5216 }
5217
5218 /* return true if this is chip revision B */
5219 int is_bx(struct hfi1_devdata *dd)
5220 {
5221         u8 chip_rev_minor =
5222                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5223                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5224         return (chip_rev_minor & 0xf0) == 0x10;
5225 }
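/*
 * Note (derived from the decode above, not from any additional spec): a
 * CCE_REVISION minor value of 0x00-0x0f identifies an A-step part (is_ax())
 * and 0x10-0x1f a B-step part (is_bx()).
 */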
5226
5227 /* return true if the kernel urgent interrupt is masked (disabled) for rcd */
5228 bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5229 {
5230         u64 mask;
5231         u32 is = IS_RCVURGENT_START + rcd->ctxt;
5232         u8 bit = is % 64;
5233
5234         mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5235         return !(mask & BIT_ULL(bit));
5236 }
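/*
 * The lookup above follows the CCE interrupt register layout: interrupt
 * source "is" lives in 64-bit mask CSR number (is / 64), i.e. at byte offset
 * CCE_INT_MASK + 8 * (is / 64), in bit (is % 64).  For example, an urgent
 * source of 70 would be bit 6 of the second mask register.
 */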
5237
5238 /*
5239  * Append string s to buffer buf.  Arguments curp and len are the current
5240  * position and remaining length, respectively.
5241  *
5242  * return 0 on success, 1 if out of room
5243  */
5244 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5245 {
5246         char *p = *curp;
5247         int len = *lenp;
5248         int result = 0; /* success */
5249         char c;
5250
5251         /* add a comma, if not first in the buffer */
5252         if (p != buf) {
5253                 if (len == 0) {
5254                         result = 1; /* out of room */
5255                         goto done;
5256                 }
5257                 *p++ = ',';
5258                 len--;
5259         }
5260
5261         /* copy the string */
5262         while ((c = *s++) != 0) {
5263                 if (len == 0) {
5264                         result = 1; /* out of room */
5265                         goto done;
5266                 }
5267                 *p++ = c;
5268                 len--;
5269         }
5270
5271 done:
5272         /* write return values */
5273         *curp = p;
5274         *lenp = len;
5275
5276         return result;
5277 }
5278
5279 /*
5280  * Using the given flag table, print a comma separated string into
5281  * the buffer.  End in '*' if the buffer is too short.
5282  */
5283 static char *flag_string(char *buf, int buf_len, u64 flags,
5284                          struct flag_table *table, int table_size)
5285 {
5286         char extra[32];
5287         char *p = buf;
5288         int len = buf_len;
5289         int no_room = 0;
5290         int i;
5291
5292         /* make sure there are at least 2 bytes so we can form "*" plus a nul */
5293         if (len < 2)
5294                 return "";
5295
5296         len--;  /* leave room for a nul */
5297         for (i = 0; i < table_size; i++) {
5298                 if (flags & table[i].flag) {
5299                         no_room = append_str(buf, &p, &len, table[i].str);
5300                         if (no_room)
5301                                 break;
5302                         flags &= ~table[i].flag;
5303                 }
5304         }
5305
5306         /* any undocumented bits left? */
5307         if (!no_room && flags) {
5308                 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5309                 no_room = append_str(buf, &p, &len, extra);
5310         }
5311
5312         /* add * if ran out of room */
5313         if (no_room) {
5314                 /* may need to back up to add space for a '*' */
5315                 if (len == 0)
5316                         --p;
5317                 *p++ = '*';
5318         }
5319
5320         /* add final nul - space already allocated above */
5321         *p = 0;
5322         return buf;
5323 }
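/*
 * Illustrative example (table and values are hypothetical): with a table of
 * { { 0x1, "BitA" }, { 0x2, "BitB" } } and flags == 0x7, flag_string() fills
 * the buffer with "BitA,BitB,bits 0x4" - the bit not described by the table
 * is reported numerically.  If the buffer is too small the string ends in '*'.
 */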
5324
5325 /* first 8 CCE error interrupt source names */
5326 static const char * const cce_misc_names[] = {
5327         "CceErrInt",            /* 0 */
5328         "RxeErrInt",            /* 1 */
5329         "MiscErrInt",           /* 2 */
5330         "Reserved3",            /* 3 */
5331         "PioErrInt",            /* 4 */
5332         "SDmaErrInt",           /* 5 */
5333         "EgressErrInt",         /* 6 */
5334         "TxeErrInt"             /* 7 */
5335 };
5336
5337 /*
5338  * Return the miscellaneous error interrupt name.
5339  */
5340 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5341 {
5342         if (source < ARRAY_SIZE(cce_misc_names))
5343                 strncpy(buf, cce_misc_names[source], bsize);
5344         else
5345                 snprintf(buf, bsize, "Reserved%u",
5346                          source + IS_GENERAL_ERR_START);
5347
5348         return buf;
5349 }
5350
5351 /*
5352  * Return the SDMA engine error interrupt name.
5353  */
5354 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5355 {
5356         snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5357         return buf;
5358 }
5359
5360 /*
5361  * Return the send context error interrupt name.
5362  */
5363 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5364 {
5365         snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5366         return buf;
5367 }
5368
5369 static const char * const various_names[] = {
5370         "PbcInt",
5371         "GpioAssertInt",
5372         "Qsfp1Int",
5373         "Qsfp2Int",
5374         "TCritInt"
5375 };
5376
5377 /*
5378  * Return the various interrupt name.
5379  */
5380 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5381 {
5382         if (source < ARRAY_SIZE(various_names))
5383                 strncpy(buf, various_names[source], bsize);
5384         else
5385                 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5386         return buf;
5387 }
5388
5389 /*
5390  * Return the DC interrupt name.
5391  */
5392 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5393 {
5394         static const char * const dc_int_names[] = {
5395                 "common",
5396                 "lcb",
5397                 "8051",
5398                 "lbm"   /* local block merge */
5399         };
5400
5401         if (source < ARRAY_SIZE(dc_int_names))
5402                 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5403         else
5404                 snprintf(buf, bsize, "DCInt%u", source);
5405         return buf;
5406 }
5407
5408 static const char * const sdma_int_names[] = {
5409         "SDmaInt",
5410         "SdmaIdleInt",
5411         "SdmaProgressInt",
5412 };
5413
5414 /*
5415  * Return the SDMA engine interrupt name.
5416  */
5417 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5418 {
5419         /* what interrupt */
5420         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5421         /* which engine */
5422         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5423
5424         if (likely(what < 3))
5425                 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5426         else
5427                 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5428         return buf;
5429 }
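/*
 * Worked example (assuming TXE_NUM_SDMA_ENGINES == 16): source 17 decodes to
 * what == 1 and which == 1, producing the name "SdmaIdleInt1", i.e. the idle
 * interrupt of SDMA engine 1.
 */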
5430
5431 /*
5432  * Return the receive available interrupt name.
5433  */
5434 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5435 {
5436         snprintf(buf, bsize, "RcvAvailInt%u", source);
5437         return buf;
5438 }
5439
5440 /*
5441  * Return the receive urgent interrupt name.
5442  */
5443 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5444 {
5445         snprintf(buf, bsize, "RcvUrgentInt%u", source);
5446         return buf;
5447 }
5448
5449 /*
5450  * Return the send credit interrupt name.
5451  */
5452 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5453 {
5454         snprintf(buf, bsize, "SendCreditInt%u", source);
5455         return buf;
5456 }
5457
5458 /*
5459  * Return the reserved interrupt name.
5460  */
5461 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5462 {
5463         snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5464         return buf;
5465 }
5466
5467 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5468 {
5469         return flag_string(buf, buf_len, flags,
5470                            cce_err_status_flags,
5471                            ARRAY_SIZE(cce_err_status_flags));
5472 }
5473
5474 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5475 {
5476         return flag_string(buf, buf_len, flags,
5477                            rxe_err_status_flags,
5478                            ARRAY_SIZE(rxe_err_status_flags));
5479 }
5480
5481 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5482 {
5483         return flag_string(buf, buf_len, flags, misc_err_status_flags,
5484                            ARRAY_SIZE(misc_err_status_flags));
5485 }
5486
5487 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5488 {
5489         return flag_string(buf, buf_len, flags,
5490                            pio_err_status_flags,
5491                            ARRAY_SIZE(pio_err_status_flags));
5492 }
5493
5494 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5495 {
5496         return flag_string(buf, buf_len, flags,
5497                            sdma_err_status_flags,
5498                            ARRAY_SIZE(sdma_err_status_flags));
5499 }
5500
5501 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5502 {
5503         return flag_string(buf, buf_len, flags,
5504                            egress_err_status_flags,
5505                            ARRAY_SIZE(egress_err_status_flags));
5506 }
5507
5508 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5509 {
5510         return flag_string(buf, buf_len, flags,
5511                            egress_err_info_flags,
5512                            ARRAY_SIZE(egress_err_info_flags));
5513 }
5514
5515 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5516 {
5517         return flag_string(buf, buf_len, flags,
5518                            send_err_status_flags,
5519                            ARRAY_SIZE(send_err_status_flags));
5520 }
5521
5522 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5523 {
5524         char buf[96];
5525         int i = 0;
5526
5527         /*
5528          * For most of these errors, there is nothing that can be done except
5529          * report or record it.
5530          */
5531         dd_dev_info(dd, "CCE Error: %s\n",
5532                     cce_err_status_string(buf, sizeof(buf), reg));
5533
5534         if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5535             is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5536                 /* this error requires a manual drop into SPC freeze mode */
5537                 /* then a fix up */
5538                 start_freeze_handling(dd->pport, FREEZE_SELF);
5539         }
5540
5541         for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5542                 if (reg & (1ull << i)) {
5543                         incr_cntr64(&dd->cce_err_status_cnt[i]);
5544                         /* maintain a counter over all cce_err_status errors */
5545                         incr_cntr64(&dd->sw_cce_err_status_aggregate);
5546                 }
5547         }
5548 }
5549
5550 /*
5551  * Check counters for receive errors that do not have an interrupt
5552  * associated with them.
5553  */
5554 #define RCVERR_CHECK_TIME 10
5555 static void update_rcverr_timer(struct timer_list *t)
5556 {
5557         struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5558         struct hfi1_pportdata *ppd = dd->pport;
5559         u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5560
5561         if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5562             ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5563                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5564                 set_link_down_reason(
5565                         ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5566                         OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5567                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5568         }
5569         dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5570
5571         mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5572 }
5573
5574 static int init_rcverr(struct hfi1_devdata *dd)
5575 {
5576         timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5577         /* Assume the hardware counter has been reset */
5578         dd->rcv_ovfl_cnt = 0;
5579         return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5580 }
5581
5582 static void free_rcverr(struct hfi1_devdata *dd)
5583 {
5584         if (dd->rcverr_timer.function)
5585                 del_timer_sync(&dd->rcverr_timer);
5586 }
5587
5588 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5589 {
5590         char buf[96];
5591         int i = 0;
5592
5593         dd_dev_info(dd, "Receive Error: %s\n",
5594                     rxe_err_status_string(buf, sizeof(buf), reg));
5595
5596         if (reg & ALL_RXE_FREEZE_ERR) {
5597                 int flags = 0;
5598
5599                 /*
5600                  * Freeze mode recovery is disabled for the errors
5601                  * in RXE_FREEZE_ABORT_MASK
5602                  */
5603                 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5604                         flags = FREEZE_ABORT;
5605
5606                 start_freeze_handling(dd->pport, flags);
5607         }
5608
5609         for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5610                 if (reg & (1ull << i))
5611                         incr_cntr64(&dd->rcv_err_status_cnt[i]);
5612         }
5613 }
5614
5615 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5616 {
5617         char buf[96];
5618         int i = 0;
5619
5620         dd_dev_info(dd, "Misc Error: %s\n",
5621                     misc_err_status_string(buf, sizeof(buf), reg));
5622         for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5623                 if (reg & (1ull << i))
5624                         incr_cntr64(&dd->misc_err_status_cnt[i]);
5625         }
5626 }
5627
5628 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5629 {
5630         char buf[96];
5631         int i = 0;
5632
5633         dd_dev_info(dd, "PIO Error: %s\n",
5634                     pio_err_status_string(buf, sizeof(buf), reg));
5635
5636         if (reg & ALL_PIO_FREEZE_ERR)
5637                 start_freeze_handling(dd->pport, 0);
5638
5639         for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5640                 if (reg & (1ull << i))
5641                         incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5642         }
5643 }
5644
5645 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5646 {
5647         char buf[96];
5648         int i = 0;
5649
5650         dd_dev_info(dd, "SDMA Error: %s\n",
5651                     sdma_err_status_string(buf, sizeof(buf), reg));
5652
5653         if (reg & ALL_SDMA_FREEZE_ERR)
5654                 start_freeze_handling(dd->pport, 0);
5655
5656         for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5657                 if (reg & (1ull << i))
5658                         incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5659         }
5660 }
5661
5662 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5663 {
5664         incr_cntr64(&ppd->port_xmit_discards);
5665 }
5666
5667 static void count_port_inactive(struct hfi1_devdata *dd)
5668 {
5669         __count_port_discards(dd->pport);
5670 }
5671
5672 /*
5673  * We have had a "disallowed packet" error during egress. Determine the
5674  * integrity check which failed, and update relevant error counter, etc.
5675  *
5676  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5677  * bit of state per integrity check, and so we can miss the reason for an
5678  * egress error if more than one packet fails the same integrity check
5679  * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5680  */
5681 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5682                                         int vl)
5683 {
5684         struct hfi1_pportdata *ppd = dd->pport;
5685         u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5686         u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5687         char buf[96];
5688
5689         /* clear down all observed info as quickly as possible after read */
5690         write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5691
5692         dd_dev_info(dd,
5693                     "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5694                     info, egress_err_info_string(buf, sizeof(buf), info), src);
5695
5696         /* Eventually add other counters for each bit */
5697         if (info & PORT_DISCARD_EGRESS_ERRS) {
5698                 int weight, i;
5699
5700                 /*
5701                  * Count all applicable bits as individual errors and
5702                  * attribute them to the packet that triggered this handler.
5703                  * This may not be completely accurate due to limitations
5704                  * on the available hardware error information.  There is
5705                  * a single information register and any number of error
5706                  * packets may have occurred and contributed to it before
5707                  * this routine is called.  This means that:
5708                  * a) If multiple packets with the same error occur before
5709                  *    this routine is called, earlier packets are missed.
5710                  *    There is only a single bit for each error type.
5711                  * b) Errors may not be attributed to the correct VL.
5712                  *    The driver is attributing all bits in the info register
5713                  *    to the packet that triggered this call, but bits
5714                  *    could be an accumulation of different packets with
5715                  *    different VLs.
5716                  * c) A single error packet may have multiple counts attached
5717                  *    to it.  There is no way for the driver to know if
5718                  *    multiple bits set in the info register are due to a
5719                  *    single packet or multiple packets.  The driver assumes
5720                  *    multiple packets.
5721                  */
5722                 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5723                 for (i = 0; i < weight; i++) {
5724                         __count_port_discards(ppd);
5725                         if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5726                                 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5727                         else if (vl == 15)
5728                                 incr_cntr64(&ppd->port_xmit_discards_vl
5729                                             [C_VL_15]);
5730                 }
5731         }
5732 }
5733
5734 /*
5735  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5736  * register. Does it represent a 'port inactive' error?
5737  */
5738 static inline int port_inactive_err(u64 posn)
5739 {
5740         return (posn >= SEES(TX_LINKDOWN) &&
5741                 posn <= SEES(TX_INCORRECT_LINK_STATE));
5742 }
5743
5744 /*
5745  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5746  * register. Does it represent a 'disallowed packet' error?
5747  */
5748 static inline int disallowed_pkt_err(int posn)
5749 {
5750         return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5751                 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5752 }
5753
5754 /*
5755  * Input value is a bit position of one of the SDMA engine disallowed
5756  * packet errors.  Return which engine.  Use of this must be guarded by
5757  * disallowed_pkt_err().
5758  */
5759 static inline int disallowed_pkt_engine(int posn)
5760 {
5761         return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5762 }
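/*
 * Illustrative combination of the helpers above: a SEND_EGRESS_ERR_STATUS bit
 * at position SEES(TX_SDMA3_DISALLOWED_PACKET) satisfies disallowed_pkt_err()
 * and disallowed_pkt_engine() returns 3; engine_to_vl() below then turns that
 * engine number into the VL used for per-VL discard accounting.
 */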
5763
5764 /*
5765  * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5766  * be done.
5767  */
5768 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5769 {
5770         struct sdma_vl_map *m;
5771         int vl;
5772
5773         /* range check */
5774         if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5775                 return -1;
5776
5777         rcu_read_lock();
5778         m = rcu_dereference(dd->sdma_map);
5779         vl = m->engine_to_vl[engine];
5780         rcu_read_unlock();
5781
5782         return vl;
5783 }
5784
5785 /*
5786  * Translate the send context (software index) into a VL.  Return -1 if the
5787  * translation cannot be done.
5788  */
5789 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5790 {
5791         struct send_context_info *sci;
5792         struct send_context *sc;
5793         int i;
5794
5795         sci = &dd->send_contexts[sw_index];
5796
5797         /* there is no information for user (PSM) and ack contexts */
5798         if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5799                 return -1;
5800
5801         sc = sci->sc;
5802         if (!sc)
5803                 return -1;
5804         if (dd->vld[15].sc == sc)
5805                 return 15;
5806         for (i = 0; i < num_vls; i++)
5807                 if (dd->vld[i].sc == sc)
5808                         return i;
5809
5810         return -1;
5811 }
5812
5813 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5814 {
5815         u64 reg_copy = reg, handled = 0;
5816         char buf[96];
5817         int i = 0;
5818
5819         if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5820                 start_freeze_handling(dd->pport, 0);
5821         else if (is_ax(dd) &&
5822                  (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5823                  (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5824                 start_freeze_handling(dd->pport, 0);
5825
5826         while (reg_copy) {
5827                 int posn = fls64(reg_copy);
5828                 /* fls64() returns a 1-based offset, we want it zero based */
5829                 int shift = posn - 1;
5830                 u64 mask = 1ULL << shift;
5831
5832                 if (port_inactive_err(shift)) {
5833                         count_port_inactive(dd);
5834                         handled |= mask;
5835                 } else if (disallowed_pkt_err(shift)) {
5836                         int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5837
5838                         handle_send_egress_err_info(dd, vl);
5839                         handled |= mask;
5840                 }
5841                 reg_copy &= ~mask;
5842         }
5843
5844         reg &= ~handled;
5845
5846         if (reg)
5847                 dd_dev_info(dd, "Egress Error: %s\n",
5848                             egress_err_status_string(buf, sizeof(buf), reg));
5849
5850         for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5851                 if (reg & (1ull << i))
5852                         incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5853         }
5854 }
5855
5856 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5857 {
5858         char buf[96];
5859         int i = 0;
5860
5861         dd_dev_info(dd, "Send Error: %s\n",
5862                     send_err_status_string(buf, sizeof(buf), reg));
5863
5864         for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5865                 if (reg & (1ull << i))
5866                         incr_cntr64(&dd->send_err_status_cnt[i]);
5867         }
5868 }
5869
5870 /*
5871  * The maximum number of times the error clear down will loop before
5872  * blocking a repeating error.  This value is arbitrary.
5873  */
5874 #define MAX_CLEAR_COUNT 20
5875
5876 /*
5877  * Clear and handle an error register.  All error interrupts are funneled
5878  * through here to have a central location to correctly handle single-
5879  * or multi-shot errors.
5880  *
5881  * For non per-context registers, call this routine with a context value
5882  * of 0 so the per-context offset is zero.
5883  *
5884  * If the handler loops too many times, assume that something is wrong
5885  * and can't be fixed, so mask the error bits.
5886  */
5887 static void interrupt_clear_down(struct hfi1_devdata *dd,
5888                                  u32 context,
5889                                  const struct err_reg_info *eri)
5890 {
5891         u64 reg;
5892         u32 count;
5893
5894         /* read in a loop until no more errors are seen */
5895         count = 0;
5896         while (1) {
5897                 reg = read_kctxt_csr(dd, context, eri->status);
5898                 if (reg == 0)
5899                         break;
5900                 write_kctxt_csr(dd, context, eri->clear, reg);
5901                 if (likely(eri->handler))
5902                         eri->handler(dd, context, reg);
5903                 count++;
5904                 if (count > MAX_CLEAR_COUNT) {
5905                         u64 mask;
5906
5907                         dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5908                                    eri->desc, reg);
5909                         /*
5910                          * Read-modify-write so any other masked bits
5911                          * remain masked.
5912                          */
5913                         mask = read_kctxt_csr(dd, context, eri->mask);
5914                         mask &= ~reg;
5915                         write_kctxt_csr(dd, context, eri->mask, mask);
5916                         break;
5917                 }
5918         }
5919 }
5920
5921 /*
5922  * CCE block "misc" interrupt.  Source is < 16.
5923  */
5924 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5925 {
5926         const struct err_reg_info *eri = &misc_errs[source];
5927
5928         if (eri->handler) {
5929                 interrupt_clear_down(dd, 0, eri);
5930         } else {
5931                 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5932                            source);
5933         }
5934 }
5935
5936 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5937 {
5938         return flag_string(buf, buf_len, flags,
5939                            sc_err_status_flags,
5940                            ARRAY_SIZE(sc_err_status_flags));
5941 }
5942
5943 /*
5944  * Send context error interrupt.  Source (hw_context) is < 160.
5945  *
5946  * All send context errors cause the send context to halt.  The normal
5947  * clear-down mechanism cannot be used because we cannot clear the
5948  * error bits until several other long-running items are done first.
5949  * This is OK because with the context halted, nothing else is going
5950  * to happen on it anyway.
5951  */
5952 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5953                                 unsigned int hw_context)
5954 {
5955         struct send_context_info *sci;
5956         struct send_context *sc;
5957         char flags[96];
5958         u64 status;
5959         u32 sw_index;
5960         int i = 0;
5961         unsigned long irq_flags;
5962
5963         sw_index = dd->hw_to_sw[hw_context];
5964         if (sw_index >= dd->num_send_contexts) {
5965                 dd_dev_err(dd,
5966                            "out of range sw index %u for send context %u\n",
5967                            sw_index, hw_context);
5968                 return;
5969         }
5970         sci = &dd->send_contexts[sw_index];
5971         spin_lock_irqsave(&dd->sc_lock, irq_flags);
5972         sc = sci->sc;
5973         if (!sc) {
5974                 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5975                            sw_index, hw_context);
5976                 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5977                 return;
5978         }
5979
5980         /* tell the software that a halt has begun */
5981         sc_stop(sc, SCF_HALTED);
5982
5983         status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5984
5985         dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5986                     send_context_err_status_string(flags, sizeof(flags),
5987                                                    status));
5988
5989         if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5990                 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5991
5992         /*
5993          * Automatically restart halted kernel contexts out of interrupt
5994          * context.  User contexts must ask the driver to restart the context.
5995          */
5996         if (sc->type != SC_USER)
5997                 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5998         spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5999
6000         /*
6001          * Update the counters for the corresponding status bits.
6002          * Note that these particular counters are aggregated over all
6003          * 160 contexts.
6004          */
6005         for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6006                 if (status & (1ull << i))
6007                         incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6008         }
6009 }
6010
6011 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6012                                 unsigned int source, u64 status)
6013 {
6014         struct sdma_engine *sde;
6015         int i = 0;
6016
6017         sde = &dd->per_sdma[source];
6018 #ifdef CONFIG_SDMA_VERBOSITY
6019         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6020                    slashstrip(__FILE__), __LINE__, __func__);
6021         dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6022                    sde->this_idx, source, (unsigned long long)status);
6023 #endif
6024         sde->err_cnt++;
6025         sdma_engine_error(sde, status);
6026
6027         /*
6028          * Update the counters for the corresponding status bits.
6029          * Note that these particular counters are aggregated over
6030          * all 16 DMA engines.
6031          */
6032         for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6033                 if (status & (1ull << i))
6034                         incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6035         }
6036 }
6037
6038 /*
6039  * CCE block SDMA error interrupt.  Source is < 16.
6040  */
6041 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6042 {
6043 #ifdef CONFIG_SDMA_VERBOSITY
6044         struct sdma_engine *sde = &dd->per_sdma[source];
6045
6046         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6047                    slashstrip(__FILE__), __LINE__, __func__);
6048         dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6049                    source);
6050         sdma_dumpstate(sde);
6051 #endif
6052         interrupt_clear_down(dd, source, &sdma_eng_err);
6053 }
6054
6055 /*
6056  * CCE block "various" interrupt.  Source is < 8.
6057  */
6058 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6059 {
6060         const struct err_reg_info *eri = &various_err[source];
6061
6062         /*
6063          * TCritInt cannot go through interrupt_clear_down()
6064          * because it is not a second tier interrupt. The handler
6065          * should be called directly.
6066          */
6067         if (source == TCRIT_INT_SOURCE)
6068                 handle_temp_err(dd);
6069         else if (eri->handler)
6070                 interrupt_clear_down(dd, 0, eri);
6071         else
6072                 dd_dev_info(dd,
6073                             "%s: Unimplemented/reserved interrupt %d\n",
6074                             __func__, source);
6075 }
6076
6077 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6078 {
6079         /* src_ctx is always zero */
6080         struct hfi1_pportdata *ppd = dd->pport;
6081         unsigned long flags;
6082         u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6083
6084         if (reg & QSFP_HFI0_MODPRST_N) {
6085                 if (!qsfp_mod_present(ppd)) {
6086                         dd_dev_info(dd, "%s: QSFP module removed\n",
6087                                     __func__);
6088
6089                         ppd->driver_link_ready = 0;
6090                         /*
6091                          * Cable removed, reset all our information about the
6092                          * cache and cable capabilities
6093                          */
6094
6095                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6096                         /*
6097                          * We don't set cache_refresh_required here as we expect
6098                          * an interrupt when a cable is inserted
6099                          */
6100                         ppd->qsfp_info.cache_valid = 0;
6101                         ppd->qsfp_info.reset_needed = 0;
6102                         ppd->qsfp_info.limiting_active = 0;
6103                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6104                                                flags);
6105                         /* Invert the ModPresent pin now to detect plug-in */
6106                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6107                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6108
6109                         if ((ppd->offline_disabled_reason >
6110                           HFI1_ODR_MASK(
6111                           OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6112                           (ppd->offline_disabled_reason ==
6113                           HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6114                                 ppd->offline_disabled_reason =
6115                                 HFI1_ODR_MASK(
6116                                 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6117
6118                         if (ppd->host_link_state == HLS_DN_POLL) {
6119                                 /*
6120                                  * The link is still in POLL. This means
6121                                  * that the normal link down processing
6122                                  * will not happen. We have to do it here
6123                                  * before turning the DC off.
6124                                  */
6125                                 queue_work(ppd->link_wq, &ppd->link_down_work);
6126                         }
6127                 } else {
6128                         dd_dev_info(dd, "%s: QSFP module inserted\n",
6129                                     __func__);
6130
6131                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6132                         ppd->qsfp_info.cache_valid = 0;
6133                         ppd->qsfp_info.cache_refresh_required = 1;
6134                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6135                                                flags);
6136
6137                         /*
6138                          * Stop inversion of ModPresent pin to detect
6139                          * removal of the cable
6140                          */
6141                         qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6142                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6143                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6144
6145                         ppd->offline_disabled_reason =
6146                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6147                 }
6148         }
6149
6150         if (reg & QSFP_HFI0_INT_N) {
6151                 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6152                             __func__);
6153                 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6154                 ppd->qsfp_info.check_interrupt_flags = 1;
6155                 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6156         }
6157
6158         /* Schedule the QSFP work only if there is a cable attached. */
6159         if (qsfp_mod_present(ppd))
6160                 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6161 }
6162
6163 static int request_host_lcb_access(struct hfi1_devdata *dd)
6164 {
6165         int ret;
6166
6167         ret = do_8051_command(dd, HCMD_MISC,
6168                               (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6169                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6170         if (ret != HCMD_SUCCESS) {
6171                 dd_dev_err(dd, "%s: command failed with error %d\n",
6172                            __func__, ret);
6173         }
6174         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6175 }
6176
6177 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6178 {
6179         int ret;
6180
6181         ret = do_8051_command(dd, HCMD_MISC,
6182                               (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6183                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6184         if (ret != HCMD_SUCCESS) {
6185                 dd_dev_err(dd, "%s: command failed with error %d\n",
6186                            __func__, ret);
6187         }
6188         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6189 }
6190
6191 /*
6192  * Set the LCB selector - allow host access.  The DCC selector always
6193  * points to the host.
6194  */
6195 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6196 {
6197         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6198                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6199                   DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6200 }
6201
6202 /*
6203  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6204  * points to the host.
6205  */
6206 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6207 {
6208         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6209                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6210 }
6211
6212 /*
6213  * Acquire LCB access from the 8051.  If the host already has access,
6214  * just increment a counter.  Otherwise, inform the 8051 that the
6215  * host is taking access.
6216  *
6217  * Returns:
6218  *      0 on success
6219  *      -EBUSY if the 8051 has control and cannot be disturbed
6220  *      -errno if unable to acquire access from the 8051
6221  */
6222 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6223 {
6224         struct hfi1_pportdata *ppd = dd->pport;
6225         int ret = 0;
6226
6227         /*
6228          * Use the host link state lock so the operation of this routine
6229          * { link state check, selector change, count increment } can occur
6230          * as a unit against a link state change.  Otherwise there is a
6231          * race between the state change and the count increment.
6232          */
6233         if (sleep_ok) {
6234                 mutex_lock(&ppd->hls_lock);
6235         } else {
6236                 while (!mutex_trylock(&ppd->hls_lock))
6237                         udelay(1);
6238         }
6239
6240         /* this access is valid only when the link is up */
6241         if (ppd->host_link_state & HLS_DOWN) {
6242                 dd_dev_info(dd, "%s: link state %s not up\n",
6243                             __func__, link_state_name(ppd->host_link_state));
6244                 ret = -EBUSY;
6245                 goto done;
6246         }
6247
6248         if (dd->lcb_access_count == 0) {
6249                 ret = request_host_lcb_access(dd);
6250                 if (ret) {
6251                         dd_dev_err(dd,
6252                                    "%s: unable to acquire LCB access, err %d\n",
6253                                    __func__, ret);
6254                         goto done;
6255                 }
6256                 set_host_lcb_access(dd);
6257         }
6258         dd->lcb_access_count++;
6259 done:
6260         mutex_unlock(&ppd->hls_lock);
6261         return ret;
6262 }
6263
6264 /*
6265  * Release LCB access by decrementing the use count.  If the count is moving
6266  * from 1 to 0, inform the 8051 that it has control back.
6267  *
6268  * Returns:
6269  *      0 on success
6270  *      -errno if unable to release access to the 8051
6271  */
6272 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6273 {
6274         int ret = 0;
6275
6276         /*
6277          * Use the host link state lock because the acquire needed it.
6278          * Here, we only need to keep { selector change, count decrement }
6279          * as a unit.
6280          */
6281         if (sleep_ok) {
6282                 mutex_lock(&dd->pport->hls_lock);
6283         } else {
6284                 while (!mutex_trylock(&dd->pport->hls_lock))
6285                         udelay(1);
6286         }
6287
6288         if (dd->lcb_access_count == 0) {
6289                 dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6290                            __func__);
6291                 goto done;
6292         }
6293
6294         if (dd->lcb_access_count == 1) {
6295                 set_8051_lcb_access(dd);
6296                 ret = request_8051_lcb_access(dd);
6297                 if (ret) {
6298                         dd_dev_err(dd,
6299                                    "%s: unable to release LCB access, err %d\n",
6300                                    __func__, ret);
6301                         /* restore host access if the grant didn't work */
6302                         set_host_lcb_access(dd);
6303                         goto done;
6304                 }
6305         }
6306         dd->lcb_access_count--;
6307 done:
6308         mutex_unlock(&dd->pport->hls_lock);
6309         return ret;
6310 }
6311
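/*
 * Illustrative sketch (not part of the original source): how a caller is
 * expected to pair acquire_lcb_access()/release_lcb_access() around a
 * direct LCB CSR read.  The helper name and the CSR argument are
 * hypothetical; only acquire_lcb_access(), read_csr() and
 * release_lcb_access() come from this driver.
 */
static u64 example_read_lcb_csr(struct hfi1_devdata *dd, u32 csr)
{
        u64 val = 0;

        /* take LCB ownership from the 8051; sleep_ok = 1, so this may block */
        if (acquire_lcb_access(dd, 1) == 0) {
                val = read_csr(dd, csr);
                /* return ownership once the access count drops back to zero */
                release_lcb_access(dd, 1);
        }
        return val;
}
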
6312 /*
6313  * Initialize LCB access variables and state.  Called during driver load,
6314  * after most of the initialization is finished.
6315  *
6316  * The DC default is LCB access on for the host.  The driver defaults to
6317  * leaving access to the 8051.  Assign access now - this constrains the call
6318  * to this routine to be after all LCB set-up is done.  In particular, after
6319  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6320  */
6321 static void init_lcb_access(struct hfi1_devdata *dd)
6322 {
6323         dd->lcb_access_count = 0;
6324 }
6325
6326 /*
6327  * Write a response back to a 8051 request.
6328  */
6329 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6330 {
6331         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6332                   DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6333                   (u64)return_code <<
6334                   DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6335                   (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6336 }
6337
6338 /*
6339  * Handle host requests from the 8051.
6340  */
6341 static void handle_8051_request(struct hfi1_pportdata *ppd)
6342 {
6343         struct hfi1_devdata *dd = ppd->dd;
6344         u64 reg;
6345         u16 data = 0;
6346         u8 type;
6347
6348         reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6349         if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6350                 return; /* no request */
6351
6352         /* zero out COMPLETED so the response is seen */
6353         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6354
6355         /* extract request details */
6356         type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6357                         & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6358         data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6359                         & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6360
6361         switch (type) {
6362         case HREQ_LOAD_CONFIG:
6363         case HREQ_SAVE_CONFIG:
6364         case HREQ_READ_CONFIG:
6365         case HREQ_SET_TX_EQ_ABS:
6366         case HREQ_SET_TX_EQ_REL:
6367         case HREQ_ENABLE:
6368                 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6369                             type);
6370                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6371                 break;
6372         case HREQ_LCB_RESET:
6373                 /* Put the LCB, RX FPE and TX FPE into reset */
6374                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6375                 /* Make sure the write completed */
6376                 (void)read_csr(dd, DCC_CFG_RESET);
6377                 /* Hold the reset long enough to take effect */
6378                 udelay(1);
6379                 /* Take the LCB, RX FPE and TX FPE out of reset */
6380                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6381                 hreq_response(dd, HREQ_SUCCESS, 0);
6382
6383                 break;
6384         case HREQ_CONFIG_DONE:
6385                 hreq_response(dd, HREQ_SUCCESS, 0);
6386                 break;
6387
6388         case HREQ_INTERFACE_TEST:
6389                 hreq_response(dd, HREQ_SUCCESS, data);
6390                 break;
6391         default:
6392                 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6393                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6394                 break;
6395         }
6396 }
6397
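/*
 * Summary of the host request handshake implemented above (restated from
 * the code, no new behavior): the 8051 raises REQ_NEW in
 * DC_DC8051_CFG_EXT_DEV_1 along with a request type and 16 bits of data;
 * the host clears DC_DC8051_CFG_EXT_DEV_0, acts on the request, then calls
 * hreq_response(), which writes COMPLETED plus a return code and response
 * data back into DC_DC8051_CFG_EXT_DEV_0 for the 8051 to read.
 */
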
6398 /*
6399  * Set up allocation unit value.
6400  */
6401 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6402 {
6403         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6404
6405         /* do not modify other values in the register */
6406         reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6407         reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6408         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6409 }
6410
6411 /*
6412  * Set up initial VL15 credits of the remote.  Assumes the rest of
6413  * the CM credit registers are zero from a previous global or credit reset.
6414  * Shared limit for VL15 will always be 0.
6415  */
6416 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6417 {
6418         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6419
6420         /* set initial values for total and shared credit limit */
6421         reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6422                  SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6423
6424         /*
6425          * Set total limit to be equal to VL15 credits.
6426          * Leave shared limit at 0.
6427          */
6428         reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6429         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6430
6431         write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6432                   << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6433 }
6434
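/*
 * Illustrative sketch (not part of the original source): the VL15 credit
 * sequencing around link up, condensed from handle_verify_cap() and
 * handle_link_up() below.  The wrapper function is hypothetical; the calls
 * and the vl15buf_cached field are the ones used in this file.
 */
static void example_vl15_credit_sequence(struct hfi1_devdata *dd,
                                         u16 peer_vl15buf)
{
        /* at VerifyCap: advertise zero credits, remember the peer's value */
        set_up_vl15(dd, 0);
        dd->vl15buf_cached = peer_vl15buf;

        /* at link up (not quick linkup/simulator): release the cached value */
        set_up_vl15(dd, dd->vl15buf_cached);
}
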
6435 /*
6436  * Zero all credit details from the previous connection and
6437  * reset the CM manager's internal counters.
6438  */
6439 void reset_link_credits(struct hfi1_devdata *dd)
6440 {
6441         int i;
6442
6443         /* remove all previous VL credit limits */
6444         for (i = 0; i < TXE_NUM_DATA_VL; i++)
6445                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6446         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6447         write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6448         /* reset the CM block */
6449         pio_send_control(dd, PSC_CM_RESET);
6450         /* reset cached value */
6451         dd->vl15buf_cached = 0;
6452 }
6453
6454 /* convert a vCU to a CU */
6455 static u32 vcu_to_cu(u8 vcu)
6456 {
6457         return 1 << vcu;
6458 }
6459
6460 /* convert a CU to a vCU */
6461 static u8 cu_to_vcu(u32 cu)
6462 {
6463         return ilog2(cu);
6464 }
6465
6466 /* convert a vAU to an AU */
6467 static u32 vau_to_au(u8 vau)
6468 {
6469         return 8 * (1 << vau);
6470 }
6471
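/*
 * Worked examples for the conversions above (these follow directly from
 * the formulas; the vAU 0 -> AU 8 and vAU 1 -> AU 16 pairing is also
 * spelled out in handle_verify_cap() below):
 *      vau_to_au(0) = 8 bytes, vau_to_au(1) = 16 bytes, vau_to_au(3) = 64 bytes
 *      vcu_to_cu(0) = 1 credit, vcu_to_cu(4) = 16 credits, cu_to_vcu(16) = 4
 */
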
6472 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6473 {
6474         ppd->sm_trap_qp = 0x0;
6475         ppd->sa_qp = 0x1;
6476 }
6477
6478 /*
6479  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6480  */
6481 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6482 {
6483         u64 reg;
6484
6485         /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6486         write_csr(dd, DC_LCB_CFG_RUN, 0);
6487         /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6488         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6489                   1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6490         /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6491         dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6492         reg = read_csr(dd, DCC_CFG_RESET);
6493         write_csr(dd, DCC_CFG_RESET, reg |
6494                   DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6495         (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6496         if (!abort) {
6497                 udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6498                 write_csr(dd, DCC_CFG_RESET, reg);
6499                 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6500         }
6501 }
6502
6503 /*
6504  * This routine should be called after the link has been transitioned to
6505  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6506  * reset).
6507  *
6508  * The expectation is that the caller of this routine would have taken
6509  * care of properly transitioning the link into the correct state.
6510  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6511  *       before calling this function.
6512  */
6513 static void _dc_shutdown(struct hfi1_devdata *dd)
6514 {
6515         lockdep_assert_held(&dd->dc8051_lock);
6516
6517         if (dd->dc_shutdown)
6518                 return;
6519
6520         dd->dc_shutdown = 1;
6521         /* Shutdown the LCB */
6522         lcb_shutdown(dd, 1);
6523         /*
6524          * Going to OFFLINE would have caused the 8051 to put the
6525          * SerDes into reset already. Just need to shut down the 8051
6526          * itself.
6527          */
6528         write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6529 }
6530
6531 static void dc_shutdown(struct hfi1_devdata *dd)
6532 {
6533         mutex_lock(&dd->dc8051_lock);
6534         _dc_shutdown(dd);
6535         mutex_unlock(&dd->dc8051_lock);
6536 }
6537
6538 /*
6539  * Calling this after the DC has been brought out of reset should not
6540  * do any damage.
6541  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6542  *       before calling this function.
6543  */
6544 static void _dc_start(struct hfi1_devdata *dd)
6545 {
6546         lockdep_assert_held(&dd->dc8051_lock);
6547
6548         if (!dd->dc_shutdown)
6549                 return;
6550
6551         /* Take the 8051 out of reset */
6552         write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6553         /* Wait until 8051 is ready */
6554         if (wait_fm_ready(dd, TIMEOUT_8051_START))
6555                 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6556                            __func__);
6557
6558         /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6559         write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6560         /* lcb_shutdown() with abort=1 does not restore these */
6561         write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6562         dd->dc_shutdown = 0;
6563 }
6564
6565 static void dc_start(struct hfi1_devdata *dd)
6566 {
6567         mutex_lock(&dd->dc8051_lock);
6568         _dc_start(dd);
6569         mutex_unlock(&dd->dc8051_lock);
6570 }
6571
6572 /*
6573  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6574  */
6575 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6576 {
6577         u64 rx_radr, tx_radr;
6578         u32 version;
6579
6580         if (dd->icode != ICODE_FPGA_EMULATION)
6581                 return;
6582
6583         /*
6584          * These LCB defaults on emulator _s are good, nothing to do here:
6585          *      LCB_CFG_TX_FIFOS_RADR
6586          *      LCB_CFG_RX_FIFOS_RADR
6587          *      LCB_CFG_LN_DCLK
6588          *      LCB_CFG_IGNORE_LOST_RCLK
6589          */
6590         if (is_emulator_s(dd))
6591                 return;
6592         /* else this is _p */
6593
6594         version = emulator_rev(dd);
6595         if (!is_ax(dd))
6596                 version = 0x2d; /* all B0 use 0x2d or higher settings */
6597
6598         if (version <= 0x12) {
6599                 /* release 0x12 and below */
6600
6601                 /*
6602                  * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6603                  * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6604                  * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6605                  */
6606                 rx_radr =
6607                       0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6608                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6609                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6610                 /*
6611                  * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6612                  * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6613                  */
6614                 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6615         } else if (version <= 0x18) {
6616                 /* release 0x13 up to 0x18 */
6617                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6618                 rx_radr =
6619                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6620                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6621                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6622                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6623         } else if (version == 0x19) {
6624                 /* release 0x19 */
6625                 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6626                 rx_radr =
6627                       0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6628                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6629                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6630                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6631         } else if (version == 0x1a) {
6632                 /* release 0x1a */
6633                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6634                 rx_radr =
6635                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6636                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6637                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6638                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6639                 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6640         } else {
6641                 /* release 0x1b and higher */
6642                 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6643                 rx_radr =
6644                       0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6645                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6646                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6647                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6648         }
6649
6650         write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6651         /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6652         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6653                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6654         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6655 }
6656
6657 /*
6658  * Handle a SMA idle message
6659  *
6660  * This is a work-queue function outside of the interrupt.
6661  */
6662 void handle_sma_message(struct work_struct *work)
6663 {
6664         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6665                                                         sma_message_work);
6666         struct hfi1_devdata *dd = ppd->dd;
6667         u64 msg;
6668         int ret;
6669
6670         /*
6671          * msg is bytes 1-4 of the 40-bit idle message - the command code
6672          * is stripped off
6673          */
6674         ret = read_idle_sma(dd, &msg);
6675         if (ret)
6676                 return;
6677         dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6678         /*
6679          * React to the SMA message.  Byte[1] (0 for us) is the command.
6680          */
6681         switch (msg & 0xff) {
6682         case SMA_IDLE_ARM:
6683                 /*
6684                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6685                  * State Transitions
6686                  *
6687                  * Only expected in INIT or ARMED, discard otherwise.
6688                  */
6689                 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6690                         ppd->neighbor_normal = 1;
6691                 break;
6692         case SMA_IDLE_ACTIVE:
6693                 /*
6694                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6695                  * State Transitions
6696                  *
6697                  * Can activate the node.  Discard otherwise.
6698                  */
6699                 if (ppd->host_link_state == HLS_UP_ARMED &&
6700                     ppd->is_active_optimize_enabled) {
6701                         ppd->neighbor_normal = 1;
6702                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
6703                         if (ret)
6704                                 dd_dev_err(
6705                                         dd,
6706                                         "%s: received Active SMA idle message, couldn't set link to Active\n",
6707                                         __func__);
6708                 }
6709                 break;
6710         default:
6711                 dd_dev_err(dd,
6712                            "%s: received unexpected SMA idle message 0x%llx\n",
6713                            __func__, msg);
6714                 break;
6715         }
6716 }
6717
6718 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6719 {
6720         u64 rcvctrl;
6721         unsigned long flags;
6722
6723         spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6724         rcvctrl = read_csr(dd, RCV_CTRL);
6725         rcvctrl |= add;
6726         rcvctrl &= ~clear;
6727         write_csr(dd, RCV_CTRL, rcvctrl);
6728         spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6729 }
6730
6731 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6732 {
6733         adjust_rcvctrl(dd, add, 0);
6734 }
6735
6736 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6737 {
6738         adjust_rcvctrl(dd, 0, clear);
6739 }
6740
6741 /*
6742  * Called from all interrupt handlers to start handling an SPC freeze.
6743  */
6744 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6745 {
6746         struct hfi1_devdata *dd = ppd->dd;
6747         struct send_context *sc;
6748         int i;
6749         int sc_flags;
6750
6751         if (flags & FREEZE_SELF)
6752                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6753
6754         /* enter frozen mode */
6755         dd->flags |= HFI1_FROZEN;
6756
6757         /* notify all SDMA engines that they are going into a freeze */
6758         sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6759
6760         sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6761                                               SCF_LINK_DOWN : 0);
6762         /* do halt pre-handling on all enabled send contexts */
6763         for (i = 0; i < dd->num_send_contexts; i++) {
6764                 sc = dd->send_contexts[i].sc;
6765                 if (sc && (sc->flags & SCF_ENABLED))
6766                         sc_stop(sc, sc_flags);
6767         }
6768
6769         /* Send contexts are frozen. Notify user space */
6770         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6771
6772         if (flags & FREEZE_ABORT) {
6773                 dd_dev_err(dd,
6774                            "Aborted freeze recovery. Please REBOOT system\n");
6775                 return;
6776         }
6777         /* queue non-interrupt handler */
6778         queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6779 }
6780
6781 /*
6782  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6783  * depending on the "freeze" parameter.
6784  *
6785  * No need to return an error if it times out, our only option
6786  * is to proceed anyway.
6787  */
6788 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6789 {
6790         unsigned long timeout;
6791         u64 reg;
6792
6793         timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6794         while (1) {
6795                 reg = read_csr(dd, CCE_STATUS);
6796                 if (freeze) {
6797                         /* waiting until all indicators are set */
6798                         if ((reg & ALL_FROZE) == ALL_FROZE)
6799                                 return; /* all done */
6800                 } else {
6801                         /* waiting until all indicators are clear */
6802                         if ((reg & ALL_FROZE) == 0)
6803                                 return; /* all done */
6804                 }
6805
6806                 if (time_after(jiffies, timeout)) {
6807                         dd_dev_err(dd,
6808                                    "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6809                                    freeze ? "" : "un", reg & ALL_FROZE,
6810                                    freeze ? ALL_FROZE : 0ull);
6811                         return;
6812                 }
6813                 usleep_range(80, 120);
6814         }
6815 }
6816
6817 /*
6818  * Do all freeze handling for the RXE block.
6819  */
6820 static void rxe_freeze(struct hfi1_devdata *dd)
6821 {
6822         int i;
6823         struct hfi1_ctxtdata *rcd;
6824
6825         /* disable port */
6826         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6827
6828         /* disable all receive contexts */
6829         for (i = 0; i < dd->num_rcv_contexts; i++) {
6830                 rcd = hfi1_rcd_get_by_index(dd, i);
6831                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6832                 hfi1_rcd_put(rcd);
6833         }
6834 }
6835
6836 /*
6837  * Unfreeze handling for the RXE block - kernel contexts only.
6838  * This will also enable the port.  User contexts will do unfreeze
6839  * handling on a per-context basis as they call into the driver.
6840  *
6841  */
6842 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6843 {
6844         u32 rcvmask;
6845         u16 i;
6846         struct hfi1_ctxtdata *rcd;
6847
6848         /* enable all kernel contexts */
6849         for (i = 0; i < dd->num_rcv_contexts; i++) {
6850                 rcd = hfi1_rcd_get_by_index(dd, i);
6851
6852                 /* Ensure all non-user contexts (including vnic) are enabled */
6853                 if (!rcd ||
6854                     (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6855                         hfi1_rcd_put(rcd);
6856                         continue;
6857                 }
6858                 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6859                 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6860                 rcvmask |= rcd->rcvhdrtail_kvaddr ?
6861                         HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6862                 hfi1_rcvctrl(dd, rcvmask, rcd);
6863                 hfi1_rcd_put(rcd);
6864         }
6865
6866         /* enable port */
6867         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6868 }
6869
6870 /*
6871  * Non-interrupt SPC freeze handling.
6872  *
6873  * This is a work-queue function outside of the triggering interrupt.
6874  */
6875 void handle_freeze(struct work_struct *work)
6876 {
6877         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6878                                                                 freeze_work);
6879         struct hfi1_devdata *dd = ppd->dd;
6880
6881         /* wait for freeze indicators on all affected blocks */
6882         wait_for_freeze_status(dd, 1);
6883
6884         /* SPC is now frozen */
6885
6886         /* do send PIO freeze steps */
6887         pio_freeze(dd);
6888
6889         /* do send DMA freeze steps */
6890         sdma_freeze(dd);
6891
6892         /* do send egress freeze steps - nothing to do */
6893
6894         /* do receive freeze steps */
6895         rxe_freeze(dd);
6896
6897         /*
6898          * Unfreeze the hardware - clear the freeze, wait for each
6899          * block's frozen bit to clear, then clear the frozen flag.
6900          */
6901         write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6902         wait_for_freeze_status(dd, 0);
6903
6904         if (is_ax(dd)) {
6905                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6906                 wait_for_freeze_status(dd, 1);
6907                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6908                 wait_for_freeze_status(dd, 0);
6909         }
6910
6911         /* do send PIO unfreeze steps for kernel contexts */
6912         pio_kernel_unfreeze(dd);
6913
6914         /* do send DMA unfreeze steps */
6915         sdma_unfreeze(dd);
6916
6917         /* do send egress unfreeze steps - nothing to do */
6918
6919         /* do receive unfreeze steps for kernel contexts */
6920         rxe_kernel_unfreeze(dd);
6921
6922         /*
6923          * The unfreeze procedure touches global device registers when
6924          * it disables and re-enables RXE. Mark the device unfrozen
6925          * after all that is done so other parts of the driver waiting
6926          * for the device to unfreeze don't do things out of order.
6927          *
6928          * The above implies that the meaning of HFI1_FROZEN flag is
6929          * "Device has gone into freeze mode and freeze mode handling
6930          * is still in progress."
6931          *
6932          * The flag will be removed when freeze mode processing has
6933          * completed.
6934          */
6935         dd->flags &= ~HFI1_FROZEN;
6936         wake_up(&dd->event_queue);
6937
6938         /* no longer frozen */
6939 }
6940
6941 /**
6942  * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6943  * counters.
6944  * @ppd: info of physical Hfi port
6945  * @link_width: new link width after link up or downgrade
6946  *
6947  * Update the PortXmitWait and PortVlXmitWait counters after
6948  * a link up or downgrade event to reflect a link width change.
6949  */
6950 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6951 {
6952         int i;
6953         u16 tx_width;
6954         u16 link_speed;
6955
6956         tx_width = tx_link_width(link_width);
6957         link_speed = get_link_speed(ppd->link_speed_active);
6958
6959         /*
6960          * There are C_VL_COUNT number of PortVLXmitWait counters.
6961          * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
6962          */
6963         for (i = 0; i < C_VL_COUNT + 1; i++)
6964                 get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6965 }
6966
6967 /*
6968  * Handle a link up interrupt from the 8051.
6969  *
6970  * This is a work-queue function outside of the interrupt.
6971  */
6972 void handle_link_up(struct work_struct *work)
6973 {
6974         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6975                                                   link_up_work);
6976         struct hfi1_devdata *dd = ppd->dd;
6977
6978         set_link_state(ppd, HLS_UP_INIT);
6979
6980         /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6981         read_ltp_rtt(dd);
6982         /*
6983          * OPA specifies that certain counters are cleared on a transition
6984          * to link up, so do that.
6985          */
6986         clear_linkup_counters(dd);
6987         /*
6988          * And (re)set link up default values.
6989          */
6990         set_linkup_defaults(ppd);
6991
6992         /*
6993          * Set VL15 credits. Use cached value from verify cap interrupt.
6994          * In case of quick linkup or simulator, vl15 value will be set by
6995          * handle_linkup_change. VerifyCap interrupt handler will not be
6996          * called in those scenarios.
6997          */
6998         if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6999                 set_up_vl15(dd, dd->vl15buf_cached);
7000
7001         /* enforce link speed enabled */
7002         if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
7003                 /* oops - current speed is not enabled, bounce */
7004                 dd_dev_err(dd,
7005                            "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
7006                            ppd->link_speed_active, ppd->link_speed_enabled);
7007                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7008                                      OPA_LINKDOWN_REASON_SPEED_POLICY);
7009                 set_link_state(ppd, HLS_DN_OFFLINE);
7010                 start_link(ppd);
7011         }
7012 }
7013
7014 /*
7015  * Several pieces of LNI information were cached for SMA in ppd.
7016  * Reset these on link down.
7017  */
7018 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7019 {
7020         ppd->neighbor_guid = 0;
7021         ppd->neighbor_port_number = 0;
7022         ppd->neighbor_type = 0;
7023         ppd->neighbor_fm_security = 0;
7024 }
7025
7026 static const char * const link_down_reason_strs[] = {
7027         [OPA_LINKDOWN_REASON_NONE] = "None",
7028         [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7029         [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7030         [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7031         [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7032         [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7033         [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7034         [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7035         [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7036         [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7037         [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7038         [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7039         [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7040         [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7041         [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7042         [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7043         [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7044         [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7045         [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7046         [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7047         [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7048         [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7049         [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7050         [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7051         [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7052         [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7053         [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7054         [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7055         [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7056         [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7057         [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7058         [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7059         [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7060                                         "Excessive buffer overrun",
7061         [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7062         [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7063         [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7064         [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7065         [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7066         [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7067         [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7068         [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7069                                         "Local media not installed",
7070         [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7071         [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7072         [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7073                                         "End to end not installed",
7074         [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7075         [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7076         [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7077         [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7078         [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7079         [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7080 };
7081
7082 /* return the neighbor link down reason string */
7083 static const char *link_down_reason_str(u8 reason)
7084 {
7085         const char *str = NULL;
7086
7087         if (reason < ARRAY_SIZE(link_down_reason_strs))
7088                 str = link_down_reason_strs[reason];
7089         if (!str)
7090                 str = "(invalid)";
7091
7092         return str;
7093 }
7094
7095 /*
7096  * Handle a link down interrupt from the 8051.
7097  *
7098  * This is a work-queue function outside of the interrupt.
7099  */
7100 void handle_link_down(struct work_struct *work)
7101 {
7102         u8 lcl_reason, neigh_reason = 0;
7103         u8 link_down_reason;
7104         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7105                                                   link_down_work);
7106         int was_up;
7107         static const char ldr_str[] = "Link down reason: ";
7108
7109         if ((ppd->host_link_state &
7110              (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7111              ppd->port_type == PORT_TYPE_FIXED)
7112                 ppd->offline_disabled_reason =
7113                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7114
7115         /* Go offline first, then deal with reading/writing through 8051 */
7116         was_up = !!(ppd->host_link_state & HLS_UP);
7117         set_link_state(ppd, HLS_DN_OFFLINE);
7118         xchg(&ppd->is_link_down_queued, 0);
7119
7120         if (was_up) {
7121                 lcl_reason = 0;
7122                 /* link down reason is only valid if the link was up */
7123                 read_link_down_reason(ppd->dd, &link_down_reason);
7124                 switch (link_down_reason) {
7125                 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7126                         /* the link went down, no idle message reason */
7127                         dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7128                                     ldr_str);
7129                         break;
7130                 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7131                         /*
7132                          * The neighbor reason is only valid if an idle message
7133                          * was received for it.
7134                          */
7135                         read_planned_down_reason_code(ppd->dd, &neigh_reason);
7136                         dd_dev_info(ppd->dd,
7137                                     "%sNeighbor link down message %d, %s\n",
7138                                     ldr_str, neigh_reason,
7139                                     link_down_reason_str(neigh_reason));
7140                         break;
7141                 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7142                         dd_dev_info(ppd->dd,
7143                                     "%sHost requested link to go offline\n",
7144                                     ldr_str);
7145                         break;
7146                 default:
7147                         dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7148                                     ldr_str, link_down_reason);
7149                         break;
7150                 }
7151
7152                 /*
7153                  * If no reason, assume peer-initiated but missed
7154                  * LinkGoingDown idle flits.
7155                  */
7156                 if (neigh_reason == 0)
7157                         lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7158         } else {
7159                 /* went down while polling or going up */
7160                 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7161         }
7162
7163         set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7164
7165         /* inform the SMA when the link transitions from up to down */
7166         if (was_up && ppd->local_link_down_reason.sma == 0 &&
7167             ppd->neigh_link_down_reason.sma == 0) {
7168                 ppd->local_link_down_reason.sma =
7169                                         ppd->local_link_down_reason.latest;
7170                 ppd->neigh_link_down_reason.sma =
7171                                         ppd->neigh_link_down_reason.latest;
7172         }
7173
7174         reset_neighbor_info(ppd);
7175
7176         /* disable the port */
7177         clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7178
7179         /*
7180          * If there is no cable attached, turn the DC off. Otherwise,
7181          * start the link bring up.
7182          */
7183         if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7184                 dc_shutdown(ppd->dd);
7185         else
7186                 start_link(ppd);
7187 }
7188
7189 void handle_link_bounce(struct work_struct *work)
7190 {
7191         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7192                                                         link_bounce_work);
7193
7194         /*
7195          * Only do something if the link is currently up.
7196          */
7197         if (ppd->host_link_state & HLS_UP) {
7198                 set_link_state(ppd, HLS_DN_OFFLINE);
7199                 start_link(ppd);
7200         } else {
7201                 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7202                             __func__, link_state_name(ppd->host_link_state));
7203         }
7204 }
7205
7206 /*
7207  * Mask conversion: Capability exchange to Port LTP.  The capability
7208  * exchange has an implicit 16b CRC that is mandatory.
7209  */
7210 static int cap_to_port_ltp(int cap)
7211 {
7212         int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7213
7214         if (cap & CAP_CRC_14B)
7215                 port_ltp |= PORT_LTP_CRC_MODE_14;
7216         if (cap & CAP_CRC_48B)
7217                 port_ltp |= PORT_LTP_CRC_MODE_48;
7218         if (cap & CAP_CRC_12B_16B_PER_LANE)
7219                 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7220
7221         return port_ltp;
7222 }
7223
7224 /*
7225  * Convert an OPA Port LTP mask to capability mask
7226  */
7227 int port_ltp_to_cap(int port_ltp)
7228 {
7229         int cap_mask = 0;
7230
7231         if (port_ltp & PORT_LTP_CRC_MODE_14)
7232                 cap_mask |= CAP_CRC_14B;
7233         if (port_ltp & PORT_LTP_CRC_MODE_48)
7234                 cap_mask |= CAP_CRC_48B;
7235         if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7236                 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7237
7238         return cap_mask;
7239 }
7240
7241 /*
7242  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7243  */
7244 static int lcb_to_port_ltp(int lcb_crc)
7245 {
7246         int port_ltp = 0;
7247
7248         if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7249                 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7250         else if (lcb_crc == LCB_CRC_48B)
7251                 port_ltp = PORT_LTP_CRC_MODE_48;
7252         else if (lcb_crc == LCB_CRC_14B)
7253                 port_ltp = PORT_LTP_CRC_MODE_14;
7254         else
7255                 port_ltp = PORT_LTP_CRC_MODE_16;
7256
7257         return port_ltp;
7258 }
7259
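/*
 * Worked example for the CRC mode conversions above: a capability exchange
 * value of (CAP_CRC_14B | CAP_CRC_48B) converts to a port LTP mask of
 * (PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48),
 * since the 16b mode is implicit and always set.  Passing that LTP mask
 * back through port_ltp_to_cap() returns only the two explicit capability
 * bits; the mandatory 16b mode has no capability bit.
 */
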
7260 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7261 {
7262         if (ppd->pkeys[2] != 0) {
7263                 ppd->pkeys[2] = 0;
7264                 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7265                 hfi1_event_pkey_change(ppd->dd, ppd->port);
7266         }
7267 }
7268
7269 /*
7270  * Convert the given link width to the OPA link width bitmask.
7271  */
7272 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7273 {
7274         switch (width) {
7275         case 0:
7276                 /*
7277                  * Simulator and quick linkup do not set the width.
7278                  * Just set it to 4x without complaint.
7279                  */
7280                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7281                         return OPA_LINK_WIDTH_4X;
7282                 return 0; /* no lanes up */
7283         case 1: return OPA_LINK_WIDTH_1X;
7284         case 2: return OPA_LINK_WIDTH_2X;
7285         case 3: return OPA_LINK_WIDTH_3X;
7286         default:
7287                 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7288                             __func__, width);
7289                 /* fall through */
7290         case 4: return OPA_LINK_WIDTH_4X;
7291         }
7292 }
7293
7294 /*
7295  * Do a population count on the bottom nibble.
7296  */
7297 static const u8 bit_counts[16] = {
7298         0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7299 };
7300
7301 static inline u8 nibble_to_count(u8 nibble)
7302 {
7303         return bit_counts[nibble & 0xf];
7304 }
7305
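/*
 * Worked example for the lane count lookup above: an enable_lane nibble of
 * 0xf has all four bits set, so nibble_to_count(0xf) = 4 lanes; 0x3 gives
 * 2 lanes; 0x0 gives 0 lanes (no lanes up).
 */
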
7306 /*
7307  * Read the active lane information from the 8051 registers and return
7308  * their widths.
7309  *
7310  * Active lane information is found in these 8051 registers:
7311  *      enable_lane_tx
7312  *      enable_lane_rx
7313  */
7314 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7315                             u16 *rx_width)
7316 {
7317         u16 tx, rx;
7318         u8 enable_lane_rx;
7319         u8 enable_lane_tx;
7320         u8 tx_polarity_inversion;
7321         u8 rx_polarity_inversion;
7322         u8 max_rate;
7323
7324         /* read the active lanes */
7325         read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7326                          &rx_polarity_inversion, &max_rate);
7327         read_local_lni(dd, &enable_lane_rx);
7328
7329         /* convert to counts */
7330         tx = nibble_to_count(enable_lane_tx);
7331         rx = nibble_to_count(enable_lane_rx);
7332
7333         /*
7334          * Set link_speed_active here, overriding what was set in
7335          * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7336          * set the max_rate field in handle_verify_cap until v0.19.
7337          */
7338         if ((dd->icode == ICODE_RTL_SILICON) &&
7339             (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7340                 /* max_rate: 0 = 12.5G, 1 = 25G */
7341                 switch (max_rate) {
7342                 case 0:
7343                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7344                         break;
7345                 default:
7346                         dd_dev_err(dd,
7347                                    "%s: unexpected max rate %d, using 25Gb\n",
7348                                    __func__, (int)max_rate);
7349                         /* fall through */
7350                 case 1:
7351                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7352                         break;
7353                 }
7354         }
7355
7356         dd_dev_info(dd,
7357                     "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7358                     enable_lane_tx, tx, enable_lane_rx, rx);
7359         *tx_width = link_width_to_bits(dd, tx);
7360         *rx_width = link_width_to_bits(dd, rx);
7361 }
7362
7363 /*
7364  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7365  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7366  * after link up.  I.e. look elsewhere for downgrade information.
7367  *
7368  * Bits are:
7369  *      + bits [7:4] contain the number of active transmitters
7370  *      + bits [3:0] contain the number of active receivers
7371  * These are numbers 1 through 4 and can be different values if the
7372  * link is asymmetric.
7373  *
7374  * verify_cap_local_fm_link_width[0] retains its original value.
7375  */
7376 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7377                               u16 *rx_width)
7378 {
7379         u16 widths, tx, rx;
7380         u8 misc_bits, local_flags;
7381         u16 active_tx, active_rx;
7382
7383         read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7384         tx = widths >> 12;
7385         rx = (widths >> 8) & 0xf;
7386
7387         *tx_width = link_width_to_bits(dd, tx);
7388         *rx_width = link_width_to_bits(dd, rx);
7389
7390         /* print the active widths */
7391         get_link_widths(dd, &active_tx, &active_rx);
7392 }
7393
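/*
 * Worked example for the decode above: verify_cap_local_fm_link_width[1]
 * occupies the upper byte of 'widths', so widths == 0x4400 decodes to
 * tx = 4 and rx = 4 active lanes, which link_width_to_bits() maps to
 * OPA_LINK_WIDTH_4X in both directions.
 */
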
7394 /*
7395  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7396  * hardware information when the link first comes up.
7397  *
7398  * The link width is not available until after VerifyCap.AllFramesReceived
7399  * (the trigger for handle_verify_cap), so this is outside that routine
7400  * and should be called when the 8051 signals linkup.
7401  */
7402 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7403 {
7404         u16 tx_width, rx_width;
7405
7406         /* get end-of-LNI link widths */
7407         get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7408
7409         /* use tx_width as the link is supposed to be symmetric on link up */
7410         ppd->link_width_active = tx_width;
7411         /* link width downgrade active (LWD.A) starts out matching LW.A */
7412         ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7413         ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7414         /* per OPA spec, on link up LWD.E resets to LWD.S */
7415         ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7416         /* cache the active egress rate (units [10^6 bits/sec]) */
7417         ppd->current_egress_rate = active_egress_rate(ppd);
7418 }
7419
7420 /*
7421  * Handle a verify capabilities interrupt from the 8051.
7422  *
7423  * This is a work-queue function outside of the interrupt.
7424  */
7425 void handle_verify_cap(struct work_struct *work)
7426 {
7427         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7428                                                                 link_vc_work);
7429         struct hfi1_devdata *dd = ppd->dd;
7430         u64 reg;
7431         u8 power_management;
7432         u8 continuous;
7433         u8 vcu;
7434         u8 vau;
7435         u8 z;
7436         u16 vl15buf;
7437         u16 link_widths;
7438         u16 crc_mask;
7439         u16 crc_val;
7440         u16 device_id;
7441         u16 active_tx, active_rx;
7442         u8 partner_supported_crc;
7443         u8 remote_tx_rate;
7444         u8 device_rev;
7445
7446         set_link_state(ppd, HLS_VERIFY_CAP);
7447
7448         lcb_shutdown(dd, 0);
7449         adjust_lcb_for_fpga_serdes(dd);
7450
7451         read_vc_remote_phy(dd, &power_management, &continuous);
7452         read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7453                               &partner_supported_crc);
7454         read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7455         read_remote_device_id(dd, &device_id, &device_rev);
7456
7457         /* print the active widths */
7458         get_link_widths(dd, &active_tx, &active_rx);
7459         dd_dev_info(dd,
7460                     "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7461                     (int)power_management, (int)continuous);
7462         dd_dev_info(dd,
7463                     "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7464                     (int)vau, (int)z, (int)vcu, (int)vl15buf,
7465                     (int)partner_supported_crc);
7466         dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7467                     (u32)remote_tx_rate, (u32)link_widths);
7468         dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7469                     (u32)device_id, (u32)device_rev);
7470         /*
7471          * The peer vAU value just read is the peer receiver value.  HFI does
7472          * not support a transmit vAU of 0 (AU == 8).  We advertised that
7473          * with Z=1 in the fabric capabilities sent to the peer.  The peer
7474          * will see our Z=1, and, if it advertised a vAU of 0, will move its
7475          * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7476          * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7477          * subject to the Z value exception.
7478          */
7479         if (vau == 0)
7480                 vau = 1;
7481         set_up_vau(dd, vau);
7482
7483         /*
7484          * Set VL15 credits to 0 in global credit register. Cache remote VL15
7485          * credits value and wait for the link-up interrupt to set it.
7486          */
7487         set_up_vl15(dd, 0);
7488         dd->vl15buf_cached = vl15buf;
7489
7490         /* set up the LCB CRC mode */
7491         crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7492
7493         /* order is important: use the lowest bit in common */
7494         if (crc_mask & CAP_CRC_14B)
7495                 crc_val = LCB_CRC_14B;
7496         else if (crc_mask & CAP_CRC_48B)
7497                 crc_val = LCB_CRC_48B;
7498         else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7499                 crc_val = LCB_CRC_12B_16B_PER_LANE;
7500         else
7501                 crc_val = LCB_CRC_16B;
7502
7503         dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7504         write_csr(dd, DC_LCB_CFG_CRC_MODE,
7505                   (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7506
7507         /* set (14b only) or clear sideband credit */
7508         reg = read_csr(dd, SEND_CM_CTRL);
7509         if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7510                 write_csr(dd, SEND_CM_CTRL,
7511                           reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7512         } else {
7513                 write_csr(dd, SEND_CM_CTRL,
7514                           reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7515         }
7516
7517         ppd->link_speed_active = 0;     /* invalid value */
7518         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7519                 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7520                 switch (remote_tx_rate) {
7521                 case 0:
7522                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7523                         break;
7524                 case 1:
7525                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7526                         break;
7527                 }
7528         } else {
7529                 /* actual rate is highest bit of the ANDed rates */
7530                 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7531
7532                 if (rate & 2)
7533                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7534                 else if (rate & 1)
7535                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7536         }
7537         if (ppd->link_speed_active == 0) {
7538                 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7539                            __func__, (int)remote_tx_rate);
7540                 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7541         }
7542
7543         /*
7544          * Cache the values of the supported, enabled, and active
7545          * LTP CRC modes to return in 'portinfo' queries. But the bit
7546          * flags that are returned in the portinfo query differ from
7547          * what's in the link_crc_mask, crc_sizes, and crc_val
7548          * variables. Convert these here.
7549          */
7550         ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7551                 /* supported crc modes */
7552         ppd->port_ltp_crc_mode |=
7553                 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7554                 /* enabled crc modes */
7555         ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7556                 /* active crc mode */
7557
7558         /* set up the remote credit return table */
7559         assign_remote_cm_au_table(dd, vcu);
7560
7561         /*
7562          * The LCB is reset on entry to handle_verify_cap(), so this must
7563          * be applied on every link up.
7564          *
7565          * Adjust LCB error kill enable to kill the link if
7566          * these RBUF errors are seen:
7567          *      REPLAY_BUF_MBE_SMASK
7568          *      FLIT_INPUT_BUF_MBE_SMASK
7569          */
7570         if (is_ax(dd)) {                        /* fixed in B0 */
7571                 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7572                 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7573                         | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7574                 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7575         }
7576
7577         /* pull LCB fifos out of reset - all fifo clocks must be stable */
7578         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7579
7580         /* give 8051 access to the LCB CSRs */
7581         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7582         set_8051_lcb_access(dd);
7583
7584         /* tell the 8051 to go to LinkUp */
7585         set_link_state(ppd, HLS_GOING_UP);
7586 }
7587
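/*
 * Note on the port_ltp_crc_mode packing done in handle_verify_cap() above
 * (restated from the code): the field carries the supported LTP CRC modes
 * in bits [11:8], the enabled modes in bits [7:4], and the single active
 * mode in bits [3:0], i.e.
 *      port_ltp_crc_mode = (supported << 8) | (enabled << 4) | active
 */
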
7588 /**
7589  * apply_link_downgrade_policy - Apply the link width downgrade enabled
7590  * policy against the current active link widths.
7591  * @ppd: info of physical Hfi port
7592  * @refresh_widths: True indicates link downgrade event
7593  * @return: True indicates a successful link downgrade. False indicates
7594  *          the link downgrade event failed and the link will bounce back
7595  *          to the default link width.
7596  *
7597  * Called when the enabled policy changes or the active link widths
7598  * change.
7599  * Refresh_widths indicates that a link downgrade occurred. The
7600  * link_downgraded variable is set by refresh_widths and
7601  * determines the success/failure of the policy application.
7602  */
7603 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7604                                  bool refresh_widths)
7605 {
7606         int do_bounce = 0;
7607         int tries;
7608         u16 lwde;
7609         u16 tx, rx;
7610         bool link_downgraded = refresh_widths;
7611
7612         /* use the hls lock to avoid a race with actual link up */
7613         tries = 0;
7614 retry:
7615         mutex_lock(&ppd->hls_lock);
7616         /* only apply if the link is up */
7617         if (ppd->host_link_state & HLS_DOWN) {
7618                 /* still going up ... wait and retry */
7619                 if (ppd->host_link_state & HLS_GOING_UP) {
7620                         if (++tries < 1000) {
7621                                 mutex_unlock(&ppd->hls_lock);
7622                                 usleep_range(100, 120); /* arbitrary */
7623                                 goto retry;
7624                         }
7625                         dd_dev_err(ppd->dd,
7626                                    "%s: giving up waiting for link state change\n",
7627                                    __func__);
7628                 }
7629                 goto done;
7630         }
7631
7632         lwde = ppd->link_width_downgrade_enabled;
7633
7634         if (refresh_widths) {
7635                 get_link_widths(ppd->dd, &tx, &rx);
7636                 ppd->link_width_downgrade_tx_active = tx;
7637                 ppd->link_width_downgrade_rx_active = rx;
7638         }
7639
7640         if (ppd->link_width_downgrade_tx_active == 0 ||
7641             ppd->link_width_downgrade_rx_active == 0) {
7642                 /* the 8051 reported a dead link as a downgrade */
7643                 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7644                 link_downgraded = false;
7645         } else if (lwde == 0) {
7646                 /* downgrade is disabled */
7647
7648                 /* bounce if not at starting active width */
7649                 if ((ppd->link_width_active !=
7650                      ppd->link_width_downgrade_tx_active) ||
7651                     (ppd->link_width_active !=
7652                      ppd->link_width_downgrade_rx_active)) {
7653                         dd_dev_err(ppd->dd,
7654                                    "Link downgrade is disabled and link has downgraded, downing link\n");
7655                         dd_dev_err(ppd->dd,
7656                                    "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7657                                    ppd->link_width_active,
7658                                    ppd->link_width_downgrade_tx_active,
7659                                    ppd->link_width_downgrade_rx_active);
7660                         do_bounce = 1;
7661                         link_downgraded = false;
7662                 }
7663         } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7664                    (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7665                 /* Tx or Rx is outside the enabled policy */
7666                 dd_dev_err(ppd->dd,
7667                            "Link is outside of downgrade allowed, downing link\n");
7668                 dd_dev_err(ppd->dd,
7669                            "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7670                            lwde, ppd->link_width_downgrade_tx_active,
7671                            ppd->link_width_downgrade_rx_active);
7672                 do_bounce = 1;
7673                 link_downgraded = false;
7674         }
7675
7676 done:
7677         mutex_unlock(&ppd->hls_lock);
7678
7679         if (do_bounce) {
7680                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7681                                      OPA_LINKDOWN_REASON_WIDTH_POLICY);
7682                 set_link_state(ppd, HLS_DN_OFFLINE);
7683                 start_link(ppd);
7684         }
7685
7686         return link_downgraded;
7687 }
7688
7689 /*
7690  * Handle a link downgrade interrupt from the 8051.
7691  *
7692  * This is a work-queue function outside of the interrupt.
7693  */
7694 void handle_link_downgrade(struct work_struct *work)
7695 {
7696         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7697                                                         link_downgrade_work);
7698
7699         dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7700         if (apply_link_downgrade_policy(ppd, true))
7701                 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7702 }
7703
7704 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7705 {
7706         return flag_string(buf, buf_len, flags, dcc_err_flags,
7707                 ARRAY_SIZE(dcc_err_flags));
7708 }
7709
7710 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7711 {
7712         return flag_string(buf, buf_len, flags, lcb_err_flags,
7713                 ARRAY_SIZE(lcb_err_flags));
7714 }
7715
7716 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7717 {
7718         return flag_string(buf, buf_len, flags, dc8051_err_flags,
7719                 ARRAY_SIZE(dc8051_err_flags));
7720 }
7721
7722 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7723 {
7724         return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7725                 ARRAY_SIZE(dc8051_info_err_flags));
7726 }
7727
7728 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7729 {
7730         return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7731                 ARRAY_SIZE(dc8051_info_host_msg_flags));
7732 }
7733
7734 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7735 {
7736         struct hfi1_pportdata *ppd = dd->pport;
7737         u64 info, err, host_msg;
7738         int queue_link_down = 0;
7739         char buf[96];
7740
7741         /* look at the flags */
7742         if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7743                 /* 8051 information set by firmware */
7744                 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7745                 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7746                 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7747                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7748                 host_msg = (info >>
7749                         DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7750                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7751
7752                 /*
7753                  * Handle error flags.
7754                  */
7755                 if (err & FAILED_LNI) {
7756                         /*
7757                          * LNI error indications are cleared by the 8051
7758                          * only when starting polling.  Only pay attention
7759                          * to them when in the states that occur during
7760                          * LNI.
7761                          */
7762                         if (ppd->host_link_state
7763                             & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7764                                 queue_link_down = 1;
7765                                 dd_dev_info(dd, "Link error: %s\n",
7766                                             dc8051_info_err_string(buf,
7767                                                                    sizeof(buf),
7768                                                                    err &
7769                                                                    FAILED_LNI));
7770                         }
7771                         err &= ~(u64)FAILED_LNI;
7772                 }
7773                 /* unknown frames can happen during LNI, just count */
7774                 if (err & UNKNOWN_FRAME) {
7775                         ppd->unknown_frame_count++;
7776                         err &= ~(u64)UNKNOWN_FRAME;
7777                 }
7778                 if (err) {
7779                         /* report remaining errors, but do not do anything */
7780                         dd_dev_err(dd, "8051 info error: %s\n",
7781                                    dc8051_info_err_string(buf, sizeof(buf),
7782                                                           err));
7783                 }
7784
7785                 /*
7786                  * Handle host message flags.
7787                  */
7788                 if (host_msg & HOST_REQ_DONE) {
7789                         /*
7790                          * Presently, the driver does a busy wait for
7791                          * host requests to complete.  This is only an
7792                          * informational message.
7793                          * NOTE: The 8051 clears the host message
7794                          * information *on the next 8051 command*.
7795                          * Therefore, when linkup is achieved,
7796                          * this flag will still be set.
7797                          */
7798                         host_msg &= ~(u64)HOST_REQ_DONE;
7799                 }
7800                 if (host_msg & BC_SMA_MSG) {
7801                         queue_work(ppd->link_wq, &ppd->sma_message_work);
7802                         host_msg &= ~(u64)BC_SMA_MSG;
7803                 }
7804                 if (host_msg & LINKUP_ACHIEVED) {
7805                         dd_dev_info(dd, "8051: Link up\n");
7806                         queue_work(ppd->link_wq, &ppd->link_up_work);
7807                         host_msg &= ~(u64)LINKUP_ACHIEVED;
7808                 }
7809                 if (host_msg & EXT_DEVICE_CFG_REQ) {
7810                         handle_8051_request(ppd);
7811                         host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7812                 }
7813                 if (host_msg & VERIFY_CAP_FRAME) {
7814                         queue_work(ppd->link_wq, &ppd->link_vc_work);
7815                         host_msg &= ~(u64)VERIFY_CAP_FRAME;
7816                 }
7817                 if (host_msg & LINK_GOING_DOWN) {
7818                         const char *extra = "";
7819                         /* no downgrade action needed if going down */
7820                         if (host_msg & LINK_WIDTH_DOWNGRADED) {
7821                                 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7822                                 extra = " (ignoring downgrade)";
7823                         }
7824                         dd_dev_info(dd, "8051: Link down%s\n", extra);
7825                         queue_link_down = 1;
7826                         host_msg &= ~(u64)LINK_GOING_DOWN;
7827                 }
7828                 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7829                         queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7830                         host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7831                 }
7832                 if (host_msg) {
7833                         /* report remaining messages, but do not do anything */
7834                         dd_dev_info(dd, "8051 info host message: %s\n",
7835                                     dc8051_info_host_msg_string(buf,
7836                                                                 sizeof(buf),
7837                                                                 host_msg));
7838                 }
7839
7840                 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7841         }
7842         if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7843                 /*
7844                  * Lost the 8051 heartbeat.  If this happens, we
7845                  * receive constant interrupts about it.  Disable
7846                  * the interrupt after the first.
7847                  */
7848                 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7849                 write_csr(dd, DC_DC8051_ERR_EN,
7850                           read_csr(dd, DC_DC8051_ERR_EN) &
7851                           ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7852
7853                 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7854         }
7855         if (reg) {
7856                 /* report the error, but do not do anything */
7857                 dd_dev_err(dd, "8051 error: %s\n",
7858                            dc8051_err_string(buf, sizeof(buf), reg));
7859         }
7860
7861         if (queue_link_down) {
7862                 /*
7863                  * if the link is already going down or disabled, do not
7864                  * queue another. If there's a link down entry already
7865                  * queued, don't queue another one.
7866                  */
7867                 if ((ppd->host_link_state &
7868                     (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7869                     ppd->link_enabled == 0) {
7870                         dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7871                                     __func__, ppd->host_link_state,
7872                                     ppd->link_enabled);
7873                 } else {
7874                         if (xchg(&ppd->is_link_down_queued, 1) == 1)
7875                                 dd_dev_info(dd,
7876                                             "%s: link down request already queued\n",
7877                                             __func__);
7878                         else
7879                                 queue_work(ppd->link_wq, &ppd->link_down_work);
7880                 }
7881         }
7882 }
7883
7884 static const char * const fm_config_txt[] = {
7885 [0] =
7886         "BadHeadDist: Distance violation between two head flits",
7887 [1] =
7888         "BadTailDist: Distance violation between two tail flits",
7889 [2] =
7890         "BadCtrlDist: Distance violation between two credit control flits",
7891 [3] =
7892         "BadCrdAck: Credits return for unsupported VL",
7893 [4] =
7894         "UnsupportedVLMarker: Received VL Marker",
7895 [5] =
7896         "BadPreempt: Exceeded the preemption nesting level",
7897 [6] =
7898         "BadControlFlit: Received unsupported control flit",
7899 /* no 7 */
7900 [8] =
7901         "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7902 };
7903
7904 static const char * const port_rcv_txt[] = {
7905 [1] =
7906         "BadPktLen: Illegal PktLen",
7907 [2] =
7908         "PktLenTooLong: Packet longer than PktLen",
7909 [3] =
7910         "PktLenTooShort: Packet shorter than PktLen",
7911 [4] =
7912         "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7913 [5] =
7914         "BadDLID: Illegal DLID (0, doesn't match HFI)",
7915 [6] =
7916         "BadL2: Illegal L2 opcode",
7917 [7] =
7918         "BadSC: Unsupported SC",
7919 [9] =
7920         "BadRC: Illegal RC",
7921 [11] =
7922         "PreemptError: Preempting with same VL",
7923 [12] =
7924         "PreemptVL15: Preempting a VL15 packet",
7925 };
7926
7927 #define OPA_LDR_FMCONFIG_OFFSET 16
7928 #define OPA_LDR_PORTRCV_OFFSET 0
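/*
 * Note: handle_dcc_err() tests ppd->port_error_action against these offsets:
 * FM config error code N maps to bit (OPA_LDR_FMCONFIG_OFFSET + N) and port
 * receive error code N to bit (OPA_LDR_PORTRCV_OFFSET + N); a set bit
 * requests a link bounce for that error.
 */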
7929 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7930 {
7931         u64 info, hdr0, hdr1;
7932         const char *extra;
7933         char buf[96];
7934         struct hfi1_pportdata *ppd = dd->pport;
7935         u8 lcl_reason = 0;
7936         int do_bounce = 0;
7937
7938         if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7939                 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7940                         info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7941                         dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7942                         /* set status bit */
7943                         dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7944                 }
7945                 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7946         }
7947
7948         if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7949                 struct hfi1_pportdata *ppd = dd->pport;
7950                 /* this counter saturates at (2^32) - 1 */
7951                 if (ppd->link_downed < (u32)UINT_MAX)
7952                         ppd->link_downed++;
7953                 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7954         }
7955
7956         if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7957                 u8 reason_valid = 1;
7958
7959                 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7960                 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7961                         dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7962                         /* set status bit */
7963                         dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7964                 }
7965                 switch (info) {
7966                 case 0:
7967                 case 1:
7968                 case 2:
7969                 case 3:
7970                 case 4:
7971                 case 5:
7972                 case 6:
7973                         extra = fm_config_txt[info];
7974                         break;
7975                 case 8:
7976                         extra = fm_config_txt[info];
7977                         if (ppd->port_error_action &
7978                             OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7979                                 do_bounce = 1;
7980                                 /*
7981                                  * lcl_reason cannot be derived from info
7982                                  * for this error
7983                                  */
7984                                 lcl_reason =
7985                                   OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7986                         }
7987                         break;
7988                 default:
7989                         reason_valid = 0;
7990                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7991                         extra = buf;
7992                         break;
7993                 }
7994
7995                 if (reason_valid && !do_bounce) {
7996                         do_bounce = ppd->port_error_action &
7997                                         (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7998                         lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7999                 }
8000
8001                 /* just report this */
8002                 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
8003                                         extra);
8004                 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
8005         }
8006
8007         if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8008                 u8 reason_valid = 1;
8009
8010                 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8011                 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8012                 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8013                 if (!(dd->err_info_rcvport.status_and_code &
8014                       OPA_EI_STATUS_SMASK)) {
8015                         dd->err_info_rcvport.status_and_code =
8016                                 info & OPA_EI_CODE_SMASK;
8017                         /* set status bit */
8018                         dd->err_info_rcvport.status_and_code |=
8019                                 OPA_EI_STATUS_SMASK;
8020                         /*
8021                          * save first 2 flits in the packet that caused
8022                          * the error
8023                          */
8024                         dd->err_info_rcvport.packet_flit1 = hdr0;
8025                         dd->err_info_rcvport.packet_flit2 = hdr1;
8026                 }
8027                 switch (info) {
8028                 case 1:
8029                 case 2:
8030                 case 3:
8031                 case 4:
8032                 case 5:
8033                 case 6:
8034                 case 7:
8035                 case 9:
8036                 case 11:
8037                 case 12:
8038                         extra = port_rcv_txt[info];
8039                         break;
8040                 default:
8041                         reason_valid = 0;
8042                         snprintf(buf, sizeof(buf), "reserved%lld", info);
8043                         extra = buf;
8044                         break;
8045                 }
8046
8047                 if (reason_valid && !do_bounce) {
8048                         do_bounce = ppd->port_error_action &
8049                                         (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8050                         lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8051                 }
8052
8053                 /* just report this */
8054                 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8055                                         "               hdr0 0x%llx, hdr1 0x%llx\n",
8056                                         extra, hdr0, hdr1);
8057
8058                 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8059         }
8060
8061         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8062                 /* informative only */
8063                 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8064                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8065         }
8066         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8067                 /* informative only */
8068                 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8069                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8070         }
8071
8072         if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8073                 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8074
8075         /* report any remaining errors */
8076         if (reg)
8077                 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8078                                         dcc_err_string(buf, sizeof(buf), reg));
8079
8080         if (lcl_reason == 0)
8081                 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8082
8083         if (do_bounce) {
8084                 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8085                                         __func__);
8086                 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8087                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8088         }
8089 }
8090
8091 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8092 {
8093         char buf[96];
8094
8095         dd_dev_info(dd, "LCB Error: %s\n",
8096                     lcb_err_string(buf, sizeof(buf), reg));
8097 }
8098
8099 /*
8100  * CCE block DC interrupt.  Source is < 8.
8101  */
8102 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8103 {
8104         const struct err_reg_info *eri = &dc_errs[source];
8105
8106         if (eri->handler) {
8107                 interrupt_clear_down(dd, 0, eri);
8108         } else if (source == 3 /* dc_lbm_int */) {
8109                 /*
8110                  * This indicates that a parity error has occurred on the
8111                  * address/control lines presented to the LBM.  The error
8112                  * is a single pulse, there is no associated error flag,
8113                  * and it is non-maskable.  This is because if a parity
8114          * error occurs on the request, the request is dropped.
8115                  * This should never occur, but it is nice to know if it
8116                  * ever does.
8117                  */
8118                 dd_dev_err(dd, "Parity error in DC LBM block\n");
8119         } else {
8120                 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8121         }
8122 }
8123
8124 /*
8125  * TX block send credit interrupt.  Source is < 160.
8126  */
8127 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8128 {
8129         sc_group_release_update(dd, source);
8130 }
8131
8132 /*
8133  * TX block SDMA interrupt.  Source is < 48.
8134  *
8135  * SDMA interrupts are grouped by type:
8136  *
8137  *       0 -  N-1 = SDma
8138  *       N - 2N-1 = SDmaProgress
8139  *      2N - 3N-1 = SDmaIdle
8140  */
8141 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8142 {
8143         /* what interrupt */
8144         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8145         /* which engine */
8146         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
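        /*
         * Worked example (assuming TXE_NUM_SDMA_ENGINES is 16): source 20
         * decodes to what = 1 (SDmaProgress) and which = 4, i.e. a progress
         * interrupt for engine 4.
         */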
8147
8148 #ifdef CONFIG_SDMA_VERBOSITY
8149         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8150                    slashstrip(__FILE__), __LINE__, __func__);
8151         sdma_dumpstate(&dd->per_sdma[which]);
8152 #endif
8153
8154         if (likely(what < 3 && which < dd->num_sdma)) {
8155                 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8156         } else {
8157                 /* should not happen */
8158                 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8159         }
8160 }
8161
8162 /**
8163  * is_rcv_avail_int() - User receive context available IRQ handler
8164  * @dd: valid dd
8165  * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8166  *
8167  * RX block receive available interrupt.  Source is < 160.
8168  *
8169  * This is the general interrupt handler for user (PSM) receive contexts,
8170  * and can only be used for non-threaded IRQs.
8171  */
8172 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8173 {
8174         struct hfi1_ctxtdata *rcd;
8175         char *err_detail;
8176
8177         if (likely(source < dd->num_rcv_contexts)) {
8178                 rcd = hfi1_rcd_get_by_index(dd, source);
8179                 if (rcd) {
8180                         handle_user_interrupt(rcd);
8181                         hfi1_rcd_put(rcd);
8182                         return; /* OK */
8183                 }
8184                 /* received an interrupt, but no rcd */
8185                 err_detail = "dataless";
8186         } else {
8187                 /* received an interrupt, but are not using that context */
8188                 err_detail = "out of range";
8189         }
8190         dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8191                    err_detail, source);
8192 }
8193
8194 /**
8195  * is_rcv_urgent_int() - User receive context urgent IRQ handler
8196  * @dd: valid dd
8197  * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8198  *
8199  * RX block receive urgent interrupt.  Source is < 160.
8200  *
8201  * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8202  */
8203 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8204 {
8205         struct hfi1_ctxtdata *rcd;
8206         char *err_detail;
8207
8208         if (likely(source < dd->num_rcv_contexts)) {
8209                 rcd = hfi1_rcd_get_by_index(dd, source);
8210                 if (rcd) {
8211                         handle_user_interrupt(rcd);
8212                         hfi1_rcd_put(rcd);
8213                         return; /* OK */
8214                 }
8215                 /* received an interrupt, but no rcd */
8216                 err_detail = "dataless";
8217         } else {
8218                 /* received an interrupt, but are not using that context */
8219                 err_detail = "out of range";
8220         }
8221         dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8222                    err_detail, source);
8223 }
8224
8225 /*
8226  * Reserved range interrupt.  Should not be called in normal operation.
8227  */
8228 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8229 {
8230         char name[64];
8231
8232         dd_dev_err(dd, "unexpected %s interrupt\n",
8233                    is_reserved_name(name, sizeof(name), source));
8234 }
8235
8236 static const struct is_table is_table[] = {
8237 /*
8238  * start                 end
8239  *                              name func               interrupt func
8240  */
8241 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8242                                 is_misc_err_name,       is_misc_err_int },
8243 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8244                                 is_sdma_eng_err_name,   is_sdma_eng_err_int },
8245 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8246                                 is_sendctxt_err_name,   is_sendctxt_err_int },
8247 { IS_SDMA_START,             IS_SDMA_IDLE_END,
8248                                 is_sdma_eng_name,       is_sdma_eng_int },
8249 { IS_VARIOUS_START,          IS_VARIOUS_END,
8250                                 is_various_name,        is_various_int },
8251 { IS_DC_START,       IS_DC_END,
8252                                 is_dc_name,             is_dc_int },
8253 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8254                                 is_rcv_avail_name,      is_rcv_avail_int },
8255 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8256                                 is_rcv_urgent_name,     is_rcv_urgent_int },
8257 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8258                                 is_send_credit_name,    is_send_credit_int},
8259 { IS_RESERVED_START,     IS_RESERVED_END,
8260                                 is_reserved_name,       is_reserved_int},
8261 };
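
/*
 * Note: the entries above must stay sorted by ascending source range;
 * is_interrupt() dispatches on the first entry whose end covers the source
 * and passes the handler the offset from that entry's start.
 */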
8262
8263 /*
8264  * Interrupt source interrupt - called when the given source has an interrupt.
8265  * Source is a bit index into an array of 64-bit integers.
8266  */
8267 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8268 {
8269         const struct is_table *entry;
8270
8271         /* avoids a double compare by walking the table in-order */
8272         for (entry = &is_table[0]; entry->is_name; entry++) {
8273                 if (source <= entry->end) {
8274                         trace_hfi1_interrupt(dd, entry, source);
8275                         entry->is_int(dd, source - entry->start);
8276                         return;
8277                 }
8278         }
8279         /* fell off the end */
8280         dd_dev_err(dd, "invalid interrupt source %u\n", source);
8281 }
8282
8283 /**
8284  * general_interrupt() - General interrupt handler
8285  * @irq: MSIx IRQ vector
8286  * @data: hfi1 devdata
8287  *
8288  * This is able to correctly handle all non-threaded interrupts.  Receive
8289  * context DATA IRQs are threaded and are not supported by this handler.
8290  *
8291  */
8292 irqreturn_t general_interrupt(int irq, void *data)
8293 {
8294         struct hfi1_devdata *dd = data;
8295         u64 regs[CCE_NUM_INT_CSRS];
8296         u32 bit;
8297         int i;
8298         irqreturn_t handled = IRQ_NONE;
8299
8300         this_cpu_inc(*dd->int_counter);
8301
8302         /* phase 1: scan and clear all handled interrupts */
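        /*
         * All enabled status bits are snapshotted into regs[] and cleared in
         * the chip here; phase 2 dispatches handlers purely from that
         * snapshot.
         */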
8303         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8304                 if (dd->gi_mask[i] == 0) {
8305                         regs[i] = 0;    /* used later */
8306                         continue;
8307                 }
8308                 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8309                                 dd->gi_mask[i];
8310                 /* only clear if anything is set */
8311                 if (regs[i])
8312                         write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8313         }
8314
8315         /* phase 2: call the appropriate handler */
8316         for_each_set_bit(bit, (unsigned long *)&regs[0],
8317                          CCE_NUM_INT_CSRS * 64) {
8318                 is_interrupt(dd, bit);
8319                 handled = IRQ_HANDLED;
8320         }
8321
8322         return handled;
8323 }
8324
8325 irqreturn_t sdma_interrupt(int irq, void *data)
8326 {
8327         struct sdma_engine *sde = data;
8328         struct hfi1_devdata *dd = sde->dd;
8329         u64 status;
8330
8331 #ifdef CONFIG_SDMA_VERBOSITY
8332         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8333                    slashstrip(__FILE__), __LINE__, __func__);
8334         sdma_dumpstate(sde);
8335 #endif
8336
8337         this_cpu_inc(*dd->int_counter);
8338
8339         /* This read_csr is really bad in the hot path */
8340         status = read_csr(dd,
8341                           CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8342                           & sde->imask;
8343         if (likely(status)) {
8344                 /* clear the interrupt(s) */
8345                 write_csr(dd,
8346                           CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8347                           status);
8348
8349                 /* handle the interrupt(s) */
8350                 sdma_engine_interrupt(sde, status);
8351         } else {
8352                 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8353                                         sde->this_idx);
8354         }
8355         return IRQ_HANDLED;
8356 }
8357
8358 /*
8359  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8360  * to ensure that the write completed.  This does NOT guarantee that
8361  * queued DMA writes to memory from the chip are pushed.
8362  */
8363 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8364 {
8365         struct hfi1_devdata *dd = rcd->dd;
8366         u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8367
8368         mmiowb();       /* make sure everything before is written */
8369         write_csr(dd, addr, rcd->imask);
8370         /* force the above write on the chip and get a value back */
8371         (void)read_csr(dd, addr);
8372 }
8373
8374 /* force the receive interrupt */
8375 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8376 {
8377         write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8378 }
8379
8380 /*
8381  * Return non-zero if a packet is present.
8382  *
8383  * This routine is called when rechecking for packets after the RcvAvail
8384  * interrupt has been cleared down.  First, do a quick check of memory for
8385  * a packet present.  If not found, use an expensive CSR read of the context
8386  * tail to determine the actual tail.  The CSR read is necessary because there
8387  * is no method to push pending DMAs to memory other than an interrupt and we
8388  * are trying to determine if we need to force an interrupt.
8389  */
8390 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8391 {
8392         u32 tail;
8393         int present;
8394
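        /*
         * Quick memory check first: without a DMA'd header tail, a packet is
         * present when the RHF sequence number matches the expected sequence
         * count; otherwise compare the software head against the DMA'd tail.
         */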
8395         if (!rcd->rcvhdrtail_kvaddr)
8396                 present = (rcd->seq_cnt ==
8397                                 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8398         else /* is RDMA rtail */
8399                 present = (rcd->head != get_rcvhdrtail(rcd));
8400
8401         if (present)
8402                 return 1;
8403
8404         /* fall back to a CSR read, correct independent of DMA_RTAIL */
8405         tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8406         return rcd->head != tail;
8407 }
8408
8409 /*
8410  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8411  * This routine will try to handle packets immediately (latency), but if
8412  * it finds too many, it will invoke the thread handler (bandwidth).  The
8413  * chip receive interrupt is *not* cleared down until this or the thread (if
8414  * invoked) is finished.  The intent is to avoid extra interrupts while we
8415  * are processing packets anyway.
8416  */
8417 irqreturn_t receive_context_interrupt(int irq, void *data)
8418 {
8419         struct hfi1_ctxtdata *rcd = data;
8420         struct hfi1_devdata *dd = rcd->dd;
8421         int disposition;
8422         int present;
8423
8424         trace_hfi1_receive_interrupt(dd, rcd);
8425         this_cpu_inc(*dd->int_counter);
8426         aspm_ctx_disable(rcd);
8427
8428         /* receive interrupt remains blocked while processing packets */
8429         disposition = rcd->do_interrupt(rcd, 0);
8430
8431         /*
8432          * Too many packets were seen while processing packets in this
8433          * IRQ handler.  Invoke the handler thread.  The receive interrupt
8434          * remains blocked.
8435          */
8436         if (disposition == RCV_PKT_LIMIT)
8437                 return IRQ_WAKE_THREAD;
8438
8439         /*
8440          * The packet processor detected no more packets.  Clear the receive
8441          * interrupt and recheck for a packet that may have arrived
8442          * after the previous check and interrupt clear.  If a packet arrived,
8443          * force another interrupt.
8444          */
8445         clear_recv_intr(rcd);
8446         present = check_packet_present(rcd);
8447         if (present)
8448                 force_recv_intr(rcd);
8449
8450         return IRQ_HANDLED;
8451 }
8452
8453 /*
8454  * Receive packet thread handler.  This expects to be invoked with the
8455  * receive interrupt still blocked.
8456  */
8457 irqreturn_t receive_context_thread(int irq, void *data)
8458 {
8459         struct hfi1_ctxtdata *rcd = data;
8460         int present;
8461
8462         /* receive interrupt is still blocked from the IRQ handler */
8463         (void)rcd->do_interrupt(rcd, 1);
8464
8465         /*
8466          * The packet processor will only return if it detected no more
8467          * packets.  Hold IRQs here so we can safely clear the interrupt and
8468          * recheck for a packet that may have arrived after the previous
8469          * check and the interrupt clear.  If a packet arrived, force another
8470          * interrupt.
8471          */
8472         local_irq_disable();
8473         clear_recv_intr(rcd);
8474         present = check_packet_present(rcd);
8475         if (present)
8476                 force_recv_intr(rcd);
8477         local_irq_enable();
8478
8479         return IRQ_HANDLED;
8480 }
8481
8482 /* ========================================================================= */
8483
8484 u32 read_physical_state(struct hfi1_devdata *dd)
8485 {
8486         u64 reg;
8487
8488         reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8489         return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8490                                 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8491 }
8492
8493 u32 read_logical_state(struct hfi1_devdata *dd)
8494 {
8495         u64 reg;
8496
8497         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8498         return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8499                                 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8500 }
8501
8502 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8503 {
8504         u64 reg;
8505
8506         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8507         /* clear current state, set new state */
8508         reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8509         reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8510         write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8511 }
8512
8513 /*
8514  * Use the 8051 to read a LCB CSR.
8515  */
8516 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8517 {
8518         u32 regno;
8519         int ret;
8520
8521         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8522                 if (acquire_lcb_access(dd, 0) == 0) {
8523                         *data = read_csr(dd, addr);
8524                         release_lcb_access(dd, 0);
8525                         return 0;
8526                 }
8527                 return -EBUSY;
8528         }
8529
8530         /* register is an index of LCB registers: (offset - base) / 8 */
8531         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8532         ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8533         if (ret != HCMD_SUCCESS)
8534                 return -EBUSY;
8535         return 0;
8536 }
8537
8538 /*
8539  * Provide a cache for some of the LCB registers in case the LCB is
8540  * unavailable.
8541  * (The LCB is unavailable in certain link states, for example.)
8542  */
8543 struct lcb_datum {
8544         u32 off;
8545         u64 val;
8546 };
8547
8548 static struct lcb_datum lcb_cache[] = {
8549         { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8550         { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8551         { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8552 };
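
/*
 * update_lcb_cache() refreshes these values whenever the LCB can actually
 * be read; read_lcb_csr() falls back to the cached values while the link
 * is going up or going offline.
 */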
8553
8554 static void update_lcb_cache(struct hfi1_devdata *dd)
8555 {
8556         int i;
8557         int ret;
8558         u64 val;
8559
8560         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8561                 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8562
8563                 /* Update if we get good data */
8564                 if (likely(ret != -EBUSY))
8565                         lcb_cache[i].val = val;
8566         }
8567 }
8568
8569 static int read_lcb_cache(u32 off, u64 *val)
8570 {
8571         int i;
8572
8573         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8574                 if (lcb_cache[i].off == off) {
8575                         *val = lcb_cache[i].val;
8576                         return 0;
8577                 }
8578         }
8579
8580         pr_warn("%s bad offset 0x%x\n", __func__, off);
8581         return -1;
8582 }
8583
8584 /*
8585  * Read an LCB CSR.  Access may not be in host control, so check.
8586  * Return 0 on success, -EBUSY on failure.
8587  */
8588 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8589 {
8590         struct hfi1_pportdata *ppd = dd->pport;
8591
8592         /* if up, go through the 8051 for the value */
8593         if (ppd->host_link_state & HLS_UP)
8594                 return read_lcb_via_8051(dd, addr, data);
8595         /* if going up or down, check the cache, otherwise, no access */
8596         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8597                 if (read_lcb_cache(addr, data))
8598                         return -EBUSY;
8599                 return 0;
8600         }
8601
8602         /* otherwise, host has access */
8603         *data = read_csr(dd, addr);
8604         return 0;
8605 }
8606
8607 /*
8608  * Use the 8051 to write a LCB CSR.
8609  */
8610 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8611 {
8612         u32 regno;
8613         int ret;
8614
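        /*
         * In the simulator, or when the 8051 firmware predates version
         * 0.20.0, write the CSR directly under host LCB access instead of
         * going through the 8051 command interface.
         */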
8615         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8616             (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8617                 if (acquire_lcb_access(dd, 0) == 0) {
8618                         write_csr(dd, addr, data);
8619                         release_lcb_access(dd, 0);
8620                         return 0;
8621                 }
8622                 return -EBUSY;
8623         }
8624
8625         /* register is an index of LCB registers: (offset - base) / 8 */
8626         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8627         ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8628         if (ret != HCMD_SUCCESS)
8629                 return -EBUSY;
8630         return 0;
8631 }
8632
8633 /*
8634  * Write an LCB CSR.  Access may not be in host control, so check.
8635  * Return 0 on success, -EBUSY on failure.
8636  */
8637 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8638 {
8639         struct hfi1_pportdata *ppd = dd->pport;
8640
8641         /* if up, go through the 8051 for the value */
8642         if (ppd->host_link_state & HLS_UP)
8643                 return write_lcb_via_8051(dd, addr, data);
8644         /* if going up or down, no access */
8645         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8646                 return -EBUSY;
8647         /* otherwise, host has access */
8648         write_csr(dd, addr, data);
8649         return 0;
8650 }
8651
8652 /*
8653  * Returns:
8654  *      < 0 = Linux error, not able to get access
8655  *      > 0 = 8051 command RETURN_CODE
8656  */
8657 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8658                            u64 *out_data)
8659 {
8660         u64 reg, completed;
8661         int return_code;
8662         unsigned long timeout;
8663
8664         hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8665
8666         mutex_lock(&dd->dc8051_lock);
8667
8668         /* We can't send any commands to the 8051 if it's in reset */
8669         if (dd->dc_shutdown) {
8670                 return_code = -ENODEV;
8671                 goto fail;
8672         }
8673
8674         /*
8675          * If an 8051 host command timed out previously, then the 8051 is
8676          * stuck.
8677          *
8678          * On first timeout, attempt to reset and restart the entire DC
8679          * block (including 8051). (Is this too big of a hammer?)
8680          *
8681          * If the 8051 times out a second time, the reset did not bring it
8682          * back to healthy life. In that case, fail any subsequent commands.
8683          */
8684         if (dd->dc8051_timed_out) {
8685                 if (dd->dc8051_timed_out > 1) {
8686                         dd_dev_err(dd,
8687                                    "Previous 8051 host command timed out, skipping command %u\n",
8688                                    type);
8689                         return_code = -ENXIO;
8690                         goto fail;
8691                 }
8692                 _dc_shutdown(dd);
8693                 _dc_start(dd);
8694         }
8695
8696         /*
8697          * If there is no timeout, then the 8051 command interface is
8698          * waiting for a command.
8699          */
8700
8701         /*
8702          * When writing an LCB CSR, out_data contains the full value
8703          * to be written, while in_data contains the relative LCB
8704          * address in 7:0.  Do the work here, rather than the caller,
8705          * of distributing the write data to where it needs to go:
8706          *
8707          * Write data
8708          *   39:00 -> in_data[47:8]
8709          *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8710          *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8711          */
8712         if (type == HCMD_WRITE_LCB_CSR) {
8713                 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8714                 /* must preserve COMPLETED - it is tied to hardware */
8715                 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8716                 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8717                 reg |= ((((*out_data) >> 40) & 0xff) <<
8718                                 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8719                       | ((((*out_data) >> 48) & 0xffff) <<
8720                                 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8721                 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8722         }
8723
8724         /*
8725          * Do two writes: the first to stabilize the type and req_data, the
8726          * second to activate.
8727          */
8728         reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8729                         << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8730                 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8731                         << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8732         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8733         reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8734         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8735
8736         /* wait for completion, alternate: interrupt */
8737         timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8738         while (1) {
8739                 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8740                 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8741                 if (completed)
8742                         break;
8743                 if (time_after(jiffies, timeout)) {
8744                         dd->dc8051_timed_out++;
8745                         dd_dev_err(dd, "8051 host command %u timeout\n", type);
8746                         if (out_data)
8747                                 *out_data = 0;
8748                         return_code = -ETIMEDOUT;
8749                         goto fail;
8750                 }
8751                 udelay(2);
8752         }
8753
8754         if (out_data) {
8755                 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8756                                 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8757                 if (type == HCMD_READ_LCB_CSR) {
8758                         /* top 16 bits are in a different register */
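                        /*
                         * The REQ_DATA field is masked in place and shifted
                         * so that its least significant bit lands at bit 48
                         * of the returned value.
                         */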
8759                         *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8760                                 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8761                                 << (48
8762                                     - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8763                 }
8764         }
8765         return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8766                                 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8767         dd->dc8051_timed_out = 0;
8768         /*
8769          * Clear command for next user.
8770          */
8771         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8772
8773 fail:
8774         mutex_unlock(&dd->dc8051_lock);
8775         return return_code;
8776 }
8777
8778 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8779 {
8780         return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8781 }
8782
8783 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8784                      u8 lane_id, u32 config_data)
8785 {
8786         u64 data;
8787         int ret;
8788
8789         data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8790                 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8791                 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8792         ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8793         if (ret != HCMD_SUCCESS) {
8794                 dd_dev_err(dd,
8795                            "load 8051 config: field id %d, lane %d, err %d\n",
8796                            (int)field_id, (int)lane_id, ret);
8797         }
8798         return ret;
8799 }
8800
8801 /*
8802  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8803  * set the result, even on error.
8804  * Return 0 on success, -errno on failure
8805  */
8806 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8807                      u32 *result)
8808 {
8809         u64 big_data;
8810         u32 addr;
8811         int ret;
8812
8813         /* address start depends on the lane_id */
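        /*
         * Each field is 4 bytes wide: the general config fields (lane_id of
         * 4 or above) start at offset 0 and the per-lane fields follow,
         * NUM_LANE_FIELDS fields per lane.
         */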
8814         if (lane_id < 4)
8815                 addr = (4 * NUM_GENERAL_FIELDS)
8816                         + (lane_id * 4 * NUM_LANE_FIELDS);
8817         else
8818                 addr = 0;
8819         addr += field_id * 4;
8820
8821         /* read is in 8-byte chunks, hardware will truncate the address down */
8822         ret = read_8051_data(dd, addr, 8, &big_data);
8823
8824         if (ret == 0) {
8825                 /* extract the 4 bytes we want */
8826                 if (addr & 0x4)
8827                         *result = (u32)(big_data >> 32);
8828                 else
8829                         *result = (u32)big_data;
8830         } else {
8831                 *result = 0;
8832                 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8833                            __func__, lane_id, field_id);
8834         }
8835
8836         return ret;
8837 }
8838
8839 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8840                               u8 continuous)
8841 {
8842         u32 frame;
8843
8844         frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8845                 | power_management << POWER_MANAGEMENT_SHIFT;
8846         return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8847                                 GENERAL_CONFIG, frame);
8848 }
8849
8850 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8851                                  u16 vl15buf, u8 crc_sizes)
8852 {
8853         u32 frame;
8854
8855         frame = (u32)vau << VAU_SHIFT
8856                 | (u32)z << Z_SHIFT
8857                 | (u32)vcu << VCU_SHIFT
8858                 | (u32)vl15buf << VL15BUF_SHIFT
8859                 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8860         return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8861                                 GENERAL_CONFIG, frame);
8862 }
8863
8864 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8865                                     u8 *flag_bits, u16 *link_widths)
8866 {
8867         u32 frame;
8868
8869         read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8870                          &frame);
8871         *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8872         *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8873         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8874 }
8875
8876 static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8877                                     u8 misc_bits,
8878                                     u8 flag_bits,
8879                                     u16 link_widths)
8880 {
8881         u32 frame;
8882
8883         frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8884                 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8885                 | (u32)link_widths << LINK_WIDTH_SHIFT;
8886         return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8887                      frame);
8888 }
8889
8890 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8891                                  u8 device_rev)
8892 {
8893         u32 frame;
8894
8895         frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8896                 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8897         return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8898 }
8899
8900 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8901                                   u8 *device_rev)
8902 {
8903         u32 frame;
8904
8905         read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8906         *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8907         *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8908                         & REMOTE_DEVICE_REV_MASK;
8909 }
8910
8911 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8912 {
8913         u32 frame;
8914         u32 mask;
8915
8916         mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8917         read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8918         /* Clear, then set field */
8919         frame &= ~mask;
8920         frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8921         return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8922                                 frame);
8923 }
8924
8925 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8926                       u8 *ver_patch)
8927 {
8928         u32 frame;
8929
8930         read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8931         *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8932                 STS_FM_VERSION_MAJOR_MASK;
8933         *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8934                 STS_FM_VERSION_MINOR_MASK;
8935
8936         read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8937         *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8938                 STS_FM_VERSION_PATCH_MASK;
8939 }
8940
8941 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8942                                u8 *continuous)
8943 {
8944         u32 frame;
8945
8946         read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8947         *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8948                                         & POWER_MANAGEMENT_MASK;
8949         *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8950                                         & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8951 }
8952
8953 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8954                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8955 {
8956         u32 frame;
8957
8958         read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8959         *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8960         *z = (frame >> Z_SHIFT) & Z_MASK;
8961         *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8962         *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8963         *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8964 }
8965
8966 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8967                                       u8 *remote_tx_rate,
8968                                       u16 *link_widths)
8969 {
8970         u32 frame;
8971
8972         read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8973                          &frame);
8974         *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8975                                 & REMOTE_TX_RATE_MASK;
8976         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8977 }
8978
8979 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8980 {
8981         u32 frame;
8982
8983         read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8984         *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8985 }
8986
8987 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8988 {
8989         read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8990 }
8991
8992 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8993 {
8994         read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8995 }
8996
8997 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8998 {
8999         u32 frame;
9000         int ret;
9001
9002         *link_quality = 0;
9003         if (dd->pport->host_link_state & HLS_UP) {
9004                 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9005                                        &frame);
9006                 if (ret == 0)
9007                         *link_quality = (frame >> LINK_QUALITY_SHIFT)
9008                                                 & LINK_QUALITY_MASK;
9009         }
9010 }
9011
9012 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9013 {
9014         u32 frame;
9015
9016         read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9017         *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9018 }
9019
9020 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9021 {
9022         u32 frame;
9023
9024         read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9025         *ldr = (frame & 0xff);
9026 }
9027
9028 static int read_tx_settings(struct hfi1_devdata *dd,
9029                             u8 *enable_lane_tx,
9030                             u8 *tx_polarity_inversion,
9031                             u8 *rx_polarity_inversion,
9032                             u8 *max_rate)
9033 {
9034         u32 frame;
9035         int ret;
9036
9037         ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9038         *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9039                                 & ENABLE_LANE_TX_MASK;
9040         *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9041                                 & TX_POLARITY_INVERSION_MASK;
9042         *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9043                                 & RX_POLARITY_INVERSION_MASK;
9044         *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9045         return ret;
9046 }
9047
9048 static int write_tx_settings(struct hfi1_devdata *dd,
9049                              u8 enable_lane_tx,
9050                              u8 tx_polarity_inversion,
9051                              u8 rx_polarity_inversion,
9052                              u8 max_rate)
9053 {
9054         u32 frame;
9055
9056         /* no need to mask, all variable sizes match field widths */
9057         frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9058                 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9059                 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9060                 | max_rate << MAX_RATE_SHIFT;
9061         return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9062 }
9063
9064 /*
9065  * Read an idle LCB message.
9066  *
9067  * Returns 0 on success, -EINVAL on error
9068  */
9069 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9070 {
9071         int ret;
9072
9073         ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9074         if (ret != HCMD_SUCCESS) {
9075                 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9076                            (u32)type, ret);
9077                 return -EINVAL;
9078         }
9079         dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9080         /* return only the payload as we already know the type */
9081         *data_out >>= IDLE_PAYLOAD_SHIFT;
9082         return 0;
9083 }
9084
9085 /*
9086  * Read an idle SMA message.  To be done in response to a notification from
9087  * the 8051.
9088  *
9089  * Returns 0 on success, -EINVAL on error
9090  */
9091 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9092 {
9093         return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9094                                  data);
9095 }
9096
9097 /*
9098  * Send an idle LCB message.
9099  *
9100  * Returns 0 on success, -EINVAL on error
9101  */
9102 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9103 {
9104         int ret;
9105
9106         dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9107         ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9108         if (ret != HCMD_SUCCESS) {
9109                 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9110                            data, ret);
9111                 return -EINVAL;
9112         }
9113         return 0;
9114 }
9115
9116 /*
9117  * Send an idle SMA message.
9118  *
9119  * Returns 0 on success, -EINVAL on error
9120  */
9121 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9122 {
9123         u64 data;
9124
9125         data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9126                 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9127         return send_idle_message(dd, data);
9128 }
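
/*
 * Packing note (illustrative): send_idle_sma() and read_idle_sma() are a
 * matched pair.  The payload is placed in the IDLE_PAYLOAD field and the
 * message type in the IDLE_MSG_TYPE field; read_idle_message() then
 * shifts the received frame right by IDLE_PAYLOAD_SHIFT, so a payload
 * sent by one side as (msg & IDLE_PAYLOAD_MASK) is what the other side
 * recovers from read_idle_sma(), assuming the type field occupies the
 * bits below IDLE_PAYLOAD_SHIFT.
 */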
9129
9130 /*
9131  * Initialize the LCB then do a quick link up.  This may or may not be
9132  * in loopback.
9133  *
9134  * return 0 on success, -errno on error
9135  */
9136 static int do_quick_linkup(struct hfi1_devdata *dd)
9137 {
9138         int ret;
9139
9140         lcb_shutdown(dd, 0);
9141
9142         if (loopback) {
9143                 /* LCB_CFG_LOOPBACK.VAL = 2 */
9144                 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9145                 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9146                           IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9147                 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9148         }
9149
9150         /* start the LCBs */
9151         /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9152         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9153
9154         /* simulator only loopback steps */
9155         if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9156                 /* LCB_CFG_RUN.EN = 1 */
9157                 write_csr(dd, DC_LCB_CFG_RUN,
9158                           1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9159
9160                 ret = wait_link_transfer_active(dd, 10);
9161                 if (ret)
9162                         return ret;
9163
9164                 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9165                           1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9166         }
9167
9168         if (!loopback) {
9169                 /*
9170                  * When doing quick linkup and not in loopback, both
9171                  * sides must be done with LCB set-up before either
9172                  * starts the quick linkup.  Put a delay here so that
9173                  * both sides can be started and have a chance to be
9174                  * done with LCB set up before resuming.
9175                  */
9176                 dd_dev_err(dd,
9177                            "Pausing for peer to be finished with LCB set up\n");
9178                 msleep(5000);
9179                 dd_dev_err(dd, "Continuing with quick linkup\n");
9180         }
9181
9182         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9183         set_8051_lcb_access(dd);
9184
9185         /*
9186          * State "quick" LinkUp request sets the physical link state to
9187          * LinkUp without a verify capability sequence.
9188          * This state is in simulator v37 and later.
9189          */
9190         ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9191         if (ret != HCMD_SUCCESS) {
9192                 dd_dev_err(dd,
9193                            "%s: set physical link state to quick LinkUp failed with return %d\n",
9194                            __func__, ret);
9195
9196                 set_host_lcb_access(dd);
9197                 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9198
9199                 if (ret >= 0)
9200                         ret = -EINVAL;
9201                 return ret;
9202         }
9203
9204         return 0; /* success */
9205 }
9206
9207 /*
9208  * Do all special steps to set up loopback.
9209  */
9210 static int init_loopback(struct hfi1_devdata *dd)
9211 {
9212         dd_dev_info(dd, "Entering loopback mode\n");
9213
9214         /* all loopbacks should disable self GUID check */
9215         write_csr(dd, DC_DC8051_CFG_MODE,
9216                   (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9217
9218         /*
9219          * The simulator has only one loopback option - LCB.  Switch
9220          * to that option, which includes quick link up.
9221          *
9222          * Accept all valid loopback values.
9223          */
9224         if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9225             (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9226              loopback == LOOPBACK_CABLE)) {
9227                 loopback = LOOPBACK_LCB;
9228                 quick_linkup = 1;
9229                 return 0;
9230         }
9231
9232         /*
9233          * SerDes loopback init sequence is handled in set_local_link_attributes
9234          */
9235         if (loopback == LOOPBACK_SERDES)
9236                 return 0;
9237
9238         /* LCB loopback - handled at poll time */
9239         if (loopback == LOOPBACK_LCB) {
9240                 quick_linkup = 1; /* LCB is always quick linkup */
9241
9242                 /* not supported in emulation due to emulation RTL changes */
9243                 if (dd->icode == ICODE_FPGA_EMULATION) {
9244                         dd_dev_err(dd,
9245                                    "LCB loopback not supported in emulation\n");
9246                         return -EINVAL;
9247                 }
9248                 return 0;
9249         }
9250
9251         /* external cable loopback requires no extra steps */
9252         if (loopback == LOOPBACK_CABLE)
9253                 return 0;
9254
9255         dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9256         return -EINVAL;
9257 }
9258
9259 /*
9260  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9261  * used in the Verify Capability link width attribute.
9262  */
9263 static u16 opa_to_vc_link_widths(u16 opa_widths)
9264 {
9265         int i;
9266         u16 result = 0;
9267
9268         static const struct link_bits {
9269                 u16 from;
9270                 u16 to;
9271         } opa_link_xlate[] = {
9272                 { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9273                 { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9274                 { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9275                 { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9276         };
9277
9278         for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9279                 if (opa_widths & opa_link_xlate[i].from)
9280                         result |= opa_link_xlate[i].to;
9281         }
9282         return result;
9283 }
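
/*
 * Worked example (for illustration only): opa_widths =
 * OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X translates to bits 0 and 3 of
 * the result, i.e. opa_to_vc_link_widths() returns 0x9.  Each
 * OPA_LINK_WIDTH_nX flag maps to Verify Capability bit (n - 1).
 */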
9284
9285 /*
9286  * Set link attributes before moving to polling.
9287  */
9288 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9289 {
9290         struct hfi1_devdata *dd = ppd->dd;
9291         u8 enable_lane_tx;
9292         u8 tx_polarity_inversion;
9293         u8 rx_polarity_inversion;
9294         int ret;
9295         u32 misc_bits = 0;
9296         /* reset our fabric serdes to clear any lingering problems */
9297         fabric_serdes_reset(dd);
9298
9299         /* set the local tx rate - need to read-modify-write */
9300         ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9301                                &rx_polarity_inversion, &ppd->local_tx_rate);
9302         if (ret)
9303                 goto set_local_link_attributes_fail;
9304
9305         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9306                 /* set the tx rate to the fastest enabled */
9307                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9308                         ppd->local_tx_rate = 1;
9309                 else
9310                         ppd->local_tx_rate = 0;
9311         } else {
9312                 /* set the tx rate to all enabled */
9313                 ppd->local_tx_rate = 0;
9314                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9315                         ppd->local_tx_rate |= 2;
9316                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9317                         ppd->local_tx_rate |= 1;
9318         }
9319
9320         enable_lane_tx = 0xF; /* enable all four lanes */
9321         ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9322                                 rx_polarity_inversion, ppd->local_tx_rate);
9323         if (ret != HCMD_SUCCESS)
9324                 goto set_local_link_attributes_fail;
9325
9326         ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9327         if (ret != HCMD_SUCCESS) {
9328                 dd_dev_err(dd,
9329                            "Failed to set host interface version, return 0x%x\n",
9330                            ret);
9331                 goto set_local_link_attributes_fail;
9332         }
9333
9334         /*
9335          * DC supports continuous updates.
9336          */
9337         ret = write_vc_local_phy(dd,
9338                                  0 /* no power management */,
9339                                  1 /* continuous updates */);
9340         if (ret != HCMD_SUCCESS)
9341                 goto set_local_link_attributes_fail;
9342
9343         /* z=1 in the next call: AU of 0 is not supported by the hardware */
9344         ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9345                                     ppd->port_crc_mode_enabled);
9346         if (ret != HCMD_SUCCESS)
9347                 goto set_local_link_attributes_fail;
9348
9349         /*
9350          * SerDes loopback init sequence requires
9351          * setting bit 0 of MISC_CONFIG_BITS
9352          */
9353         if (loopback == LOOPBACK_SERDES)
9354                 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9355
9356         /*
9357          * An external device configuration request is used to reset the LCB
9358          * to retry obtaining operational lanes when the first attempt is
9359          * unsuccessful.
9360          */
9361         if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9362                 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9363
9364         ret = write_vc_local_link_mode(dd, misc_bits, 0,
9365                                        opa_to_vc_link_widths(
9366                                                 ppd->link_width_enabled));
9367         if (ret != HCMD_SUCCESS)
9368                 goto set_local_link_attributes_fail;
9369
9370         /* let peer know who we are */
9371         ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9372         if (ret == HCMD_SUCCESS)
9373                 return 0;
9374
9375 set_local_link_attributes_fail:
9376         dd_dev_err(dd,
9377                    "Failed to set local link attributes, return 0x%x\n",
9378                    ret);
9379         return ret;
9380 }
9381
9382 /*
9383  * Call this to start the link.
9384  * Do not do anything if the link is disabled.
9385  * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9386  */
9387 int start_link(struct hfi1_pportdata *ppd)
9388 {
9389         /*
9390          * Tune the SerDes to a ballpark setting for optimal signal and bit
9391          * error rate.  Needs to be done before starting the link.
9392          */
9393         tune_serdes(ppd);
9394
9395         if (!ppd->driver_link_ready) {
9396                 dd_dev_info(ppd->dd,
9397                             "%s: stopping link start because driver is not ready\n",
9398                             __func__);
9399                 return 0;
9400         }
9401
9402         /*
9403          * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9404          * pkey table can be configured properly if the HFI unit is connected
9405          * to a switch port with MgmtAllowed=NO
9406          */
9407         clear_full_mgmt_pkey(ppd);
9408
9409         return set_link_state(ppd, HLS_DN_POLL);
9410 }
9411
9412 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9413 {
9414         struct hfi1_devdata *dd = ppd->dd;
9415         u64 mask;
9416         unsigned long timeout;
9417
9418         /*
9419          * Some QSFP cables have a quirk that asserts the IntN line as a side
9420          * effect of power up on plug-in. We ignore this false positive
9421          * interrupt until the module has finished powering up by waiting for
9422          * a minimum timeout of the module inrush initialization time of
9423          * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9424          * module have stabilized.
9425          */
9426         msleep(500);
9427
9428         /*
9429          * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9430          */
9431         timeout = jiffies + msecs_to_jiffies(2000);
9432         while (1) {
9433                 mask = read_csr(dd, dd->hfi1_id ?
9434                                 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9435                 if (!(mask & QSFP_HFI0_INT_N))
9436                         break;
9437                 if (time_after(jiffies, timeout)) {
9438                         dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9439                                     __func__);
9440                         break;
9441                 }
9442                 udelay(2);
9443         }
9444 }
9445
9446 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9447 {
9448         struct hfi1_devdata *dd = ppd->dd;
9449         u64 mask;
9450
9451         mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9452         if (enable) {
9453                 /*
9454                  * Clear the status register to avoid an immediate interrupt
9455                  * when we re-enable the IntN pin
9456                  */
9457                 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9458                           QSFP_HFI0_INT_N);
9459                 mask |= (u64)QSFP_HFI0_INT_N;
9460         } else {
9461                 mask &= ~(u64)QSFP_HFI0_INT_N;
9462         }
9463         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9464 }
9465
9466 int reset_qsfp(struct hfi1_pportdata *ppd)
9467 {
9468         struct hfi1_devdata *dd = ppd->dd;
9469         u64 mask, qsfp_mask;
9470
9471         /* Disable INT_N from triggering QSFP interrupts */
9472         set_qsfp_int_n(ppd, 0);
9473
9474         /* Reset the QSFP */
9475         mask = (u64)QSFP_HFI0_RESET_N;
9476
9477         qsfp_mask = read_csr(dd,
9478                              dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9479         qsfp_mask &= ~mask;
9480         write_csr(dd,
9481                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9482
9483         udelay(10);
9484
9485         qsfp_mask |= mask;
9486         write_csr(dd,
9487                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9488
9489         wait_for_qsfp_init(ppd);
9490
9491         /*
9492          * Allow INT_N to trigger the QSFP interrupt to watch
9493          * for alarms and warnings
9494          */
9495         set_qsfp_int_n(ppd, 1);
9496
9497         /*
9498          * After the reset, AOC transmitters are enabled by default. They need
9499          * to be turned off to complete the QSFP setup before they can be
9500          * enabled again.
9501          */
9502         return set_qsfp_tx(ppd, 0);
9503 }
9504
9505 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9506                                         u8 *qsfp_interrupt_status)
9507 {
9508         struct hfi1_devdata *dd = ppd->dd;
9509
9510         if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9511             (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9512                 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9513                            __func__);
9514
9515         if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9516             (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9517                 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9518                            __func__);
9519
9520         /*
9521          * The remaining alarms/warnings don't matter if the link is down.
9522          */
9523         if (ppd->host_link_state & HLS_DOWN)
9524                 return 0;
9525
9526         if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9527             (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9528                 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9529                            __func__);
9530
9531         if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9532             (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9533                 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9534                            __func__);
9535
9536         /* Byte 2 is vendor specific */
9537
9538         if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9539             (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9540                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9541                            __func__);
9542
9543         if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9544             (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9545                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9546                            __func__);
9547
9548         if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9549             (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9550                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9551                            __func__);
9552
9553         if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9554             (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9555                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9556                            __func__);
9557
9558         if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9559             (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9560                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9561                            __func__);
9562
9563         if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9564             (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9565                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9566                            __func__);
9567
9568         if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9569             (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9570                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9571                            __func__);
9572
9573         if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9574             (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9575                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9576                            __func__);
9577
9578         if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9579             (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9580                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9581                            __func__);
9582
9583         if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9584             (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9585                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9586                            __func__);
9587
9588         if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9589             (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9590                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9591                            __func__);
9592
9593         if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9594             (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9595                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9596                            __func__);
9597
9598         /* Bytes 9-10 and 11-12 are reserved */
9599         /* Bytes 13-15 are vendor specific */
9600
9601         return 0;
9602 }
9603
9604 /* This routine will only be scheduled if the QSFP module present signal is asserted */
9605 void qsfp_event(struct work_struct *work)
9606 {
9607         struct qsfp_data *qd;
9608         struct hfi1_pportdata *ppd;
9609         struct hfi1_devdata *dd;
9610
9611         qd = container_of(work, struct qsfp_data, qsfp_work);
9612         ppd = qd->ppd;
9613         dd = ppd->dd;
9614
9615         /* Sanity check */
9616         if (!qsfp_mod_present(ppd))
9617                 return;
9618
9619         if (ppd->host_link_state == HLS_DN_DISABLE) {
9620                 dd_dev_info(ppd->dd,
9621                             "%s: stopping link start because link is disabled\n",
9622                             __func__);
9623                 return;
9624         }
9625
9626         /*
9627          * Turn DC back on after cable has been re-inserted. Up until
9628          * now, the DC has been in reset to save power.
9629          */
9630         dc_start(dd);
9631
9632         if (qd->cache_refresh_required) {
9633                 set_qsfp_int_n(ppd, 0);
9634
9635                 wait_for_qsfp_init(ppd);
9636
9637                 /*
9638                  * Allow INT_N to trigger the QSFP interrupt to watch
9639                  * for alarms and warnings
9640                  */
9641                 set_qsfp_int_n(ppd, 1);
9642
9643                 start_link(ppd);
9644         }
9645
9646         if (qd->check_interrupt_flags) {
9647                 u8 qsfp_interrupt_status[16] = {0,};
9648
9649                 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9650                                   &qsfp_interrupt_status[0], 16) != 16) {
9651                         dd_dev_info(dd,
9652                                     "%s: Failed to read status of QSFP module\n",
9653                                     __func__);
9654                 } else {
9655                         unsigned long flags;
9656
9657                         handle_qsfp_error_conditions(
9658                                         ppd, qsfp_interrupt_status);
9659                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9660                         ppd->qsfp_info.check_interrupt_flags = 0;
9661                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9662                                                flags);
9663                 }
9664         }
9665 }
9666
9667 void init_qsfp_int(struct hfi1_devdata *dd)
9668 {
9669         struct hfi1_pportdata *ppd = dd->pport;
9670         u64 qsfp_mask;
9671
9672         qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9673         /* Clear current status to avoid spurious interrupts */
9674         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9675                   qsfp_mask);
9676         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9677                   qsfp_mask);
9678
9679         set_qsfp_int_n(ppd, 0);
9680
9681         /* Handle active low nature of INT_N and MODPRST_N pins */
9682         if (qsfp_mod_present(ppd))
9683                 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9684         write_csr(dd,
9685                   dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9686                   qsfp_mask);
9687
9688         /* Enable the appropriate QSFP IRQ source */
9689         if (!dd->hfi1_id)
9690                 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9691         else
9692                 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9693 }
9694
9695 /*
9696  * Do a one-time initialization of the LCB block.
9697  */
9698 static void init_lcb(struct hfi1_devdata *dd)
9699 {
9700         /* simulator does not correctly handle LCB cclk loopback, skip */
9701         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9702                 return;
9703
9704         /* the DC has been reset earlier in the driver load */
9705
9706         /* set LCB for cclk loopback on the port */
9707         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9708         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9709         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9710         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9711         write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9712         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9713         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9714 }
9715
9716 /*
9717  * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9718  * on error.
9719  */
9720 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9721 {
9722         int ret;
9723         u8 status;
9724
9725         /*
9726          * Report success if this is not a QSFP port, or if it is a QSFP port
9727          * but the cable is not present.
9728          */
9729         if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9730                 return 0;
9731
9732         /* read byte 2, the status byte */
9733         ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9734         if (ret < 0)
9735                 return ret;
9736         if (ret != 1)
9737                 return -EIO;
9738
9739         return 0; /* success */
9740 }
9741
9742 /*
9743  * Values for QSFP retry.
9744  *
9745  * Give up after 10s (20 x 500ms).  The overall timeout was empirically
9746  * arrived at from experience on a large cluster.
9747  */
9748 #define MAX_QSFP_RETRIES 20
9749 #define QSFP_RETRY_WAIT 500 /* msec */
9750
9751 /*
9752  * Try a QSFP read.  If it fails, schedule a retry for later.
9753  * Called on first link activation after driver load.
9754  */
9755 static void try_start_link(struct hfi1_pportdata *ppd)
9756 {
9757         if (test_qsfp_read(ppd)) {
9758                 /* read failed */
9759                 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9760                         dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9761                         return;
9762                 }
9763                 dd_dev_info(ppd->dd,
9764                             "QSFP not responding, waiting and retrying %d\n",
9765                             (int)ppd->qsfp_retry_count);
9766                 ppd->qsfp_retry_count++;
9767                 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9768                                    msecs_to_jiffies(QSFP_RETRY_WAIT));
9769                 return;
9770         }
9771         ppd->qsfp_retry_count = 0;
9772
9773         start_link(ppd);
9774 }
9775
9776 /*
9777  * Workqueue function to start the link after a delay.
9778  */
9779 void handle_start_link(struct work_struct *work)
9780 {
9781         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9782                                                   start_link_work.work);
9783         try_start_link(ppd);
9784 }
9785
9786 int bringup_serdes(struct hfi1_pportdata *ppd)
9787 {
9788         struct hfi1_devdata *dd = ppd->dd;
9789         u64 guid;
9790         int ret;
9791
9792         if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9793                 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9794
9795         guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9796         if (!guid) {
9797                 if (dd->base_guid)
9798                         guid = dd->base_guid + ppd->port - 1;
9799                 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9800         }
9801
9802         /* Set linkinit_reason on power up per OPA spec */
9803         ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9804
9805         /* one-time init of the LCB */
9806         init_lcb(dd);
9807
9808         if (loopback) {
9809                 ret = init_loopback(dd);
9810                 if (ret < 0)
9811                         return ret;
9812         }
9813
9814         get_port_type(ppd);
9815         if (ppd->port_type == PORT_TYPE_QSFP) {
9816                 set_qsfp_int_n(ppd, 0);
9817                 wait_for_qsfp_init(ppd);
9818                 set_qsfp_int_n(ppd, 1);
9819         }
9820
9821         try_start_link(ppd);
9822         return 0;
9823 }
9824
9825 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9826 {
9827         struct hfi1_devdata *dd = ppd->dd;
9828
9829         /*
9830          * Shut down the link and keep it down.  First clear the flag saying
9831          * the driver wants to allow the link to be up (driver_link_ready).
9832          * Then make sure the link is not automatically restarted
9833          * (link_enabled).  Cancel any pending restart.  And finally
9834          * go offline.
9835          */
9836         ppd->driver_link_ready = 0;
9837         ppd->link_enabled = 0;
9838
9839         ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9840         flush_delayed_work(&ppd->start_link_work);
9841         cancel_delayed_work_sync(&ppd->start_link_work);
9842
9843         ppd->offline_disabled_reason =
9844                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9845         set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9846                              OPA_LINKDOWN_REASON_REBOOT);
9847         set_link_state(ppd, HLS_DN_OFFLINE);
9848
9849         /* disable the port */
9850         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9851 }
9852
9853 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9854 {
9855         struct hfi1_pportdata *ppd;
9856         int i;
9857
9858         ppd = (struct hfi1_pportdata *)(dd + 1);
9859         for (i = 0; i < dd->num_pports; i++, ppd++) {
9860                 ppd->ibport_data.rvp.rc_acks = NULL;
9861                 ppd->ibport_data.rvp.rc_qacks = NULL;
9862                 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9863                 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9864                 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9865                 if (!ppd->ibport_data.rvp.rc_acks ||
9866                     !ppd->ibport_data.rvp.rc_delayed_comp ||
9867                     !ppd->ibport_data.rvp.rc_qacks)
9868                         return -ENOMEM;
9869         }
9870
9871         return 0;
9872 }
9873
9874 /*
9875  * index is the index into the receive array
9876  */
9877 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9878                   u32 type, unsigned long pa, u16 order)
9879 {
9880         u64 reg;
9881
9882         if (!(dd->flags & HFI1_PRESENT))
9883                 goto done;
9884
9885         if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9886                 pa = 0;
9887                 order = 0;
9888         } else if (type > PT_INVALID) {
9889                 dd_dev_err(dd,
9890                            "unexpected receive array type %u for index %u, not handled\n",
9891                            type, index);
9892                 goto done;
9893         }
9894         trace_hfi1_put_tid(dd, index, type, pa, order);
9895
9896 #define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
9897         reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9898                 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9899                 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9900                                         << RCV_ARRAY_RT_ADDR_SHIFT;
9901         trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9902         writeq(reg, dd->rcvarray_wc + (index * 8));
9903
9904         if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9905                 /*
9906                  * Eager entries are written and flushed
9907                  *
9908                  * Expected entries are flushed every 4 writes
9909                  */
9910                 flush_wc();
9911 done:
9912         return;
9913 }
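
/*
 * Packing sketch (illustrative address, not a real mapping): for a
 * buffer at physical address 0x12345000, pa >> RT_ADDR_SHIFT = 0x12345
 * is written into the RcvArray address field, the encoded buffer size
 * (order) goes into the BUF_SIZE field, and RCV_ARRAY_RT_WRITE_ENABLE_SMASK
 * is set in the register value, as in the code above.
 */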
9914
9915 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9916 {
9917         struct hfi1_devdata *dd = rcd->dd;
9918         u32 i;
9919
9920         /* this could be optimized */
9921         for (i = rcd->eager_base; i < rcd->eager_base +
9922                      rcd->egrbufs.alloced; i++)
9923                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9924
9925         for (i = rcd->expected_base;
9926                         i < rcd->expected_base + rcd->expected_count; i++)
9927                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9928 }
9929
9930 static const char * const ib_cfg_name_strings[] = {
9931         "HFI1_IB_CFG_LIDLMC",
9932         "HFI1_IB_CFG_LWID_DG_ENB",
9933         "HFI1_IB_CFG_LWID_ENB",
9934         "HFI1_IB_CFG_LWID",
9935         "HFI1_IB_CFG_SPD_ENB",
9936         "HFI1_IB_CFG_SPD",
9937         "HFI1_IB_CFG_RXPOL_ENB",
9938         "HFI1_IB_CFG_LREV_ENB",
9939         "HFI1_IB_CFG_LINKLATENCY",
9940         "HFI1_IB_CFG_HRTBT",
9941         "HFI1_IB_CFG_OP_VLS",
9942         "HFI1_IB_CFG_VL_HIGH_CAP",
9943         "HFI1_IB_CFG_VL_LOW_CAP",
9944         "HFI1_IB_CFG_OVERRUN_THRESH",
9945         "HFI1_IB_CFG_PHYERR_THRESH",
9946         "HFI1_IB_CFG_LINKDEFAULT",
9947         "HFI1_IB_CFG_PKEYS",
9948         "HFI1_IB_CFG_MTU",
9949         "HFI1_IB_CFG_LSTATE",
9950         "HFI1_IB_CFG_VL_HIGH_LIMIT",
9951         "HFI1_IB_CFG_PMA_TICKS",
9952         "HFI1_IB_CFG_PORT"
9953 };
9954
9955 static const char *ib_cfg_name(int which)
9956 {
9957         if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9958                 return "invalid";
9959         return ib_cfg_name_strings[which];
9960 }
9961
9962 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9963 {
9964         struct hfi1_devdata *dd = ppd->dd;
9965         int val = 0;
9966
9967         switch (which) {
9968         case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9969                 val = ppd->link_width_enabled;
9970                 break;
9971         case HFI1_IB_CFG_LWID: /* currently active Link-width */
9972                 val = ppd->link_width_active;
9973                 break;
9974         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9975                 val = ppd->link_speed_enabled;
9976                 break;
9977         case HFI1_IB_CFG_SPD: /* current Link speed */
9978                 val = ppd->link_speed_active;
9979                 break;
9980
9981         case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9982         case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9983         case HFI1_IB_CFG_LINKLATENCY:
9984                 goto unimplemented;
9985
9986         case HFI1_IB_CFG_OP_VLS:
9987                 val = ppd->actual_vls_operational;
9988                 break;
9989         case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9990                 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9991                 break;
9992         case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9993                 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9994                 break;
9995         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9996                 val = ppd->overrun_threshold;
9997                 break;
9998         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9999                 val = ppd->phy_error_threshold;
10000                 break;
10001         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10002                 val = HLS_DEFAULT;
10003                 break;
10004
10005         case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10006         case HFI1_IB_CFG_PMA_TICKS:
10007         default:
10008 unimplemented:
10009                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10010                         dd_dev_info(
10011                                 dd,
10012                                 "%s: which %s: not implemented\n",
10013                                 __func__,
10014                                 ib_cfg_name(which));
10015                 break;
10016         }
10017
10018         return val;
10019 }
10020
10021 /*
10022  * The largest MAD packet size.
10023  */
10024 #define MAX_MAD_PACKET 2048
10025
10026 /*
10027  * Return the maximum header bytes that can go on the _wire_
10028  * for this device. This count includes the ICRC which is
10029  * not part of the packet held in memory but is appended
10030  * by the HW.
10031  * This is dependent on the device's receive header entry size.
10032  * HFI allows this to be set per-receive context, but the
10033  * driver presently enforces a global value.
10034  */
10035 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10036 {
10037         /*
10038          * The maximum non-payload (MTU) bytes in LRH.PktLen are
10039          * the Receive Header Entry Size minus the PBC (or RHF) size
10040          * plus one DW for the ICRC appended by HW.
10041          *
10042          * dd->rcd[0].rcvhdrqentsize is in DW.
10043          * We use rcd[0] as all contexts will have the same value. Also,
10044          * the first kernel context would have been allocated by now so
10045          * we are guaranteed a valid value.
10046          */
10047         return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10048 }
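
/*
 * Worked example (32 DW is an assumed value for illustration, not a
 * driver constant): with rcvhdrqentsize == 32 DW, the maximum wire
 * header is (32 - 2 + 1) << 2 = 124 bytes.
 */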
10049
10050 /*
10051  * Set Send Length
10052  * @ppd - per port data
10053  *
10054  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10055  * registers compare against LRH.PktLen, so use the max bytes included
10056  * in the LRH.
10057  *
10058  * This routine changes all VL values except VL15, which it maintains at
10059  * the same value.
10060  */
10061 static void set_send_length(struct hfi1_pportdata *ppd)
10062 {
10063         struct hfi1_devdata *dd = ppd->dd;
10064         u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10065         u32 maxvlmtu = dd->vld[15].mtu;
10066         u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10067                               & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10068                 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10069         int i, j;
10070         u32 thres;
10071
10072         for (i = 0; i < ppd->vls_supported; i++) {
10073                 if (dd->vld[i].mtu > maxvlmtu)
10074                         maxvlmtu = dd->vld[i].mtu;
10075                 if (i <= 3)
10076                         len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10077                                  & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10078                                 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10079                 else
10080                         len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10081                                  & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10082                                 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10083         }
10084         write_csr(dd, SEND_LEN_CHECK0, len1);
10085         write_csr(dd, SEND_LEN_CHECK1, len2);
10086         /* adjust kernel credit return thresholds based on new MTUs */
10087         /* all kernel receive contexts have the same hdrqentsize */
10088         for (i = 0; i < ppd->vls_supported; i++) {
10089                 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10090                             sc_mtu_to_threshold(dd->vld[i].sc,
10091                                                 dd->vld[i].mtu,
10092                                                 dd->rcd[0]->rcvhdrqentsize));
10093                 for (j = 0; j < INIT_SC_PER_VL; j++)
10094                         sc_set_cr_threshold(
10095                                         pio_select_send_context_vl(dd, j, i),
10096                                             thres);
10097         }
10098         thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10099                     sc_mtu_to_threshold(dd->vld[15].sc,
10100                                         dd->vld[15].mtu,
10101                                         dd->rcd[0]->rcvhdrqentsize));
10102         sc_set_cr_threshold(dd->vld[15].sc, thres);
10103
10104         /* Adjust maximum MTU for the port in DC */
10105         dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10106                 (ilog2(maxvlmtu >> 8) + 1);
10107         len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10108         len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10109         len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10110                 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10111         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10112 }
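
/*
 * Worked example of the DC MTU cap encoding above (illustration only):
 * maxvlmtu of 2048 encodes as ilog2(2048 >> 8) + 1 = 4, 4096 as 5 and
 * 8192 as 6; 10240 is the special case DCC_CFG_PORT_MTU_CAP_10240.
 */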
10113
10114 static void set_lidlmc(struct hfi1_pportdata *ppd)
10115 {
10116         int i;
10117         u64 sreg = 0;
10118         struct hfi1_devdata *dd = ppd->dd;
10119         u32 mask = ~((1U << ppd->lmc) - 1);
10120         u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10121         u32 lid;
10122
10123         /*
10124          * Program 0 into the CSR if the port LID is extended. This prevents
10125          * 9B packets from being sent out for large LIDs.
10126          */
10127         lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10128         c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10129                 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10130         c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10131                         << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10132               ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10133                         << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10134         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10135
10136         /*
10137          * Iterate over all the send contexts and set their SLID check
10138          */
10139         sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10140                         SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10141                (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10142                         SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10143
10144         for (i = 0; i < chip_send_contexts(dd); i++) {
10145                 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10146                           i, (u32)sreg);
10147                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10148         }
10149
10150         /* Now we have to do the same thing for the sdma engines */
10151         sdma_update_lmc(dd, mask, lid);
10152 }
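
/*
 * Worked example of the LMC masking above (illustration only): with
 * ppd->lmc == 2, mask = ~((1U << 2) - 1) = 0xfffffffc, so the SLID/DLID
 * checks ignore the low two LID bits and (lid & mask) is the base LID of
 * the port's four-LID range.
 */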
10153
10154 static const char *state_completed_string(u32 completed)
10155 {
10156         static const char * const state_completed[] = {
10157                 "EstablishComm",
10158                 "OptimizeEQ",
10159                 "VerifyCap"
10160         };
10161
10162         if (completed < ARRAY_SIZE(state_completed))
10163                 return state_completed[completed];
10164
10165         return "unknown";
10166 }
10167
10168 static const char all_lanes_dead_timeout_expired[] =
10169         "All lanes were inactive â€“ was the interconnect media removed?";
10170 static const char tx_out_of_policy[] =
10171         "Passing lanes on local port do not meet the local link width policy";
10172 static const char no_state_complete[] =
10173         "State timeout occurred before link partner completed the state";
10174 static const char * const state_complete_reasons[] = {
10175         [0x00] = "Reason unknown",
10176         [0x01] = "Link was halted by driver, refer to LinkDownReason",
10177         [0x02] = "Link partner reported failure",
10178         [0x10] = "Unable to achieve frame sync on any lane",
10179         [0x11] =
10180           "Unable to find a common bit rate with the link partner",
10181         [0x12] =
10182           "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10183         [0x13] =
10184           "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10185         [0x14] = no_state_complete,
10186         [0x15] =
10187           "State timeout occurred before link partner identified equalization presets",
10188         [0x16] =
10189           "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10190         [0x17] = tx_out_of_policy,
10191         [0x20] = all_lanes_dead_timeout_expired,
10192         [0x21] =
10193           "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10194         [0x22] = no_state_complete,
10195         [0x23] =
10196           "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10197         [0x24] = tx_out_of_policy,
10198         [0x30] = all_lanes_dead_timeout_expired,
10199         [0x31] =
10200           "State timeout occurred waiting for host to process received frames",
10201         [0x32] = no_state_complete,
10202         [0x33] =
10203           "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10204         [0x34] = tx_out_of_policy,
10205         [0x35] = "Negotiated link width is mutually exclusive",
10206         [0x36] =
10207           "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10208         [0x37] = "Unable to resolve secure data exchange",
10209 };
10210
10211 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10212                                                      u32 code)
10213 {
10214         const char *str = NULL;
10215
10216         if (code < ARRAY_SIZE(state_complete_reasons))
10217                 str = state_complete_reasons[code];
10218
10219         if (str)
10220                 return str;
10221         return "Reserved";
10222 }
10223
10224 /* describe the given last state complete frame */
10225 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10226                                   const char *prefix)
10227 {
10228         struct hfi1_devdata *dd = ppd->dd;
10229         u32 success;
10230         u32 state;
10231         u32 reason;
10232         u32 lanes;
10233
10234         /*
10235          * Decode frame:
10236          *  [ 0: 0] - success
10237          *  [ 3: 1] - state
10238          *  [ 7: 4] - next state timeout
10239          *  [15: 8] - reason code
10240          *  [31:16] - lanes
10241          */
10242         success = frame & 0x1;
10243         state = (frame >> 1) & 0x7;
10244         reason = (frame >> 8) & 0xff;
10245         lanes = (frame >> 16) & 0xffff;
10246
10247         dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10248                    prefix, frame);
10249         dd_dev_err(dd, "    last reported state state: %s (0x%x)\n",
10250                    state_completed_string(state), state);
10251         dd_dev_err(dd, "    state successfully completed: %s\n",
10252                    success ? "yes" : "no");
10253         dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10254                    reason, state_complete_reason_code_string(ppd, reason));
10255         dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10256 }
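
/*
 * Worked example of the frame layout above (illustrative value): frame
 * 0x000f1404 decodes to success = 0, state = 2 ("VerifyCap"),
 * reason = 0x14 ("State timeout occurred before link partner completed
 * the state"), passing lane mask = 0x000f.
 */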
10257
10258 /*
10259  * Read the last state complete frames and explain them.  This routine
10260  * expects to be called if the link went down during link negotiation
10261  * and initialization (LNI).  That is, anywhere between polling and link up.
10262  */
10263 static void check_lni_states(struct hfi1_pportdata *ppd)
10264 {
10265         u32 last_local_state;
10266         u32 last_remote_state;
10267
10268         read_last_local_state(ppd->dd, &last_local_state);
10269         read_last_remote_state(ppd->dd, &last_remote_state);
10270
10271         /*
10272          * Don't report anything if there is nothing to report.  A value of
10273          * 0 means the link was taken down while polling and there was no
10274          * training in-process.
10275          */
10276         if (last_local_state == 0 && last_remote_state == 0)
10277                 return;
10278
10279         decode_state_complete(ppd, last_local_state, "transmitted");
10280         decode_state_complete(ppd, last_remote_state, "received");
10281 }
10282
10283 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10284 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10285 {
10286         u64 reg;
10287         unsigned long timeout;
10288
10289         /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10290         timeout = jiffies + msecs_to_jiffies(wait_ms);
10291         while (1) {
10292                 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10293                 if (reg)
10294                         break;
10295                 if (time_after(jiffies, timeout)) {
10296                         dd_dev_err(dd,
10297                                    "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10298                         return -ETIMEDOUT;
10299                 }
10300                 udelay(2);
10301         }
10302         return 0;
10303 }
10304
10305 /* called when the logical link state is not down as it should be */
10306 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10307 {
10308         struct hfi1_devdata *dd = ppd->dd;
10309
10310         /*
10311          * Bring link up in LCB loopback
10312          */
10313         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10314         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10315                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10316
10317         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10318         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10319         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10320         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10321
10322         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10323         (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10324         udelay(3);
10325         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10326         write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10327
10328         wait_link_transfer_active(dd, 100);
10329
10330         /*
10331          * Bring the link down again.
10332          */
10333         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10334         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10335         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10336
10337         dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10338 }
10339
10340 /*
10341  * Helper for set_link_state().  Do not call except from that routine.
10342  * Expects ppd->hls_mutex to be held.
10343  *
10344  * @rem_reason value to be sent to the neighbor
10345  *
10346  * LinkDownReasons only set if transition succeeds.
10347  */
10348 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10349 {
10350         struct hfi1_devdata *dd = ppd->dd;
10351         u32 previous_state;
10352         int offline_state_ret;
10353         int ret;
10354
10355         update_lcb_cache(dd);
10356
10357         previous_state = ppd->host_link_state;
10358         ppd->host_link_state = HLS_GOING_OFFLINE;
10359
10360         /* start offline transition */
10361         ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10362
10363         if (ret != HCMD_SUCCESS) {
10364                 dd_dev_err(dd,
10365                            "Failed to transition to Offline link state, return %d\n",
10366                            ret);
10367                 return -EINVAL;
10368         }
10369         if (ppd->offline_disabled_reason ==
10370                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10371                 ppd->offline_disabled_reason =
10372                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10373
10374         offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10375         if (offline_state_ret < 0)
10376                 return offline_state_ret;
10377
10378         /* Disabling AOC transmitters */
10379         if (ppd->port_type == PORT_TYPE_QSFP &&
10380             ppd->qsfp_info.limiting_active &&
10381             qsfp_mod_present(ppd)) {
10382                 int ret;
10383
10384                 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10385                 if (ret == 0) {
10386                         set_qsfp_tx(ppd, 0);
10387                         release_chip_resource(dd, qsfp_resource(dd));
10388                 } else {
10389                         /* not fatal, but should warn */
10390                         dd_dev_err(dd,
10391                                    "Unable to acquire lock to turn off QSFP TX\n");
10392                 }
10393         }
10394
10395         /*
10396          * Wait for the offline.Quiet transition if it hasn't happened yet. It
10397          * can take a while for the link to go down.
10398          */
10399         if (offline_state_ret != PLS_OFFLINE_QUIET) {
10400                 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10401                 if (ret < 0)
10402                         return ret;
10403         }
10404
10405         /*
10406          * Now in charge of LCB - must be after the physical state is
10407          * offline.quiet and before host_link_state is changed.
10408          */
10409         set_host_lcb_access(dd);
10410         write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10411
10412         /* make sure the logical state is also down */
10413         ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10414         if (ret)
10415                 force_logical_link_state_down(ppd);
10416
10417         ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10418         update_statusp(ppd, IB_PORT_DOWN);
10419
10420         /*
10421          * The LNI has a mandatory wait time after the physical state
10422          * moves to Offline.Quiet.  The wait time may be different
10423          * depending on how the link went down.  The 8051 firmware
10424          * will observe the needed wait time and only move to ready
10425          * when that is completed.  The largest of the quiet timeouts
10426          * is 6s, so wait that long and then at least 0.5s more for
10427          * other transitions, and another 0.5s for a buffer.
10428          */
10429         ret = wait_fm_ready(dd, 7000);
10430         if (ret) {
10431                 dd_dev_err(dd,
10432                            "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10433                 /* state is really offline, so make it so */
10434                 ppd->host_link_state = HLS_DN_OFFLINE;
10435                 return ret;
10436         }
10437
10438         /*
10439          * The state is now offline and the 8051 is ready to accept host
10440          * requests.
10441          *      - change our state
10442          *      - notify others if we were previously in a linkup state
10443          */
10444         ppd->host_link_state = HLS_DN_OFFLINE;
10445         if (previous_state & HLS_UP) {
10446                 /* went down while link was up */
10447                 handle_linkup_change(dd, 0);
10448         } else if (previous_state
10449                         & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10450                 /* went down while attempting link up */
10451                 check_lni_states(ppd);
10452
10453                 /* The QSFP doesn't need to be reset on LNI failure */
10454                 ppd->qsfp_info.reset_needed = 0;
10455         }
10456
10457         /* the active link width (downgrade) is 0 on link down */
10458         ppd->link_width_active = 0;
10459         ppd->link_width_downgrade_tx_active = 0;
10460         ppd->link_width_downgrade_rx_active = 0;
10461         ppd->current_egress_rate = 0;
10462         return 0;
10463 }
10464
10465 /* return the link state name */
10466 static const char *link_state_name(u32 state)
10467 {
10468         const char *name;
10469         int n = ilog2(state);
10470         static const char * const names[] = {
10471                 [__HLS_UP_INIT_BP]       = "INIT",
10472                 [__HLS_UP_ARMED_BP]      = "ARMED",
10473                 [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
10474                 [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
10475                 [__HLS_DN_POLL_BP]       = "POLL",
10476                 [__HLS_DN_DISABLE_BP]    = "DISABLE",
10477                 [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
10478                 [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
10479                 [__HLS_GOING_UP_BP]      = "GOING_UP",
10480                 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10481                 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10482         };
10483
10484         name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10485         return name ? name : "unknown";
10486 }
10487
10488 /* return the link state reason name */
10489 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10490 {
10491         if (state == HLS_UP_INIT) {
10492                 switch (ppd->linkinit_reason) {
10493                 case OPA_LINKINIT_REASON_LINKUP:
10494                         return "(LINKUP)";
10495                 case OPA_LINKINIT_REASON_FLAPPING:
10496                         return "(FLAPPING)";
10497                 case OPA_LINKINIT_OUTSIDE_POLICY:
10498                         return "(OUTSIDE_POLICY)";
10499                 case OPA_LINKINIT_QUARANTINED:
10500                         return "(QUARANTINED)";
10501                 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10502                         return "(INSUFIC_CAPABILITY)";
10503                 default:
10504                         break;
10505                 }
10506         }
10507         return "";
10508 }
10509
10510 /*
10511  * driver_pstate - convert the driver's notion of a port's
10512  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10513  * Return -1 (converted to a u32) to indicate error.
10514  */
10515 u32 driver_pstate(struct hfi1_pportdata *ppd)
10516 {
10517         switch (ppd->host_link_state) {
10518         case HLS_UP_INIT:
10519         case HLS_UP_ARMED:
10520         case HLS_UP_ACTIVE:
10521                 return IB_PORTPHYSSTATE_LINKUP;
10522         case HLS_DN_POLL:
10523                 return IB_PORTPHYSSTATE_POLLING;
10524         case HLS_DN_DISABLE:
10525                 return IB_PORTPHYSSTATE_DISABLED;
10526         case HLS_DN_OFFLINE:
10527                 return OPA_PORTPHYSSTATE_OFFLINE;
10528         case HLS_VERIFY_CAP:
10529                 return IB_PORTPHYSSTATE_TRAINING;
10530         case HLS_GOING_UP:
10531                 return IB_PORTPHYSSTATE_TRAINING;
10532         case HLS_GOING_OFFLINE:
10533                 return OPA_PORTPHYSSTATE_OFFLINE;
10534         case HLS_LINK_COOLDOWN:
10535                 return OPA_PORTPHYSSTATE_OFFLINE;
10536         case HLS_DN_DOWNDEF:
10537         default:
10538                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10539                            ppd->host_link_state);
10540                 return -1;
10541         }
10542 }
10543
10544 /*
10545  * driver_lstate - convert the driver's notion of a port's
10546  * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
10547  * (converted to a u32) to indicate error.
10548  */
10549 u32 driver_lstate(struct hfi1_pportdata *ppd)
10550 {
10551         if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10552                 return IB_PORT_DOWN;
10553
10554         switch (ppd->host_link_state & HLS_UP) {
10555         case HLS_UP_INIT:
10556                 return IB_PORT_INIT;
10557         case HLS_UP_ARMED:
10558                 return IB_PORT_ARMED;
10559         case HLS_UP_ACTIVE:
10560                 return IB_PORT_ACTIVE;
10561         default:
10562                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10563                            ppd->host_link_state);
10564                 return -1;
10565         }
10566 }
10567
10568 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10569                           u8 neigh_reason, u8 rem_reason)
10570 {
10571         if (ppd->local_link_down_reason.latest == 0 &&
10572             ppd->neigh_link_down_reason.latest == 0) {
10573                 ppd->local_link_down_reason.latest = lcl_reason;
10574                 ppd->neigh_link_down_reason.latest = neigh_reason;
10575                 ppd->remote_link_down_reason = rem_reason;
10576         }
10577 }
10578
10579 /**
10580  * data_vls_operational() - Verify if data VL BCT credits and MTU
10581  *                          are both set.
10582  * @ppd: pointer to hfi1_pportdata structure
10583  *
10584  * Return: true - OK, false - otherwise.
10585  */
10586 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10587 {
10588         int i;
10589         u64 reg;
10590
10591         if (!ppd->actual_vls_operational)
10592                 return false;
10593
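              /* BCT credits and MTU must be both set or both clear for each VL */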
10594         for (i = 0; i < ppd->vls_supported; i++) {
10595                 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10596                 if ((reg && !ppd->dd->vld[i].mtu) ||
10597                     (!reg && ppd->dd->vld[i].mtu))
10598                         return false;
10599         }
10600
10601         return true;
10602 }
10603
10604 /*
10605  * Change the physical and/or logical link state.
10606  *
10607  * Do not call this routine while inside an interrupt.  It contains
10608  * calls to routines that can take multiple seconds to finish.
10609  *
10610  * Returns 0 on success, -errno on failure.
10611  */
10612 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10613 {
10614         struct hfi1_devdata *dd = ppd->dd;
10615         struct ib_event event = {.device = NULL};
10616         int ret1, ret = 0;
10617         int orig_new_state, poll_bounce;
10618
10619         mutex_lock(&ppd->hls_lock);
10620
10621         orig_new_state = state;
10622         if (state == HLS_DN_DOWNDEF)
10623                 state = HLS_DEFAULT;
10624
10625         /* interpret poll -> poll as a link bounce */
10626         poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10627                       state == HLS_DN_POLL;
10628
10629         dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10630                     link_state_name(ppd->host_link_state),
10631                     link_state_name(orig_new_state),
10632                     poll_bounce ? "(bounce) " : "",
10633                     link_state_reason_name(ppd, state));
10634
10635         /*
10636          * If we're going to a (HLS_*) link state that implies the logical
10637          * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10638          * reset is_sm_config_started to 0.
10639          */
10640         if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10641                 ppd->is_sm_config_started = 0;
10642
10643         /*
10644          * Do nothing if the states match.  Let a poll to poll link bounce
10645          * go through.
10646          */
10647         if (ppd->host_link_state == state && !poll_bounce)
10648                 goto done;
10649
10650         switch (state) {
10651         case HLS_UP_INIT:
10652                 if (ppd->host_link_state == HLS_DN_POLL &&
10653                     (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10654                         /*
10655                          * Quick link up jumps from polling to here.
10656                          *
10657                          * Whether in normal or loopback mode, the
10658                          * simulator jumps from polling to link up.
10659                          * Accept that here.
10660                          */
10661                         /* OK */
10662                 } else if (ppd->host_link_state != HLS_GOING_UP) {
10663                         goto unexpected;
10664                 }
10665
10666                 /*
10667                  * Wait for Link_Up physical state.
10668                  * Physical and Logical states should already be
10669                  * transitioned to LinkUp and LinkInit respectively.
10670                  */
10671                 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10672                 if (ret) {
10673                         dd_dev_err(dd,
10674                                    "%s: physical state did not change to LINK-UP\n",
10675                                    __func__);
10676                         break;
10677                 }
10678
10679                 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10680                 if (ret) {
10681                         dd_dev_err(dd,
10682                                    "%s: logical state did not change to INIT\n",
10683                                    __func__);
10684                         break;
10685                 }
10686
10687                 /* clear old transient LINKINIT_REASON code */
10688                 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10689                         ppd->linkinit_reason =
10690                                 OPA_LINKINIT_REASON_LINKUP;
10691
10692                 /* enable the port */
10693                 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10694
10695                 handle_linkup_change(dd, 1);
10696                 pio_kernel_linkup(dd);
10697
10698                 /*
10699                  * After link up, a new link width will have been set.
10700                  * Update the xmit counters with regard to the new
10701                  * link width.
10702                  */
10703                 update_xmit_counters(ppd, ppd->link_width_active);
10704
10705                 ppd->host_link_state = HLS_UP_INIT;
10706                 update_statusp(ppd, IB_PORT_INIT);
10707                 break;
10708         case HLS_UP_ARMED:
10709                 if (ppd->host_link_state != HLS_UP_INIT)
10710                         goto unexpected;
10711
10712                 if (!data_vls_operational(ppd)) {
10713                         dd_dev_err(dd,
10714                                    "%s: Invalid data VL credits or mtu\n",
10715                                    __func__);
10716                         ret = -EINVAL;
10717                         break;
10718                 }
10719
10720                 set_logical_state(dd, LSTATE_ARMED);
10721                 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10722                 if (ret) {
10723                         dd_dev_err(dd,
10724                                    "%s: logical state did not change to ARMED\n",
10725                                    __func__);
10726                         break;
10727                 }
10728                 ppd->host_link_state = HLS_UP_ARMED;
10729                 update_statusp(ppd, IB_PORT_ARMED);
10730                 /*
10731                  * The simulator does not currently implement SMA messages,
10732                  * so neighbor_normal is not set.  Set it here when we first
10733                  * move to Armed.
10734                  */
10735                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10736                         ppd->neighbor_normal = 1;
10737                 break;
10738         case HLS_UP_ACTIVE:
10739                 if (ppd->host_link_state != HLS_UP_ARMED)
10740                         goto unexpected;
10741
10742                 set_logical_state(dd, LSTATE_ACTIVE);
10743                 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10744                 if (ret) {
10745                         dd_dev_err(dd,
10746                                    "%s: logical state did not change to ACTIVE\n",
10747                                    __func__);
10748                 } else {
10749                         /* tell all engines to go running */
10750                         sdma_all_running(dd);
10751                         ppd->host_link_state = HLS_UP_ACTIVE;
10752                         update_statusp(ppd, IB_PORT_ACTIVE);
10753
10754                         /* Signal the IB layer that the port has gone active */
10755                         event.device = &dd->verbs_dev.rdi.ibdev;
10756                         event.element.port_num = ppd->port;
10757                         event.event = IB_EVENT_PORT_ACTIVE;
10758                 }
10759                 break;
10760         case HLS_DN_POLL:
10761                 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10762                      ppd->host_link_state == HLS_DN_OFFLINE) &&
10763                     dd->dc_shutdown)
10764                         dc_start(dd);
10765                 /* Hand LED control to the DC */
10766                 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10767
10768                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10769                         u8 tmp = ppd->link_enabled;
10770
10771                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10772                         if (ret) {
10773                                 ppd->link_enabled = tmp;
10774                                 break;
10775                         }
10776                         ppd->remote_link_down_reason = 0;
10777
10778                         if (ppd->driver_link_ready)
10779                                 ppd->link_enabled = 1;
10780                 }
10781
10782                 set_all_slowpath(ppd->dd);
10783                 ret = set_local_link_attributes(ppd);
10784                 if (ret)
10785                         break;
10786
10787                 ppd->port_error_action = 0;
10788
10789                 if (quick_linkup) {
10790                         /* quick linkup does not go into polling */
10791                         ret = do_quick_linkup(dd);
10792                 } else {
10793                         ret1 = set_physical_link_state(dd, PLS_POLLING);
10794                         if (!ret1)
10795                                 ret1 = wait_phys_link_out_of_offline(ppd,
10796                                                                      3000);
10797                         if (ret1 != HCMD_SUCCESS) {
10798                                 dd_dev_err(dd,
10799                                            "Failed to transition to Polling link state, return 0x%x\n",
10800                                            ret1);
10801                                 ret = -EINVAL;
10802                         }
10803                 }
10804
10805                 /*
10806                  * Change the host link state after requesting DC8051 to
10807                  * change its physical state so that we can ignore any
10808                  * interrupt with stale LNI(XX) error, which will not be
10809                  * cleared until DC8051 transitions to Polling state.
10810                  */
10811                 ppd->host_link_state = HLS_DN_POLL;
10812                 ppd->offline_disabled_reason =
10813                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10814                 /*
10815                  * If an error occurred above, go back to offline.  The
10816                  * caller may reschedule another attempt.
10817                  */
10818                 if (ret)
10819                         goto_offline(ppd, 0);
10820                 else
10821                         log_physical_state(ppd, PLS_POLLING);
10822                 break;
10823         case HLS_DN_DISABLE:
10824                 /* link is disabled */
10825                 ppd->link_enabled = 0;
10826
10827                 /* allow any state to transition to disabled */
10828
10829                 /* must transition to offline first */
10830                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10831                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10832                         if (ret)
10833                                 break;
10834                         ppd->remote_link_down_reason = 0;
10835                 }
10836
10837                 if (!dd->dc_shutdown) {
10838                         ret1 = set_physical_link_state(dd, PLS_DISABLED);
10839                         if (ret1 != HCMD_SUCCESS) {
10840                                 dd_dev_err(dd,
10841                                            "Failed to transition to Disabled link state, return 0x%x\n",
10842                                            ret1);
10843                                 ret = -EINVAL;
10844                                 break;
10845                         }
10846                         ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10847                         if (ret) {
10848                                 dd_dev_err(dd,
10849                                            "%s: physical state did not change to DISABLED\n",
10850                                            __func__);
10851                                 break;
10852                         }
10853                         dc_shutdown(dd);
10854                 }
10855                 ppd->host_link_state = HLS_DN_DISABLE;
10856                 break;
10857         case HLS_DN_OFFLINE:
10858                 if (ppd->host_link_state == HLS_DN_DISABLE)
10859                         dc_start(dd);
10860
10861                 /* allow any state to transition to offline */
10862                 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10863                 if (!ret)
10864                         ppd->remote_link_down_reason = 0;
10865                 break;
10866         case HLS_VERIFY_CAP:
10867                 if (ppd->host_link_state != HLS_DN_POLL)
10868                         goto unexpected;
10869                 ppd->host_link_state = HLS_VERIFY_CAP;
10870                 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10871                 break;
10872         case HLS_GOING_UP:
10873                 if (ppd->host_link_state != HLS_VERIFY_CAP)
10874                         goto unexpected;
10875
10876                 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10877                 if (ret1 != HCMD_SUCCESS) {
10878                         dd_dev_err(dd,
10879                                    "Failed to transition to link up state, return 0x%x\n",
10880                                    ret1);
10881                         ret = -EINVAL;
10882                         break;
10883                 }
10884                 ppd->host_link_state = HLS_GOING_UP;
10885                 break;
10886
10887         case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10888         case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10889         default:
10890                 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10891                             __func__, state);
10892                 ret = -EINVAL;
10893                 break;
10894         }
10895
10896         goto done;
10897
10898 unexpected:
10899         dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10900                    __func__, link_state_name(ppd->host_link_state),
10901                    link_state_name(state));
10902         ret = -EINVAL;
10903
10904 done:
10905         mutex_unlock(&ppd->hls_lock);
10906
10907         if (event.device)
10908                 ib_dispatch_event(&event);
10909
10910         return ret;
10911 }
10912
10913 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10914 {
10915         u64 reg;
10916         int ret = 0;
10917
10918         switch (which) {
10919         case HFI1_IB_CFG_LIDLMC:
10920                 set_lidlmc(ppd);
10921                 break;
10922         case HFI1_IB_CFG_VL_HIGH_LIMIT:
10923                 /*
10924                  * The VL Arbitrator high limit is sent in units of 4k
10925                  * bytes, while HFI stores it in units of 64 bytes.
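                       * For example, a limit of 2 (2 * 4096 = 8192 bytes) is
                       * stored as 8192 / 64 = 128.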
10926                  */
10927                 val *= 4096 / 64;
10928                 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10929                         << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10930                 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10931                 break;
10932         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10933                 /* HFI only supports POLL as the default link down state */
10934                 if (val != HLS_DN_POLL)
10935                         ret = -EINVAL;
10936                 break;
10937         case HFI1_IB_CFG_OP_VLS:
10938                 if (ppd->vls_operational != val) {
10939                         ppd->vls_operational = val;
10940                         if (!ppd->port)
10941                                 ret = -EINVAL;
10942                 }
10943                 break;
10944         /*
10945          * For link width, link width downgrade, and speed enable, always AND
10946          * the setting with what is actually supported.  This has two benefits.
10947          * First, enabled can't have unsupported values, no matter what the
10948          * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10949          * "fill in with your supported value" have all the bits in the
10950          * field set, so simply ANDing with supported has the desired result.
10951          */
10952         case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10953                 ppd->link_width_enabled = val & ppd->link_width_supported;
10954                 break;
10955         case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10956                 ppd->link_width_downgrade_enabled =
10957                                 val & ppd->link_width_downgrade_supported;
10958                 break;
10959         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10960                 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10961                 break;
10962         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10963                 /*
10964                  * HFI does not follow IB specs; save this value
10965                  * so we can report it, if asked.
10966                  */
10967                 ppd->overrun_threshold = val;
10968                 break;
10969         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10970                 /*
10971                  * HFI does not follow IB specs; save this value
10972                  * so we can report it, if asked.
10973                  */
10974                 ppd->phy_error_threshold = val;
10975                 break;
10976
10977         case HFI1_IB_CFG_MTU:
10978                 set_send_length(ppd);
10979                 break;
10980
10981         case HFI1_IB_CFG_PKEYS:
10982                 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10983                         set_partition_keys(ppd);
10984                 break;
10985
10986         default:
10987                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10988                         dd_dev_info(ppd->dd,
10989                                     "%s: which %s, val 0x%x: not implemented\n",
10990                                     __func__, ib_cfg_name(which), val);
10991                 break;
10992         }
10993         return ret;
10994 }
10995
10996 /* begin functions related to vl arbitration table caching */
10997 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10998 {
10999         int i;
11000
11001         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11002                         VL_ARB_LOW_PRIO_TABLE_SIZE);
11003         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11004                         VL_ARB_HIGH_PRIO_TABLE_SIZE);
11005
11006         /*
11007          * Note that we always return values directly from the
11008          * 'vl_arb_cache' (and do no CSR reads) in response to a
11009          * 'Get(VLArbTable)'. This is obviously correct after a
11010          * 'Set(VLArbTable)', since the cache will then be up to
11011          * date. But it's also correct prior to any 'Set(VLArbTable)'
11012          * since then both the cache, and the relevant h/w registers
11013          * will be zeroed.
11014          */
11015
11016         for (i = 0; i < MAX_PRIO_TABLE; i++)
11017                 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11018 }
11019
11020 /*
11021  * vl_arb_lock_cache
11022  *
11023  * All other vl_arb_* functions should be called only after locking
11024  * the cache.
11025  */
11026 static inline struct vl_arb_cache *
11027 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11028 {
11029         if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11030                 return NULL;
11031         spin_lock(&ppd->vl_arb_cache[idx].lock);
11032         return &ppd->vl_arb_cache[idx];
11033 }
11034
11035 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11036 {
11037         spin_unlock(&ppd->vl_arb_cache[idx].lock);
11038 }
11039
11040 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11041                              struct ib_vl_weight_elem *vl)
11042 {
11043         memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11044 }
11045
11046 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11047                              struct ib_vl_weight_elem *vl)
11048 {
11049         memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11050 }
11051
11052 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11053                               struct ib_vl_weight_elem *vl)
11054 {
11055         return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11056 }
11057
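      /*
       * Typical usage of the caching accessors above (as in fm_get_table()
       * below):
       *
       *      vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
       *      vl_arb_get_cache(vlc, t);
       *      vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
       */
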
11058 /* end functions related to vl arbitration table caching */
11059
11060 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11061                           u32 size, struct ib_vl_weight_elem *vl)
11062 {
11063         struct hfi1_devdata *dd = ppd->dd;
11064         u64 reg;
11065         unsigned int i, is_up = 0;
11066         int drain, ret = 0;
11067
11068         mutex_lock(&ppd->hls_lock);
11069
11070         if (ppd->host_link_state & HLS_UP)
11071                 is_up = 1;
11072
11073         drain = !is_ax(dd) && is_up;
11074
11075         if (drain)
11076                 /*
11077                  * Before adjusting VL arbitration weights, empty per-VL
11078                  * FIFOs, otherwise a packet whose VL weight is being
11079                  * set to 0 could get stuck in a FIFO with no chance to
11080                  * egress.
11081                  */
11082                 ret = stop_drain_data_vls(dd);
11083
11084         if (ret) {
11085                 dd_dev_err(
11086                         dd,
11087                         "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11088                         __func__);
11089                 goto err;
11090         }
11091
11092         for (i = 0; i < size; i++, vl++) {
11093                 /*
11094                  * NOTE: The low priority shift and mask are used here, but
11095                  * they are the same for both the low and high registers.
11096                  */
11097                 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11098                                 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11099                       | (((u64)vl->weight
11100                                 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11101                                 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11102                 write_csr(dd, target + (i * 8), reg);
11103         }
11104         pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11105
11106         if (drain)
11107                 open_fill_data_vls(dd); /* reopen all VLs */
11108
11109 err:
11110         mutex_unlock(&ppd->hls_lock);
11111
11112         return ret;
11113 }
11114
11115 /*
11116  * Read one credit merge VL register.
11117  */
11118 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11119                            struct vl_limit *vll)
11120 {
11121         u64 reg = read_csr(dd, csr);
11122
11123         vll->dedicated = cpu_to_be16(
11124                 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11125                 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11126         vll->shared = cpu_to_be16(
11127                 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11128                 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11129 }
11130
11131 /*
11132  * Read the current credit merge limits.
11133  */
11134 static int get_buffer_control(struct hfi1_devdata *dd,
11135                               struct buffer_control *bc, u16 *overall_limit)
11136 {
11137         u64 reg;
11138         int i;
11139
11140         /* not all entries are filled in */
11141         memset(bc, 0, sizeof(*bc));
11142
11143         /* OPA and HFI have a 1-1 mapping */
11144         for (i = 0; i < TXE_NUM_DATA_VL; i++)
11145                 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11146
11147         /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11148         read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11149
11150         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11151         bc->overall_shared_limit = cpu_to_be16(
11152                 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11153                 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11154         if (overall_limit)
11155                 *overall_limit = (reg
11156                         >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11157                         & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11158         return sizeof(struct buffer_control);
11159 }
11160
11161 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11162 {
11163         u64 reg;
11164         int i;
11165
11166         /* each register contains 16 SC->VLnt mappings, 4 bits each */
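              /* low nibble: even SC's VLnt, high nibble: next odd SC's */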
11167         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11168         for (i = 0; i < sizeof(u64); i++) {
11169                 u8 byte = *(((u8 *)&reg) + i);
11170
11171                 dp->vlnt[2 * i] = byte & 0xf;
11172                 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11173         }
11174
11175         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11176         for (i = 0; i < sizeof(u64); i++) {
11177                 u8 byte = *(((u8 *)&reg) + i);
11178
11179                 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11180                 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11181         }
11182         return sizeof(struct sc2vlnt);
11183 }
11184
11185 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11186                               struct ib_vl_weight_elem *vl)
11187 {
11188         unsigned int i;
11189
11190         for (i = 0; i < nelems; i++, vl++) {
11191                 vl->vl = 0xf;
11192                 vl->weight = 0;
11193         }
11194 }
11195
11196 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11197 {
11198         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11199                   DC_SC_VL_VAL(15_0,
11200                                0, dp->vlnt[0] & 0xf,
11201                                1, dp->vlnt[1] & 0xf,
11202                                2, dp->vlnt[2] & 0xf,
11203                                3, dp->vlnt[3] & 0xf,
11204                                4, dp->vlnt[4] & 0xf,
11205                                5, dp->vlnt[5] & 0xf,
11206                                6, dp->vlnt[6] & 0xf,
11207                                7, dp->vlnt[7] & 0xf,
11208                                8, dp->vlnt[8] & 0xf,
11209                                9, dp->vlnt[9] & 0xf,
11210                                10, dp->vlnt[10] & 0xf,
11211                                11, dp->vlnt[11] & 0xf,
11212                                12, dp->vlnt[12] & 0xf,
11213                                13, dp->vlnt[13] & 0xf,
11214                                14, dp->vlnt[14] & 0xf,
11215                                15, dp->vlnt[15] & 0xf));
11216         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11217                   DC_SC_VL_VAL(31_16,
11218                                16, dp->vlnt[16] & 0xf,
11219                                17, dp->vlnt[17] & 0xf,
11220                                18, dp->vlnt[18] & 0xf,
11221                                19, dp->vlnt[19] & 0xf,
11222                                20, dp->vlnt[20] & 0xf,
11223                                21, dp->vlnt[21] & 0xf,
11224                                22, dp->vlnt[22] & 0xf,
11225                                23, dp->vlnt[23] & 0xf,
11226                                24, dp->vlnt[24] & 0xf,
11227                                25, dp->vlnt[25] & 0xf,
11228                                26, dp->vlnt[26] & 0xf,
11229                                27, dp->vlnt[27] & 0xf,
11230                                28, dp->vlnt[28] & 0xf,
11231                                29, dp->vlnt[29] & 0xf,
11232                                30, dp->vlnt[30] & 0xf,
11233                                31, dp->vlnt[31] & 0xf));
11234 }
11235
11236 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11237                         u16 limit)
11238 {
11239         if (limit != 0)
11240                 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11241                             what, (int)limit, idx);
11242 }
11243
11244 /* change only the shared limit portion of SendCmGlobalCredit */
11245 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11246 {
11247         u64 reg;
11248
11249         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11250         reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11251         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11252         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11253 }
11254
11255 /* change only the total credit limit portion of SendCmGlobalCredit */
11256 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11257 {
11258         u64 reg;
11259
11260         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11261         reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11262         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11263         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11264 }
11265
11266 /* set the given per-VL shared limit */
11267 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11268 {
11269         u64 reg;
11270         u32 addr;
11271
11272         if (vl < TXE_NUM_DATA_VL)
11273                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11274         else
11275                 addr = SEND_CM_CREDIT_VL15;
11276
11277         reg = read_csr(dd, addr);
11278         reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11279         reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11280         write_csr(dd, addr, reg);
11281 }
11282
11283 /* set the given per-VL dedicated limit */
11284 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11285 {
11286         u64 reg;
11287         u32 addr;
11288
11289         if (vl < TXE_NUM_DATA_VL)
11290                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11291         else
11292                 addr = SEND_CM_CREDIT_VL15;
11293
11294         reg = read_csr(dd, addr);
11295         reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11296         reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11297         write_csr(dd, addr, reg);
11298 }
11299
11300 /* spin until the given per-VL status mask bits clear */
11301 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11302                                      const char *which)
11303 {
11304         unsigned long timeout;
11305         u64 reg;
11306
11307         timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11308         while (1) {
11309                 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11310
11311                 if (reg == 0)
11312                         return; /* success */
11313                 if (time_after(jiffies, timeout))
11314                         break;          /* timed out */
11315                 udelay(1);
11316         }
11317
11318         dd_dev_err(dd,
11319                    "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11320                    which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11321         /*
11322          * If this occurs, it is likely there was a credit loss on the link.
11323          * The only recovery from that is a link bounce.
11324          */
11325         dd_dev_err(dd,
11326                    "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11327 }
11328
11329 /*
11330  * The number of credits on the VLs may be changed while everything
11331  * is "live", but the following algorithm must be followed due to
11332  * how the hardware is actually implemented.  In particular,
11333  * Return_Credit_Status[] is the only correct status check.
11334  *
11335  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11336  *     set Global_Shared_Credit_Limit = 0
11337  *     use_all_vl = 1
11338  * mask0 = all VLs that are changing either dedicated or shared limits
11339  * set Shared_Limit[mask0] = 0
11340  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11341  * if (changing any dedicated limit)
11342  *     mask1 = all VLs that are lowering dedicated limits
11343  *     lower Dedicated_Limit[mask1]
11344  *     spin until Return_Credit_Status[mask1] == 0
11345  *     raise Dedicated_Limits
11346  * raise Shared_Limits
11347  * raise Global_Shared_Credit_Limit
11348  *
11349  * lower = if the new limit is lower, set the limit to the new value
11350  * raise = if the new limit is higher than the current value (may be changed
11351  *      earlier in the algorithm), set the limit to the new value
11352  */
11353 int set_buffer_control(struct hfi1_pportdata *ppd,
11354                        struct buffer_control *new_bc)
11355 {
11356         struct hfi1_devdata *dd = ppd->dd;
11357         u64 changing_mask, ld_mask, stat_mask;
11358         int change_count;
11359         int i, use_all_mask;
11360         int this_shared_changing;
11361         int vl_count = 0, ret;
11362         /*
11363          * A0: the variable any_shared_limit_changing below (and its use in the
11364          * algorithm above) exists only for A0 support and can be removed with it.
11365          */
11366         int any_shared_limit_changing;
11367         struct buffer_control cur_bc;
11368         u8 changing[OPA_MAX_VLS];
11369         u8 lowering_dedicated[OPA_MAX_VLS];
11370         u16 cur_total;
11371         u32 new_total = 0;
11372         const u64 all_mask =
11373         SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11374          | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11375          | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11376          | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11377          | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11378          | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11379          | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11380          | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11381          | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11382
11383 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11384 #define NUM_USABLE_VLS 16       /* look at VL15 and less */
11385
11386         /* find the new total credits, do sanity check on unused VLs */
11387         for (i = 0; i < OPA_MAX_VLS; i++) {
11388                 if (valid_vl(i)) {
11389                         new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11390                         continue;
11391                 }
11392                 nonzero_msg(dd, i, "dedicated",
11393                             be16_to_cpu(new_bc->vl[i].dedicated));
11394                 nonzero_msg(dd, i, "shared",
11395                             be16_to_cpu(new_bc->vl[i].shared));
11396                 new_bc->vl[i].dedicated = 0;
11397                 new_bc->vl[i].shared = 0;
11398         }
11399         new_total += be16_to_cpu(new_bc->overall_shared_limit);
11400
11401         /* fetch the current values */
11402         get_buffer_control(dd, &cur_bc, &cur_total);
11403
11404         /*
11405          * Create the masks we will use.
11406          */
11407         memset(changing, 0, sizeof(changing));
11408         memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11409         /*
11410          * NOTE: Assumes that the individual VL bits are adjacent and in
11411          * increasing order
11412          */
11413         stat_mask =
11414                 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11415         changing_mask = 0;
11416         ld_mask = 0;
11417         change_count = 0;
11418         any_shared_limit_changing = 0;
11419         for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11420                 if (!valid_vl(i))
11421                         continue;
11422                 this_shared_changing = new_bc->vl[i].shared
11423                                                 != cur_bc.vl[i].shared;
11424                 if (this_shared_changing)
11425                         any_shared_limit_changing = 1;
11426                 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11427                     this_shared_changing) {
11428                         changing[i] = 1;
11429                         changing_mask |= stat_mask;
11430                         change_count++;
11431                 }
11432                 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11433                                         be16_to_cpu(cur_bc.vl[i].dedicated)) {
11434                         lowering_dedicated[i] = 1;
11435                         ld_mask |= stat_mask;
11436                 }
11437         }
11438
11439         /* bracket the credit change with a total adjustment */
11440         if (new_total > cur_total)
11441                 set_global_limit(dd, new_total);
11442
11443         /*
11444          * Start the credit change algorithm.
11445          */
11446         use_all_mask = 0;
11447         if ((be16_to_cpu(new_bc->overall_shared_limit) <
11448              be16_to_cpu(cur_bc.overall_shared_limit)) ||
11449             (is_ax(dd) && any_shared_limit_changing)) {
11450                 set_global_shared(dd, 0);
11451                 cur_bc.overall_shared_limit = 0;
11452                 use_all_mask = 1;
11453         }
11454
11455         for (i = 0; i < NUM_USABLE_VLS; i++) {
11456                 if (!valid_vl(i))
11457                         continue;
11458
11459                 if (changing[i]) {
11460                         set_vl_shared(dd, i, 0);
11461                         cur_bc.vl[i].shared = 0;
11462                 }
11463         }
11464
11465         wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11466                                  "shared");
11467
11468         if (change_count > 0) {
11469                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11470                         if (!valid_vl(i))
11471                                 continue;
11472
11473                         if (lowering_dedicated[i]) {
11474                                 set_vl_dedicated(dd, i,
11475                                                  be16_to_cpu(new_bc->
11476                                                              vl[i].dedicated));
11477                                 cur_bc.vl[i].dedicated =
11478                                                 new_bc->vl[i].dedicated;
11479                         }
11480                 }
11481
11482                 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11483
11484                 /* now raise all dedicated that are going up */
11485                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11486                         if (!valid_vl(i))
11487                                 continue;
11488
11489                         if (be16_to_cpu(new_bc->vl[i].dedicated) >
11490                                         be16_to_cpu(cur_bc.vl[i].dedicated))
11491                                 set_vl_dedicated(dd, i,
11492                                                  be16_to_cpu(new_bc->
11493                                                              vl[i].dedicated));
11494                 }
11495         }
11496
11497         /* next raise all shared that are going up */
11498         for (i = 0; i < NUM_USABLE_VLS; i++) {
11499                 if (!valid_vl(i))
11500                         continue;
11501
11502                 if (be16_to_cpu(new_bc->vl[i].shared) >
11503                                 be16_to_cpu(cur_bc.vl[i].shared))
11504                         set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11505         }
11506
11507         /* finally raise the global shared */
11508         if (be16_to_cpu(new_bc->overall_shared_limit) >
11509             be16_to_cpu(cur_bc.overall_shared_limit))
11510                 set_global_shared(dd,
11511                                   be16_to_cpu(new_bc->overall_shared_limit));
11512
11513         /* bracket the credit change with a total adjustment */
11514         if (new_total < cur_total)
11515                 set_global_limit(dd, new_total);
11516
11517         /*
11518          * Determine the actual number of operational VLS using the number of
11519          * Determine the actual number of operational VLs using the number of
11520          */
11521         if (change_count > 0) {
11522                 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11523                         if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11524                             be16_to_cpu(new_bc->vl[i].shared) > 0)
11525                                 vl_count++;
11526                 ppd->actual_vls_operational = vl_count;
11527                 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11528                                     ppd->actual_vls_operational :
11529                                     ppd->vls_operational,
11530                                     NULL);
11531                 if (ret == 0)
11532                         ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11533                                            ppd->actual_vls_operational :
11534                                            ppd->vls_operational, NULL);
11535                 if (ret)
11536                         return ret;
11537         }
11538         return 0;
11539 }
11540
11541 /*
11542  * Read the given fabric manager table. Return the size of the
11543  * table (in bytes) on success, and a negative error code on
11544  * failure.
11545  */
11546 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11548 {
11549         int size;
11550         struct vl_arb_cache *vlc;
11551
11552         switch (which) {
11553         case FM_TBL_VL_HIGH_ARB:
11554                 size = 256;
11555                 /*
11556                  * OPA specifies 128 elements (of 2 bytes each), though
11557                  * HFI supports only 16 elements in h/w.
11558                  */
11559                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11560                 vl_arb_get_cache(vlc, t);
11561                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11562                 break;
11563         case FM_TBL_VL_LOW_ARB:
11564                 size = 256;
11565                 /*
11566                  * OPA specifies 128 elements (of 2 bytes each), though
11567                  * HFI supports only 16 elements in h/w.
11568                  */
11569                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11570                 vl_arb_get_cache(vlc, t);
11571                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11572                 break;
11573         case FM_TBL_BUFFER_CONTROL:
11574                 size = get_buffer_control(ppd->dd, t, NULL);
11575                 break;
11576         case FM_TBL_SC2VLNT:
11577                 size = get_sc2vlnt(ppd->dd, t);
11578                 break;
11579         case FM_TBL_VL_PREEMPT_ELEMS:
11580                 size = 256;
11581                 /* OPA specifies 128 elements, of 2 bytes each */
11582                 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11583                 break;
11584         case FM_TBL_VL_PREEMPT_MATRIX:
11585                 size = 256;
11586                 /*
11587                  * OPA specifies that this is the same size as the VL
11588                  * arbitration tables (i.e., 256 bytes).
11589                  */
11590                 break;
11591         default:
11592                 return -EINVAL;
11593         }
11594         return size;
11595 }
11596
11597 /*
11598  * Write the given fabric manager table.
11599  */
11600 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11601 {
11602         int ret = 0;
11603         struct vl_arb_cache *vlc;
11604
11605         switch (which) {
11606         case FM_TBL_VL_HIGH_ARB:
11607                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11608                 if (vl_arb_match_cache(vlc, t)) {
11609                         vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11610                         break;
11611                 }
11612                 vl_arb_set_cache(vlc, t);
11613                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11614                 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11615                                      VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11616                 break;
11617         case FM_TBL_VL_LOW_ARB:
11618                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11619                 if (vl_arb_match_cache(vlc, t)) {
11620                         vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11621                         break;
11622                 }
11623                 vl_arb_set_cache(vlc, t);
11624                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11625                 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11626                                      VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11627                 break;
11628         case FM_TBL_BUFFER_CONTROL:
11629                 ret = set_buffer_control(ppd, t);
11630                 break;
11631         case FM_TBL_SC2VLNT:
11632                 set_sc2vlnt(ppd->dd, t);
11633                 break;
11634         default:
11635                 ret = -EINVAL;
11636         }
11637         return ret;
11638 }
11639
11640 /*
11641  * Disable all data VLs.
11642  *
11643  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11644  */
11645 static int disable_data_vls(struct hfi1_devdata *dd)
11646 {
11647         if (is_ax(dd))
11648                 return 1;
11649
11650         pio_send_control(dd, PSC_DATA_VL_DISABLE);
11651
11652         return 0;
11653 }
11654
11655 /*
11656  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11657  * Just re-enables all data VLs (the "fill" part happens
11658  * automatically - the name was chosen for symmetry with
11659  * stop_drain_data_vls()).
11660  *
11661  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11662  */
11663 int open_fill_data_vls(struct hfi1_devdata *dd)
11664 {
11665         if (is_ax(dd))
11666                 return 1;
11667
11668         pio_send_control(dd, PSC_DATA_VL_ENABLE);
11669
11670         return 0;
11671 }
11672
11673 /*
11674  * drain_data_vls() - assumes that disable_data_vls() has been called;
11675  * waits for the occupancy (of per-VL FIFOs) of all contexts and SDMA
11676  * engines to drop to 0.
11677  */
11678 static void drain_data_vls(struct hfi1_devdata *dd)
11679 {
11680         sc_wait(dd);
11681         sdma_wait(dd);
11682         pause_for_credit_return(dd);
11683 }
11684
11685 /*
11686  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11687  *
11688  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11689  * meant to be used like this:
11690  *
11691  * stop_drain_data_vls(dd);
11692  * // do things with per-VL resources
11693  * open_fill_data_vls(dd);
11694  */
11695 int stop_drain_data_vls(struct hfi1_devdata *dd)
11696 {
11697         int ret;
11698
11699         ret = disable_data_vls(dd);
11700         if (ret == 0)
11701                 drain_data_vls(dd);
11702
11703         return ret;
11704 }
11705
11706 /*
11707  * Convert a nanosecond time to a cclock count.  No matter how slow
11708  * the cclock, a non-zero ns will always have a non-zero result.
11709  */
11710 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11711 {
11712         u32 cclocks;
11713
11714         if (dd->icode == ICODE_FPGA_EMULATION)
11715                 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11716         else  /* simulation pretends to be ASIC */
11717                 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11718         if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11719                 cclocks = 1;
11720         return cclocks;
11721 }
11722
11723 /*
11724  * Convert a cclock count to nanoseconds. No matter how slow
11725  * the cclock, a non-zero cclocks will always have a non-zero result.
11726  */
11727 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11728 {
11729         u32 ns;
11730
11731         if (dd->icode == ICODE_FPGA_EMULATION)
11732                 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11733         else  /* simulation pretends to be ASIC */
11734                 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11735         if (cclocks && !ns)
11736                 ns = 1;
11737         return ns;
11738 }
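/*
 * Worked example of the two conversions above (illustrative only; the
 * picosecond period used below is an assumption, not the real
 * ASIC_CCLOCK_PS value): with a cclock period of 800 ps,
 * ns_to_cclock(dd, 10) computes (10 * 1000) / 800 = 12 cclocks, and
 * cclock_to_ns(dd, 12) computes (12 * 800) / 1000 = 9 ns.  Integer
 * truncation makes the round trip inexact; only the guarantee that a
 * non-zero input yields a non-zero result is preserved.
 */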
11739
11740 /*
11741  * Dynamically adjust the receive interrupt timeout for a context based on
11742  * incoming packet rate.
11743  *
11744  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11745  */
11746 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11747 {
11748         struct hfi1_devdata *dd = rcd->dd;
11749         u32 timeout = rcd->rcvavail_timeout;
11750
11751         /*
11752          * This algorithm doubles or halves the timeout depending on whether
11753          * the number of packets received in this interrupt was less than or
11754          * greater than or equal to the interrupt count.
11755          *
11756          * The calculations below do not allow a steady state to be achieved.
11757          * Only at the endpoints is it possible to have an unchanging
11758          * timeout.
11759          */
11760         if (npkts < rcv_intr_count) {
11761                 /*
11762                  * Not enough packets arrived before the timeout, adjust
11763                  * timeout downward.
11764                  */
11765                 if (timeout < 2) /* already at minimum? */
11766                         return;
11767                 timeout >>= 1;
11768         } else {
11769                 /*
11770                  * More than enough packets arrived before the timeout, adjust
11771                  * timeout upward.
11772                  */
11773                 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11774                         return;
11775                 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11776         }
11777
11778         rcd->rcvavail_timeout = timeout;
11779         /*
11780          * timeout cannot be larger than rcv_intr_timeout_csr which has already
11781          * been verified to be in range
11782          */
11783         write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11784                         (u64)timeout <<
11785                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11786 }
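/*
 * Example of the doubling/halving behavior above (numbers are purely
 * illustrative): with rcd->rcvavail_timeout == 64, an interrupt that
 * delivered fewer than rcv_intr_count packets halves the timeout to 32,
 * while a busier interrupt doubles it to 128, clamped to
 * dd->rcv_intr_timeout_csr.  Repeated quiet interrupts walk the timeout
 * down to a floor of 1; repeated busy interrupts walk it up to the CSR
 * maximum.
 */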
11787
11788 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11789                     u32 intr_adjust, u32 npkts)
11790 {
11791         struct hfi1_devdata *dd = rcd->dd;
11792         u64 reg;
11793         u32 ctxt = rcd->ctxt;
11794
11795         /*
11796          * Need to write timeout register before updating RcvHdrHead to ensure
11797          * that a new value is used when the HW decides to restart counting.
11798          */
11799         if (intr_adjust)
11800                 adjust_rcv_timeout(rcd, npkts);
11801         if (updegr) {
11802                 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11803                         << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11804                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11805         }
11806         mmiowb();
11807         reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11808                 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11809                         << RCV_HDR_HEAD_HEAD_SHIFT);
11810         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11811         mmiowb();
11812 }
11813
11814 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11815 {
11816         u32 head, tail;
11817
11818         head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11819                 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11820
11821         if (rcd->rcvhdrtail_kvaddr)
11822                 tail = get_rcvhdrtail(rcd);
11823         else
11824                 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11825
11826         return head == tail;
11827 }
11828
11829 /*
11830  * Context Control and Receive Array encoding for buffer size:
11831  *      0x0 invalid
11832  *      0x1   4 KB
11833  *      0x2   8 KB
11834  *      0x3  16 KB
11835  *      0x4  32 KB
11836  *      0x5  64 KB
11837  *      0x6 128 KB
11838  *      0x7 256 KB
11839  *      0x8 512 KB (Receive Array only)
11840  *      0x9   1 MB (Receive Array only)
11841  *      0xa   2 MB (Receive Array only)
11842  *
11843  *      0xb-0xf - reserved (Receive Array only)
11844  *
11846  * This routine assumes that the value has already been sanity checked.
11847  */
11848 static u32 encoded_size(u32 size)
11849 {
11850         switch (size) {
11851         case   4 * 1024: return 0x1;
11852         case   8 * 1024: return 0x2;
11853         case  16 * 1024: return 0x3;
11854         case  32 * 1024: return 0x4;
11855         case  64 * 1024: return 0x5;
11856         case 128 * 1024: return 0x6;
11857         case 256 * 1024: return 0x7;
11858         case 512 * 1024: return 0x8;
11859         case   1 * 1024 * 1024: return 0x9;
11860         case   2 * 1024 * 1024: return 0xa;
11861         }
11862         return 0x1;     /* if invalid, go with the minimum size */
11863 }
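/*
 * Example: encoded_size(64 * 1024) returns 0x5 per the table above, while
 * an unsupported size such as 12 * 1024 falls through to the 0x1 (4 KB)
 * default - which is why callers are expected to sanity check the size
 * before calling.
 */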
11864
11865 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11866                   struct hfi1_ctxtdata *rcd)
11867 {
11868         u64 rcvctrl, reg;
11869         int did_enable = 0;
11870         u16 ctxt;
11871
11872         if (!rcd)
11873                 return;
11874
11875         ctxt = rcd->ctxt;
11876
11877         hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11878
11879         rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11880         /* if the context is already enabled, don't do the extra steps */
11881         if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11882             !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11883                 /* reset the tail and hdr addresses, and sequence count */
11884                 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11885                                 rcd->rcvhdrq_dma);
11886                 if (rcd->rcvhdrtail_kvaddr)
11887                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11888                                         rcd->rcvhdrqtailaddr_dma);
11889                 rcd->seq_cnt = 1;
11890
11891                 /* reset the cached receive header queue head value */
11892                 rcd->head = 0;
11893
11894                 /*
11895                  * Zero the receive header queue so we don't get false
11896                  * positives when checking the sequence number.  The
11897                  * sequence numbers could land exactly on the same spot.
11898                  * E.g. a rcd restart before the receive header wrapped.
11899                  */
11900                 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
11901
11902                 /* starting timeout */
11903                 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11904
11905                 /* enable the context */
11906                 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11907
11908                 /* clean the egr buffer size first */
11909                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11910                 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11911                                 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11912                                         << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11913
11914                 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11915                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11916                 did_enable = 1;
11917
11918                 /* zero RcvEgrIndexHead */
11919                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11920
11921                 /* set eager count and base index */
11922                 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11923                         & RCV_EGR_CTRL_EGR_CNT_MASK)
11924                        << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11925                         (((rcd->eager_base >> RCV_SHIFT)
11926                           & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11927                          << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11928                 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11929
11930                 /*
11931                  * Set TID (expected) count and base index.
11932                  * rcd->expected_count is set to individual RcvArray entries,
11933                  * not pairs, and the CSR takes a pair-count in groups of
11934                  * four, so divide by 8.
11935                  */
11936                 reg = (((rcd->expected_count >> RCV_SHIFT)
11937                                         & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11938                                 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11939                       (((rcd->expected_base >> RCV_SHIFT)
11940                                         & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11941                                 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11942                 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11943                 if (ctxt == HFI1_CTRL_CTXT)
11944                         write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11945         }
11946         if (op & HFI1_RCVCTRL_CTXT_DIS) {
11947                 write_csr(dd, RCV_VL15, 0);
11948                 /*
11949                  * When receive context is being disabled turn on tail
11950                  * update with a dummy tail address and then disable
11951                  * receive context.
11952                  */
11953                 if (dd->rcvhdrtail_dummy_dma) {
11954                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11955                                         dd->rcvhdrtail_dummy_dma);
11956                         /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11957                         rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11958                 }
11959
11960                 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11961         }
11962         if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
11963                 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11964                               IS_RCVAVAIL_START + rcd->ctxt, true);
11965                 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11966         }
11967         if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
11968                 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11969                               IS_RCVAVAIL_START + rcd->ctxt, false);
11970                 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11971         }
11972         if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11973                 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11974         if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11975                 /* See comment on RcvCtxtCtrl.TailUpd above */
11976                 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11977                         rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11978         }
11979         if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11980                 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11981         if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11982                 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11983         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11984                 /*
11985                  * In one-packet-per-eager mode, the size comes from
11986                  * the RcvArray entry.
11987                  */
11988                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11989                 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11990         }
11991         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11992                 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11993         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11994                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11995         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11996                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11997         if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11998                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11999         if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12000                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12001         if (op & HFI1_RCVCTRL_URGENT_ENB)
12002                 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12003                               IS_RCVURGENT_START + rcd->ctxt, true);
12004         if (op & HFI1_RCVCTRL_URGENT_DIS)
12005                 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12006                               IS_RCVURGENT_START + rcd->ctxt, false);
12007
12008         hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12009         write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12010
12011         /* work around sticky RcvCtxtStatus.BlockedRHQFull */
12012         if (did_enable &&
12013             (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12014                 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12015                 if (reg != 0) {
12016                         dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12017                                     ctxt, reg);
12018                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12019                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12020                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12021                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12022                         reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12023                         dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12024                                     ctxt, reg, reg == 0 ? "not" : "still");
12025                 }
12026         }
12027
12028         if (did_enable) {
12029                 /*
12030                  * The interrupt timeout and count must be set after
12031                  * the context is enabled to take effect.
12032                  */
12033                 /* set interrupt timeout */
12034                 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12035                                 (u64)rcd->rcvavail_timeout <<
12036                                 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12037
12038                 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12039                 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12040                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12041         }
12042
12043         if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12044                 /*
12045                  * If the context has been disabled and the Tail Update has
12046                  * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
12047                  * so it doesn't contain an address that is invalid.
12048                  */
12049                 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12050                                 dd->rcvhdrtail_dummy_dma);
12051 }
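/*
 * Illustrative call of hfi1_rcvctrl() (the op flags are OR-ed together;
 * this particular combination is an example, not a claim about any
 * specific call site in the driver):
 *
 *	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB |
 *			 HFI1_RCVCTRL_TAILUPD_ENB, rcd);
 *
 * enables the context, turns on its "receive available" interrupt, and
 * enables tail updates (the last only if rcd->rcvhdrtail_kvaddr is set).
 */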
12052
12053 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12054 {
12055         int ret;
12056         u64 val = 0;
12057
12058         if (namep) {
12059                 ret = dd->cntrnameslen;
12060                 *namep = dd->cntrnames;
12061         } else {
12062                 const struct cntr_entry *entry;
12063                 int i, j;
12064
12065                 ret = (dd->ndevcntrs) * sizeof(u64);
12066
12067                 /* Get the start of the block of counters */
12068                 *cntrp = dd->cntrs;
12069
12070                 /*
12071                  * Now go and fill in each counter in the block.
12072                  */
12073                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12074                         entry = &dev_cntrs[i];
12075                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12076                         if (entry->flags & CNTR_DISABLED) {
12077                                 /* Nothing */
12078                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12079                         } else {
12080                                 if (entry->flags & CNTR_VL) {
12081                                         hfi1_cdbg(CNTR, "\tPer VL\n");
12082                                         for (j = 0; j < C_VL_COUNT; j++) {
12083                                                 val = entry->rw_cntr(entry,
12084                                                                   dd, j,
12085                                                                   CNTR_MODE_R,
12086                                                                   0);
12087                                                 hfi1_cdbg(
12088                                                    CNTR,
12089                                                    "\t\tRead 0x%llx for %d\n",
12090                                                    val, j);
12091                                                 dd->cntrs[entry->offset + j] =
12092                                                                             val;
12093                                         }
12094                                 } else if (entry->flags & CNTR_SDMA) {
12095                                         hfi1_cdbg(CNTR,
12096                                                   "\t Per SDMA Engine\n");
12097                                         for (j = 0; j < chip_sdma_engines(dd);
12098                                              j++) {
12099                                                 val =
12100                                                 entry->rw_cntr(entry, dd, j,
12101                                                                CNTR_MODE_R, 0);
12102                                                 hfi1_cdbg(CNTR,
12103                                                           "\t\tRead 0x%llx for %d\n",
12104                                                           val, j);
12105                                                 dd->cntrs[entry->offset + j] =
12106                                                                         val;
12107                                         }
12108                                 } else {
12109                                         val = entry->rw_cntr(entry, dd,
12110                                                         CNTR_INVALID_VL,
12111                                                         CNTR_MODE_R, 0);
12112                                         dd->cntrs[entry->offset] = val;
12113                                         hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12114                                 }
12115                         }
12116                 }
12117         }
12118         return ret;
12119 }
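/*
 * Illustrative two-step use of the interface above (a sketch, not a copy
 * of the sysfs code): a call with a non-NULL namep returns the size of the
 * newline-separated name list, and a call with namep == NULL returns the
 * size of the value block and points cntrp at the freshly read counters:
 *
 *	char *names;
 *	u64 *vals;
 *	u32 name_len = hfi1_read_cntrs(dd, &names, NULL);
 *	u32 val_len  = hfi1_read_cntrs(dd, NULL, &vals);
 *
 * The value block then holds dd->ndevcntrs u64 counter values.
 */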
12120
12121 /*
12122  * Used by sysfs to create files for hfi stats to read
12123  */
12124 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12125 {
12126         int ret;
12127         u64 val = 0;
12128
12129         if (namep) {
12130                 ret = ppd->dd->portcntrnameslen;
12131                 *namep = ppd->dd->portcntrnames;
12132         } else {
12133                 const struct cntr_entry *entry;
12134                 int i, j;
12135
12136                 ret = ppd->dd->nportcntrs * sizeof(u64);
12137                 *cntrp = ppd->cntrs;
12138
12139                 for (i = 0; i < PORT_CNTR_LAST; i++) {
12140                         entry = &port_cntrs[i];
12141                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12142                         if (entry->flags & CNTR_DISABLED) {
12143                                 /* Nothing */
12144                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12145                                 continue;
12146                         }
12147
12148                         if (entry->flags & CNTR_VL) {
12149                                 hfi1_cdbg(CNTR, "\tPer VL");
12150                                 for (j = 0; j < C_VL_COUNT; j++) {
12151                                         val = entry->rw_cntr(entry, ppd, j,
12152                                                                CNTR_MODE_R,
12153                                                                0);
12154                                         hfi1_cdbg(
12155                                            CNTR,
12156                                            "\t\tRead 0x%llx for %d",
12157                                            val, j);
12158                                         ppd->cntrs[entry->offset + j] = val;
12159                                 }
12160                         } else {
12161                                 val = entry->rw_cntr(entry, ppd,
12162                                                        CNTR_INVALID_VL,
12163                                                        CNTR_MODE_R,
12164                                                        0);
12165                                 ppd->cntrs[entry->offset] = val;
12166                                 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12167                         }
12168                 }
12169         }
12170         return ret;
12171 }
12172
12173 static void free_cntrs(struct hfi1_devdata *dd)
12174 {
12175         struct hfi1_pportdata *ppd;
12176         int i;
12177
12178         if (dd->synth_stats_timer.function)
12179                 del_timer_sync(&dd->synth_stats_timer);
12180         ppd = (struct hfi1_pportdata *)(dd + 1);
12181         for (i = 0; i < dd->num_pports; i++, ppd++) {
12182                 kfree(ppd->cntrs);
12183                 kfree(ppd->scntrs);
12184                 free_percpu(ppd->ibport_data.rvp.rc_acks);
12185                 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12186                 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12187                 ppd->cntrs = NULL;
12188                 ppd->scntrs = NULL;
12189                 ppd->ibport_data.rvp.rc_acks = NULL;
12190                 ppd->ibport_data.rvp.rc_qacks = NULL;
12191                 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12192         }
12193         kfree(dd->portcntrnames);
12194         dd->portcntrnames = NULL;
12195         kfree(dd->cntrs);
12196         dd->cntrs = NULL;
12197         kfree(dd->scntrs);
12198         dd->scntrs = NULL;
12199         kfree(dd->cntrnames);
12200         dd->cntrnames = NULL;
12201         if (dd->update_cntr_wq) {
12202                 destroy_workqueue(dd->update_cntr_wq);
12203                 dd->update_cntr_wq = NULL;
12204         }
12205 }
12206
12207 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12208                               u64 *psval, void *context, int vl)
12209 {
12210         u64 val;
12211         u64 sval = *psval;
12212
12213         if (entry->flags & CNTR_DISABLED) {
12214                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12215                 return 0;
12216         }
12217
12218         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12219
12220         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12221
12222         /* If it's a synthetic counter there is more work we need to do */
12223         if (entry->flags & CNTR_SYNTH) {
12224                 if (sval == CNTR_MAX) {
12225                         /* No need to read already saturated */
12226                         return CNTR_MAX;
12227                 }
12228
12229                 if (entry->flags & CNTR_32BIT) {
12230                         /* 32bit counters can wrap multiple times */
12231                         u64 upper = sval >> 32;
12232                         u64 lower = (sval << 32) >> 32;
12233
12234                         if (lower > val) { /* hw wrapped */
12235                                 if (upper == CNTR_32BIT_MAX)
12236                                         val = CNTR_MAX;
12237                                 else
12238                                         upper++;
12239                         }
12240
12241                         if (val != CNTR_MAX)
12242                                 val = (upper << 32) | val;
12243
12244                 } else {
12245                         /* If we rolled we are saturated */
12246                         if ((val < sval) || (val > CNTR_MAX))
12247                                 val = CNTR_MAX;
12248                 }
12249         }
12250
12251         *psval = val;
12252
12253         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12254
12255         return val;
12256 }
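/*
 * Worked example of the 32-bit wrap handling above (values invented for
 * illustration): suppose the saved software value is
 * sval = 0x00000001fffffff0 (upper = 0x1, lower = 0xfffffff0) and the
 * hardware now reads val = 0x10.  Since lower > val the hardware wrapped,
 * upper is bumped to 0x2, and the returned/saved value becomes
 * (0x2 << 32) | 0x10 = 0x0000000200000010.
 */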
12257
12258 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12259                                struct cntr_entry *entry,
12260                                u64 *psval, void *context, int vl, u64 data)
12261 {
12262         u64 val;
12263
12264         if (entry->flags & CNTR_DISABLED) {
12265                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12266                 return 0;
12267         }
12268
12269         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12270
12271         if (entry->flags & CNTR_SYNTH) {
12272                 *psval = data;
12273                 if (entry->flags & CNTR_32BIT) {
12274                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12275                                              (data << 32) >> 32);
12276                         val = data; /* return the full 64bit value */
12277                 } else {
12278                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12279                                              data);
12280                 }
12281         } else {
12282                 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12283         }
12284
12285         *psval = val;
12286
12287         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12288
12289         return val;
12290 }
12291
12292 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12293 {
12294         struct cntr_entry *entry;
12295         u64 *sval;
12296
12297         entry = &dev_cntrs[index];
12298         sval = dd->scntrs + entry->offset;
12299
12300         if (vl != CNTR_INVALID_VL)
12301                 sval += vl;
12302
12303         return read_dev_port_cntr(dd, entry, sval, dd, vl);
12304 }
12305
12306 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12307 {
12308         struct cntr_entry *entry;
12309         u64 *sval;
12310
12311         entry = &dev_cntrs[index];
12312         sval = dd->scntrs + entry->offset;
12313
12314         if (vl != CNTR_INVALID_VL)
12315                 sval += vl;
12316
12317         return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12318 }
12319
12320 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12321 {
12322         struct cntr_entry *entry;
12323         u64 *sval;
12324
12325         entry = &port_cntrs[index];
12326         sval = ppd->scntrs + entry->offset;
12327
12328         if (vl != CNTR_INVALID_VL)
12329                 sval += vl;
12330
12331         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12332             (index <= C_RCV_HDR_OVF_LAST)) {
12333                 /* We do not want to bother for disabled contexts */
12334                 return 0;
12335         }
12336
12337         return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12338 }
12339
12340 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12341 {
12342         struct cntr_entry *entry;
12343         u64 *sval;
12344
12345         entry = &port_cntrs[index];
12346         sval = ppd->scntrs + entry->offset;
12347
12348         if (vl != CNTR_INVALID_VL)
12349                 sval += vl;
12350
12351         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12352             (index <= C_RCV_HDR_OVF_LAST)) {
12353                 /* We do not want to bother for disabled contexts */
12354                 return 0;
12355         }
12356
12357         return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12358 }
12359
12360 static void do_update_synth_timer(struct work_struct *work)
12361 {
12362         u64 cur_tx;
12363         u64 cur_rx;
12364         u64 total_flits;
12365         u8 update = 0;
12366         int i, j, vl;
12367         struct hfi1_pportdata *ppd;
12368         struct cntr_entry *entry;
12369         struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12370                                                update_cntr_work);
12371
12372         /*
12373          * Rather than keep beating on the CSRs, pick a minimal set that we can
12374          * check to watch for potential rollover. We can do this by looking at
12375          * the number of flits sent/received. If the total flits exceeds 32 bits
12376          * then we have to iterate all the counters and update.
12377          */
12378         entry = &dev_cntrs[C_DC_RCV_FLITS];
12379         cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12380
12381         entry = &dev_cntrs[C_DC_XMIT_FLITS];
12382         cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12383
12384         hfi1_cdbg(
12385             CNTR,
12386             "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12387             dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12388
12389         if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12390                 /*
12391                  * May not be strictly necessary to update but it won't hurt and
12392                  * simplifies the logic here.
12393                  */
12394                 update = 1;
12395                 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12396                           dd->unit);
12397         } else {
12398                 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12399                 hfi1_cdbg(CNTR,
12400                           "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12401                           total_flits, (u64)CNTR_32BIT_MAX);
12402                 if (total_flits >= CNTR_32BIT_MAX) {
12403                         hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12404                                   dd->unit);
12405                         update = 1;
12406                 }
12407         }
12408
12409         if (update) {
12410                 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12411                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12412                         entry = &dev_cntrs[i];
12413                         if (entry->flags & CNTR_VL) {
12414                                 for (vl = 0; vl < C_VL_COUNT; vl++)
12415                                         read_dev_cntr(dd, i, vl);
12416                         } else {
12417                                 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12418                         }
12419                 }
12420                 ppd = (struct hfi1_pportdata *)(dd + 1);
12421                 for (i = 0; i < dd->num_pports; i++, ppd++) {
12422                         for (j = 0; j < PORT_CNTR_LAST; j++) {
12423                                 entry = &port_cntrs[j];
12424                                 if (entry->flags & CNTR_VL) {
12425                                         for (vl = 0; vl < C_VL_COUNT; vl++)
12426                                                 read_port_cntr(ppd, j, vl);
12427                                 } else {
12428                                         read_port_cntr(ppd, j, CNTR_INVALID_VL);
12429                                 }
12430                         }
12431                 }
12432
12433                 /*
12434                  * We want the value in the register. The goal is to keep track
12435                  * of the number of "ticks" not the counter value. In other
12436                  * words if the register rolls we want to notice it and go ahead
12437                  * and force an update.
12438                  */
12439                 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12440                 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12441                                                 CNTR_MODE_R, 0);
12442
12443                 entry = &dev_cntrs[C_DC_RCV_FLITS];
12444                 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12445                                                 CNTR_MODE_R, 0);
12446
12447                 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12448                           dd->unit, dd->last_tx, dd->last_rx);
12449
12450         } else {
12451                 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12452         }
12453 }
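/*
 * Numeric sketch of the tripwire above (numbers are made up): if the last
 * snapshot was last_tx = 0x1000 and last_rx = 0x2000 and the current reads
 * are cur_tx = 0x80001000 and cur_rx = 0x80002000, then
 * total_flits = 0x80000000 + 0x80000000 = 0x100000000 >= CNTR_32BIT_MAX,
 * so every device and port counter is re-read to fold in any 32-bit wraps
 * before they can be missed.
 */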
12454
12455 static void update_synth_timer(struct timer_list *t)
12456 {
12457         struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12458
12459         queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12460         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12461 }
12462
12463 #define C_MAX_NAME 16 /* 15 chars + one for '\0' */
12464 static int init_cntrs(struct hfi1_devdata *dd)
12465 {
12466         int i, rcv_ctxts, j;
12467         size_t sz;
12468         char *p;
12469         char name[C_MAX_NAME];
12470         struct hfi1_pportdata *ppd;
12471         const char *bit_type_32 = ",32";
12472         const int bit_type_32_sz = strlen(bit_type_32);
12473         u32 sdma_engines = chip_sdma_engines(dd);
12474
12475         /* set up the stats timer; the add_timer is done at the end */
12476         timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12477
12478         /***********************/
12479         /* per device counters */
12480         /***********************/
12481
12482         /* size names and determine how many we have */
12483         dd->ndevcntrs = 0;
12484         sz = 0;
12485
12486         for (i = 0; i < DEV_CNTR_LAST; i++) {
12487                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12488                         hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12489                         continue;
12490                 }
12491
12492                 if (dev_cntrs[i].flags & CNTR_VL) {
12493                         dev_cntrs[i].offset = dd->ndevcntrs;
12494                         for (j = 0; j < C_VL_COUNT; j++) {
12495                                 snprintf(name, C_MAX_NAME, "%s%d",
12496                                          dev_cntrs[i].name, vl_from_idx(j));
12497                                 sz += strlen(name);
12498                                 /* Add ",32" for 32-bit counters */
12499                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12500                                         sz += bit_type_32_sz;
12501                                 sz++;
12502                                 dd->ndevcntrs++;
12503                         }
12504                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12505                         dev_cntrs[i].offset = dd->ndevcntrs;
12506                         for (j = 0; j < sdma_engines; j++) {
12507                                 snprintf(name, C_MAX_NAME, "%s%d",
12508                                          dev_cntrs[i].name, j);
12509                                 sz += strlen(name);
12510                                 /* Add ",32" for 32-bit counters */
12511                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12512                                         sz += bit_type_32_sz;
12513                                 sz++;
12514                                 dd->ndevcntrs++;
12515                         }
12516                 } else {
12517                         /* +1 for newline. */
12518                         sz += strlen(dev_cntrs[i].name) + 1;
12519                         /* Add ",32" for 32-bit counters */
12520                         if (dev_cntrs[i].flags & CNTR_32BIT)
12521                                 sz += bit_type_32_sz;
12522                         dev_cntrs[i].offset = dd->ndevcntrs;
12523                         dd->ndevcntrs++;
12524                 }
12525         }
12526
12527         /* allocate space for the counter values */
12528         dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12529                             GFP_KERNEL);
12530         if (!dd->cntrs)
12531                 goto bail;
12532
12533         dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12534         if (!dd->scntrs)
12535                 goto bail;
12536
12537         /* allocate space for the counter names */
12538         dd->cntrnameslen = sz;
12539         dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12540         if (!dd->cntrnames)
12541                 goto bail;
12542
12543         /* fill in the names */
12544         for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12545                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12546                         /* Nothing */
12547                 } else if (dev_cntrs[i].flags & CNTR_VL) {
12548                         for (j = 0; j < C_VL_COUNT; j++) {
12549                                 snprintf(name, C_MAX_NAME, "%s%d",
12550                                          dev_cntrs[i].name,
12551                                          vl_from_idx(j));
12552                                 memcpy(p, name, strlen(name));
12553                                 p += strlen(name);
12554
12555                                 /* Counter is 32 bits */
12556                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12557                                         memcpy(p, bit_type_32, bit_type_32_sz);
12558                                         p += bit_type_32_sz;
12559                                 }
12560
12561                                 *p++ = '\n';
12562                         }
12563                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12564                         for (j = 0; j < sdma_engines; j++) {
12565                                 snprintf(name, C_MAX_NAME, "%s%d",
12566                                          dev_cntrs[i].name, j);
12567                                 memcpy(p, name, strlen(name));
12568                                 p += strlen(name);
12569
12570                                 /* Counter is 32 bits */
12571                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12572                                         memcpy(p, bit_type_32, bit_type_32_sz);
12573                                         p += bit_type_32_sz;
12574                                 }
12575
12576                                 *p++ = '\n';
12577                         }
12578                 } else {
12579                         memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12580                         p += strlen(dev_cntrs[i].name);
12581
12582                         /* Counter is 32 bits */
12583                         if (dev_cntrs[i].flags & CNTR_32BIT) {
12584                                 memcpy(p, bit_type_32, bit_type_32_sz);
12585                                 p += bit_type_32_sz;
12586                         }
12587
12588                         *p++ = '\n';
12589                 }
12590         }
12591
12592         /*********************/
12593         /* per port counters */
12594         /*********************/
12595
12596         /*
12597          * Go through the counters for the overflows and disable the ones we
12598          * don't need. This varies based on platform so we need to do it
12599          * dynamically here.
12600          */
12601         rcv_ctxts = dd->num_rcv_contexts;
12602         for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12603              i <= C_RCV_HDR_OVF_LAST; i++) {
12604                 port_cntrs[i].flags |= CNTR_DISABLED;
12605         }
12606
12607         /* size port counter names and determine how many we have */
12608         sz = 0;
12609         dd->nportcntrs = 0;
12610         for (i = 0; i < PORT_CNTR_LAST; i++) {
12611                 if (port_cntrs[i].flags & CNTR_DISABLED) {
12612                         hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12613                         continue;
12614                 }
12615
12616                 if (port_cntrs[i].flags & CNTR_VL) {
12617                         port_cntrs[i].offset = dd->nportcntrs;
12618                         for (j = 0; j < C_VL_COUNT; j++) {
12619                                 snprintf(name, C_MAX_NAME, "%s%d",
12620                                          port_cntrs[i].name, vl_from_idx(j));
12621                                 sz += strlen(name);
12622                                 /* Add ",32" for 32-bit counters */
12623                                 if (port_cntrs[i].flags & CNTR_32BIT)
12624                                         sz += bit_type_32_sz;
12625                                 sz++;
12626                                 dd->nportcntrs++;
12627                         }
12628                 } else {
12629                         /* +1 for newline */
12630                         sz += strlen(port_cntrs[i].name) + 1;
12631                         /* Add ",32" for 32-bit counters */
12632                         if (port_cntrs[i].flags & CNTR_32BIT)
12633                                 sz += bit_type_32_sz;
12634                         port_cntrs[i].offset = dd->nportcntrs;
12635                         dd->nportcntrs++;
12636                 }
12637         }
12638
12639         /* allocate space for the counter names */
12640         dd->portcntrnameslen = sz;
12641         dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12642         if (!dd->portcntrnames)
12643                 goto bail;
12644
12645         /* fill in port cntr names */
12646         for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12647                 if (port_cntrs[i].flags & CNTR_DISABLED)
12648                         continue;
12649
12650                 if (port_cntrs[i].flags & CNTR_VL) {
12651                         for (j = 0; j < C_VL_COUNT; j++) {
12652                                 snprintf(name, C_MAX_NAME, "%s%d",
12653                                          port_cntrs[i].name, vl_from_idx(j));
12654                                 memcpy(p, name, strlen(name));
12655                                 p += strlen(name);
12656
12657                                 /* Counter is 32 bits */
12658                                 if (port_cntrs[i].flags & CNTR_32BIT) {
12659                                         memcpy(p, bit_type_32, bit_type_32_sz);
12660                                         p += bit_type_32_sz;
12661                                 }
12662
12663                                 *p++ = '\n';
12664                         }
12665                 } else {
12666                         memcpy(p, port_cntrs[i].name,
12667                                strlen(port_cntrs[i].name));
12668                         p += strlen(port_cntrs[i].name);
12669
12670                         /* Counter is 32 bits */
12671                         if (port_cntrs[i].flags & CNTR_32BIT) {
12672                                 memcpy(p, bit_type_32, bit_type_32_sz);
12673                                 p += bit_type_32_sz;
12674                         }
12675
12676                         *p++ = '\n';
12677                 }
12678         }
12679
12680         /* allocate per port storage for counter values */
12681         ppd = (struct hfi1_pportdata *)(dd + 1);
12682         for (i = 0; i < dd->num_pports; i++, ppd++) {
12683                 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12684                 if (!ppd->cntrs)
12685                         goto bail;
12686
12687                 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12688                 if (!ppd->scntrs)
12689                         goto bail;
12690         }
12691
12692         /* CPU counters need to be allocated and zeroed */
12693         if (init_cpu_counters(dd))
12694                 goto bail;
12695
12696         dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12697                                                      WQ_MEM_RECLAIM, dd->unit);
12698         if (!dd->update_cntr_wq)
12699                 goto bail;
12700
12701         INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12702
12703         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12704         return 0;
12705 bail:
12706         free_cntrs(dd);
12707         return -ENOMEM;
12708 }
12709
12710 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12711 {
12712         switch (chip_lstate) {
12713         default:
12714                 dd_dev_err(dd,
12715                            "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12716                            chip_lstate);
12717                 /* fall through */
12718         case LSTATE_DOWN:
12719                 return IB_PORT_DOWN;
12720         case LSTATE_INIT:
12721                 return IB_PORT_INIT;
12722         case LSTATE_ARMED:
12723                 return IB_PORT_ARMED;
12724         case LSTATE_ACTIVE:
12725                 return IB_PORT_ACTIVE;
12726         }
12727 }
12728
12729 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12730 {
12731         /* look at the HFI meta-states only */
12732         switch (chip_pstate & 0xf0) {
12733         default:
12734                 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12735                            chip_pstate);
12736                 /* fall through */
12737         case PLS_DISABLED:
12738                 return IB_PORTPHYSSTATE_DISABLED;
12739         case PLS_OFFLINE:
12740                 return OPA_PORTPHYSSTATE_OFFLINE;
12741         case PLS_POLLING:
12742                 return IB_PORTPHYSSTATE_POLLING;
12743         case PLS_CONFIGPHY:
12744                 return IB_PORTPHYSSTATE_TRAINING;
12745         case PLS_LINKUP:
12746                 return IB_PORTPHYSSTATE_LINKUP;
12747         case PLS_PHYTEST:
12748                 return IB_PORTPHYSSTATE_PHY_TEST;
12749         }
12750 }
12751
12752 /* return the OPA port logical state name */
12753 const char *opa_lstate_name(u32 lstate)
12754 {
12755         static const char * const port_logical_names[] = {
12756                 "PORT_NOP",
12757                 "PORT_DOWN",
12758                 "PORT_INIT",
12759                 "PORT_ARMED",
12760                 "PORT_ACTIVE",
12761                 "PORT_ACTIVE_DEFER",
12762         };
12763         if (lstate < ARRAY_SIZE(port_logical_names))
12764                 return port_logical_names[lstate];
12765         return "unknown";
12766 }
12767
12768 /* return the OPA port physical state name */
12769 const char *opa_pstate_name(u32 pstate)
12770 {
12771         static const char * const port_physical_names[] = {
12772                 "PHYS_NOP",
12773                 "reserved1",
12774                 "PHYS_POLL",
12775                 "PHYS_DISABLED",
12776                 "PHYS_TRAINING",
12777                 "PHYS_LINKUP",
12778                 "PHYS_LINK_ERR_RECOVER",
12779                 "PHYS_PHY_TEST",
12780                 "reserved8",
12781                 "PHYS_OFFLINE",
12782                 "PHYS_GANGED",
12783                 "PHYS_TEST",
12784         };
12785         if (pstate < ARRAY_SIZE(port_physical_names))
12786                 return port_physical_names[pstate];
12787         return "unknown";
12788 }
12789
12790 /**
12791  * update_statusp - Update userspace status flag
12792  * @ppd: Port data structure
12793  * @state: port state information
12794  *
12795  * Actual port status is determined by the host_link_state value
12796  * in the ppd.
12797  *
12798  * host_link_state MUST be updated before updating the user space
12799  * statusp.
12800  */
12801 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12802 {
12803         /*
12804          * Set port status flags in the page mapped into userspace
12805          * memory. Do it here to ensure a reliable state - this is
12806          * the only function called by all state handling code.
12807          * Always set the flags due to the fact that the cache value
12808          * might have been changed explicitly outside of this
12809          * function.
12810          */
12811         if (ppd->statusp) {
12812                 switch (state) {
12813                 case IB_PORT_DOWN:
12814                 case IB_PORT_INIT:
12815                         *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12816                                            HFI1_STATUS_IB_READY);
12817                         break;
12818                 case IB_PORT_ARMED:
12819                         *ppd->statusp |= HFI1_STATUS_IB_CONF;
12820                         break;
12821                 case IB_PORT_ACTIVE:
12822                         *ppd->statusp |= HFI1_STATUS_IB_READY;
12823                         break;
12824                 }
12825         }
12826         dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12827                     opa_lstate_name(state), state);
12828 }
12829
12830 /**
12831  * wait_logical_linkstate - wait for an IB link state change to occur
12832  * @ppd: port device
12833  * @state: the state to wait for
12834  * @msecs: the number of milliseconds to wait
12835  *
12836  * Wait up to msecs milliseconds for an IB link state change to occur.
12837  * For now, take the easy polling route.
12838  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12839  */
12840 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12841                                   int msecs)
12842 {
12843         unsigned long timeout;
12844         u32 new_state;
12845
12846         timeout = jiffies + msecs_to_jiffies(msecs);
12847         while (1) {
12848                 new_state = chip_to_opa_lstate(ppd->dd,
12849                                                read_logical_state(ppd->dd));
12850                 if (new_state == state)
12851                         break;
12852                 if (time_after(jiffies, timeout)) {
12853                         dd_dev_err(ppd->dd,
12854                                    "timeout waiting for link state 0x%x\n",
12855                                    state);
12856                         return -ETIMEDOUT;
12857                 }
12858                 msleep(20);
12859         }
12860
12861         return 0;
12862 }
12863
12864 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12865 {
12866         u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12867
12868         dd_dev_info(ppd->dd,
12869                     "physical state changed to %s (0x%x), phy 0x%x\n",
12870                     opa_pstate_name(ib_pstate), ib_pstate, state);
12871 }
12872
12873 /*
12874  * Read the physical hardware link state and check if it matches the
12875  * host driver's anticipated state.
12876  */
12877 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12878 {
12879         u32 read_state = read_physical_state(ppd->dd);
12880
12881         if (read_state == state) {
12882                 log_state_transition(ppd, state);
12883         } else {
12884                 dd_dev_err(ppd->dd,
12885                            "anticipated phy link state 0x%x, read 0x%x\n",
12886                            state, read_state);
12887         }
12888 }
12889
12890 /*
12891  * wait_physical_linkstate - wait for a physical link state change to occur
12892  * @ppd: port device
12893  * @state: the state to wait for
12894  * @msecs: the number of milliseconds to wait
12895  *
12896  * Wait up to msecs milliseconds for a physical link state change to occur.
12897  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12898  */
12899 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12900                                    int msecs)
12901 {
12902         u32 read_state;
12903         unsigned long timeout;
12904
12905         timeout = jiffies + msecs_to_jiffies(msecs);
12906         while (1) {
12907                 read_state = read_physical_state(ppd->dd);
12908                 if (read_state == state)
12909                         break;
12910                 if (time_after(jiffies, timeout)) {
12911                         dd_dev_err(ppd->dd,
12912                                    "timeout waiting for phy link state 0x%x\n",
12913                                    state);
12914                         return -ETIMEDOUT;
12915                 }
12916                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12917         }
12918
12919         log_state_transition(ppd, state);
12920         return 0;
12921 }
12922
12923 /*
12924  * wait_phys_link_offline_substates - wait for any offline substate
12925  * @ppd: port device
12926  * @msecs: the number of milliseconds to wait
12927  *
12928  * Wait up to msecs milliseconds for the physical link to reach any
12929  * offline substate.
12930  * Returns the read physical state if one is reached, otherwise -ETIMEDOUT.
12931  */
12932 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12933                                             int msecs)
12934 {
12935         u32 read_state;
12936         unsigned long timeout;
12937
12938         timeout = jiffies + msecs_to_jiffies(msecs);
12939         while (1) {
12940                 read_state = read_physical_state(ppd->dd);
12941                 if ((read_state & 0xF0) == PLS_OFFLINE)
12942                         break;
12943                 if (time_after(jiffies, timeout)) {
12944                         dd_dev_err(ppd->dd,
12945                                    "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12946                                    read_state, msecs);
12947                         return -ETIMEDOUT;
12948                 }
12949                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12950         }
12951
12952         log_state_transition(ppd, read_state);
12953         return read_state;
12954 }
12955
12956 /*
12957  * wait_phys_link_out_of_offline - wait for the link to leave offline
12958  * @ppd: port device
12959  * @msecs: the number of milliseconds to wait
12960  *
12961  * Wait up to msecs milliseconds for the physical link to leave the
12962  * offline state.
12963  * Returns the read physical state once out of offline, otherwise -ETIMEDOUT.
12964  */
12965 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12966                                          int msecs)
12967 {
12968         u32 read_state;
12969         unsigned long timeout;
12970
12971         timeout = jiffies + msecs_to_jiffies(msecs);
12972         while (1) {
12973                 read_state = read_physical_state(ppd->dd);
12974                 if ((read_state & 0xF0) != PLS_OFFLINE)
12975                         break;
12976                 if (time_after(jiffies, timeout)) {
12977                         dd_dev_err(ppd->dd,
12978                                    "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12979                                    read_state, msecs);
12980                         return -ETIMEDOUT;
12981                 }
12982                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12983         }
12984
12985         log_state_transition(ppd, read_state);
12986         return read_state;
12987 }
12988
12989 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12990 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12991
12992 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12993 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12994
12995 void hfi1_init_ctxt(struct send_context *sc)
12996 {
12997         if (sc) {
12998                 struct hfi1_devdata *dd = sc->dd;
12999                 u64 reg;
13000                 u8 set = (sc->type == SC_USER ?
13001                           HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13002                           HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13003                 reg = read_kctxt_csr(dd, sc->hw_context,
13004                                      SEND_CTXT_CHECK_ENABLE);
13005                 if (set)
13006                         CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13007                 else
13008                         SET_STATIC_RATE_CONTROL_SMASK(reg);
13009                 write_kctxt_csr(dd, sc->hw_context,
13010                                 SEND_CTXT_CHECK_ENABLE, reg);
13011         }
13012 }
13013
13014 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13015 {
13016         int ret = 0;
13017         u64 reg;
13018
13019         if (dd->icode != ICODE_RTL_SILICON) {
13020                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13021                         dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13022                                     __func__);
13023                 return -EINVAL;
13024         }
13025         reg = read_csr(dd, ASIC_STS_THERM);
13026         temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13027                       ASIC_STS_THERM_CURR_TEMP_MASK);
13028         temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13029                         ASIC_STS_THERM_LO_TEMP_MASK);
13030         temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13031                         ASIC_STS_THERM_HI_TEMP_MASK);
13032         temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13033                           ASIC_STS_THERM_CRIT_TEMP_MASK);
13034         /* triggers is a 3-bit value - 1 bit per trigger. */
13035         temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13036
13037         return ret;
13038 }
13039
13040 /* ========================================================================= */
13041
13042 /**
13043  * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13044  * @dd: valid devdata
13045  * @src: IRQ source to determine register index from
13046  * @bits: the bits to set or clear
13047  * @set: true == set the bits, false == clear the bits
13048  *
13049  */
13050 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
13051                            bool set)
13052 {
13053         u64 reg;
13054         u16 idx = src / BITS_PER_REGISTER;
13055
13056         spin_lock(&dd->irq_src_lock);
13057         reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
13058         if (set)
13059                 reg |= bits;
13060         else
13061                 reg &= ~bits;
13062         write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
13063         spin_unlock(&dd->irq_src_lock);
13064 }
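
/*
 * Worked example (illustrative only, not driver code), assuming
 * BITS_PER_REGISTER is 64: for src == 70, idx = 70 / 64 = 1, so the
 * mask register at CCE_INT_MASK + 8 is read, the caller's bits are
 * set or cleared in it, and it is written back under irq_src_lock.
 */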
13065
13066 /**
13067  * set_intr_bits() - Enable/disable a range of (one or more) IRQ sources
13068  * @dd: valid devdata
13069  * @first: first IRQ source to set/clear
13070  * @last: last IRQ source (inclusive) to set/clear
13071  * @set: true == set the bits, false == clear the bits
13072  *
13073  * If first == last, set the exact source.
13074  */
13075 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
13076 {
13077         u64 bits = 0;
13078         u64 bit;
13079         u16 src;
13080
13081         if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
13082                 return -EINVAL;
13083
13084         if (last < first)
13085                 return -ERANGE;
13086
13087         for (src = first; src <= last; src++) {
13088                 bit = src % BITS_PER_REGISTER;
13089                 /* wrapped to next register? */
13090                 if (!bit && bits) {
13091                         read_mod_write(dd, src - 1, bits, set);
13092                         bits = 0;
13093                 }
13094                 bits |= BIT_ULL(bit);
13095         }
13096         read_mod_write(dd, last, bits, set);
13097
13098         return 0;
13099 }
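
/*
 * Usage sketch (illustrative only; "engine" is a hypothetical SDMA
 * engine index):
 *
 *	set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, true);
 *	set_intr_bits(dd, IS_SDMA_START + engine,
 *		      IS_SDMA_START + engine, false);
 *
 * The first call unmasks every chip interrupt source; the second masks
 * a single source again.  Ranges that straddle a 64-bit mask register
 * are split into one read_mod_write() per register.
 */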
13100
13101 /*
13102  * Clear all interrupt sources on the chip.
13103  */
13104 void clear_all_interrupts(struct hfi1_devdata *dd)
13105 {
13106         int i;
13107
13108         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13109                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13110
13111         write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13112         write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13113         write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13114         write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13115         write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13116         write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13117         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13118         for (i = 0; i < chip_send_contexts(dd); i++)
13119                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13120         for (i = 0; i < chip_sdma_engines(dd); i++)
13121                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13122
13123         write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13124         write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13125         write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13126 }
13127
13128 /*
13129  * Remap the interrupt source from the general handler to the given MSI-X
13130  * interrupt.
13131  */
13132 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13133 {
13134         u64 reg;
13135         int m, n;
13136
13137         /* clear from the handled mask of the general interrupt */
13138         m = isrc / 64;
13139         n = isrc % 64;
13140         if (likely(m < CCE_NUM_INT_CSRS)) {
13141                 dd->gi_mask[m] &= ~((u64)1 << n);
13142         } else {
13143                 dd_dev_err(dd, "remap interrupt err\n");
13144                 return;
13145         }
13146
13147         /* direct the chip source to the given MSI-X interrupt */
13148         m = isrc / 8;
13149         n = isrc % 8;
13150         reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13151         reg &= ~((u64)0xff << (8 * n));
13152         reg |= ((u64)msix_intr & 0xff) << (8 * n);
13153         write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13154 }
13155
13156 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
13157 {
13158         /*
13159          * SDMA engine interrupt sources are grouped by type, rather than
13160          * by engine.  Per-engine interrupts are as follows:
13161          *      SDMA
13162          *      SDMAProgress
13163          *      SDMAIdle
13164          */
13165         remap_intr(dd, IS_SDMA_START + engine, msix_intr);
13166         remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
13167         remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
13168 }
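
/*
 * Example (illustrative values): remap_sdma_interrupts(dd, 3, 7) points
 * the SDMA, SDMAProgress and SDMAIdle sources of engine 3 at MSI-X
 * vector 7, i.e. it remaps IS_SDMA_START + 3, IS_SDMA_PROGRESS_START + 3
 * and IS_SDMA_IDLE_START + 3.
 */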
13169
13170 /*
13171  * Set the general handler to accept all interrupts, remap all
13172  * chip interrupts back to MSI-X 0.
13173  */
13174 void reset_interrupts(struct hfi1_devdata *dd)
13175 {
13176         int i;
13177
13178         /* all interrupts handled by the general handler */
13179         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13180                 dd->gi_mask[i] = ~(u64)0;
13181
13182         /* all chip interrupts map to MSI-X 0 */
13183         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13184                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13185 }
13186
13187 /**
13188  * set_up_interrupts() - Initialize the IRQ resources and state
13189  * @dd: valid devdata
13190  *
13191  */
13192 static int set_up_interrupts(struct hfi1_devdata *dd)
13193 {
13194         int ret;
13195
13196         /* mask all interrupts */
13197         set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
13198
13199         /* clear all pending interrupts */
13200         clear_all_interrupts(dd);
13201
13202         /* reset general handler mask, chip MSI-X mappings */
13203         reset_interrupts(dd);
13204
13205         /* ask for MSI-X interrupts */
13206         ret = msix_initialize(dd);
13207         if (ret)
13208                 return ret;
13209
13210         ret = msix_request_irqs(dd);
13211         if (ret)
13212                 msix_clean_up_interrupts(dd);
13213
13214         return ret;
13215 }
13216
13217 /*
13218  * Set up context values in dd.  Sets:
13219  *
13220  *      num_rcv_contexts - number of contexts being used
13221  *      n_krcv_queues - number of kernel contexts
13222  *      first_dyn_alloc_ctxt - first dynamically allocated context
13223  *                             in array of contexts
13224  *      freectxts  - number of free user contexts
13225  *      num_send_contexts - number of PIO send contexts being used
13226  *      num_vnic_contexts - number of contexts reserved for VNIC
13227  */
13228 static int set_up_context_variables(struct hfi1_devdata *dd)
13229 {
13230         unsigned long num_kernel_contexts;
13231         u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13232         int total_contexts;
13233         int ret;
13234         unsigned ngroups;
13235         int rmt_count;
13236         int user_rmt_reduced;
13237         u32 n_usr_ctxts;
13238         u32 send_contexts = chip_send_contexts(dd);
13239         u32 rcv_contexts = chip_rcv_contexts(dd);
13240
13241         /*
13242          * Kernel receive contexts:
13243          * - Context 0 - control context (VL15/multicast/error)
13244          * - Context 1 - first kernel context
13245          * - Context 2 - second kernel context
13246          * ...
13247          */
13248         if (n_krcvqs)
13249                 /*
13250                  * n_krcvqs is the sum of module parameter kernel receive
13251                  * contexts, krcvqs[].  It does not include the control
13252                  * context, so add that.
13253                  */
13254                 num_kernel_contexts = n_krcvqs + 1;
13255         else
13256                 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13257         /*
13258          * Every kernel receive context needs an ACK send context.
13259          * One send context is allocated for each VL{0-7} and VL15.
13260          */
13261         if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13262                 dd_dev_err(dd,
13263                            "Reducing # kernel rcv contexts to: %d, from %lu\n",
13264                            send_contexts - num_vls - 1,
13265                            num_kernel_contexts);
13266                 num_kernel_contexts = send_contexts - num_vls - 1;
13267         }
13268
13269         /* Accommodate VNIC contexts if possible */
13270         if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
13271                 dd_dev_err(dd, "No receive contexts available for VNIC\n");
13272                 num_vnic_contexts = 0;
13273         }
13274         total_contexts = num_kernel_contexts + num_vnic_contexts;
13275
13276         /*
13277          * User contexts:
13278          *      - default to 1 user context per real (non-HT) CPU core if
13279          *        num_user_contexts is negative
13280          */
13281         if (num_user_contexts < 0)
13282                 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13283         else
13284                 n_usr_ctxts = num_user_contexts;
13285         /*
13286          * Adjust the counts given a global max.
13287          */
13288         if (total_contexts + n_usr_ctxts > rcv_contexts) {
13289                 dd_dev_err(dd,
13290                            "Reducing # user receive contexts to: %d, from %u\n",
13291                            rcv_contexts - total_contexts,
13292                            n_usr_ctxts);
13293                 /* recalculate */
13294                 n_usr_ctxts = rcv_contexts - total_contexts;
13295         }
13296
13297         /*
13298          * The RMT entries are currently allocated as shown below:
13299          * 1. QOS (0 to 128 entries);
13300          * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
13301          * 3. VNIC (num_vnic_contexts).
13302          * It should be noted that PSM FECN oversubscribes num_vnic_contexts
13303          * entries of RMT because both VNIC and PSM could allocate any receive
13304          * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
13305          * and PSM FECN must reserve an RMT entry for each possible PSM receive
13306          * context.
13307          */
13308         rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
13309         if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13310                 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13311                 dd_dev_err(dd,
13312                            "RMT size is reducing the number of user receive contexts from %u to %d\n",
13313                            n_usr_ctxts,
13314                            user_rmt_reduced);
13315                 /* recalculate */
13316                 n_usr_ctxts = user_rmt_reduced;
13317         }
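
        /*
         * Numeric sketch (hypothetical values): if qos_rmt_entries()
         * returned 32 and num_vnic_contexts is 8, rmt_count is
         * 32 + (8 * 2) = 48.  Assuming NUM_MAP_ENTRIES is 256, at most
         * 256 - 48 = 208 user receive contexts can then be covered by
         * the RMT.
         */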
13318
13319         total_contexts += n_usr_ctxts;
13320
13321         /* the first N are kernel contexts, the rest are user/vnic contexts */
13322         dd->num_rcv_contexts = total_contexts;
13323         dd->n_krcv_queues = num_kernel_contexts;
13324         dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13325         dd->num_vnic_contexts = num_vnic_contexts;
13326         dd->num_user_contexts = n_usr_ctxts;
13327         dd->freectxts = n_usr_ctxts;
13328         dd_dev_info(dd,
13329                     "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13330                     rcv_contexts,
13331                     (int)dd->num_rcv_contexts,
13332                     (int)dd->n_krcv_queues,
13333                     dd->num_vnic_contexts,
13334                     dd->num_user_contexts);
13335
13336         /*
13337          * Receive array allocation:
13338          *   All RcvArray entries are divided into groups of 8. This
13339          *   is required by the hardware and will speed up writes to
13340          *   consecutive entries by using write-combining of the entire
13341          *   cacheline.
13342          *
13343          *   The groups are evenly divided among all contexts; any
13344          *   left-over groups are given to the first N user
13345          *   contexts.
13346          */
13347         dd->rcv_entries.group_size = RCV_INCREMENT;
13348         ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13349         dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13350         dd->rcv_entries.nctxt_extra = ngroups -
13351                 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13352         dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13353                     dd->rcv_entries.ngroups,
13354                     dd->rcv_entries.nctxt_extra);
13355         if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13356             MAX_EAGER_ENTRIES * 2) {
13357                 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13358                         dd->rcv_entries.group_size;
13359                 dd_dev_info(dd,
13360                             "RcvArray group count too high, change to %u\n",
13361                             dd->rcv_entries.ngroups);
13362                 dd->rcv_entries.nctxt_extra = 0;
13363         }
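
        /*
         * Numeric sketch (hypothetical values, assuming a group size of
         * 8): a receive array of 8192 entries gives 1024 groups; with 40
         * receive contexts each context gets 1024 / 40 = 25 groups and
         * the remaining 1024 - (40 * 25) = 24 groups become nctxt_extra,
         * handed out to the first user contexts.
         */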
13364         /*
13365          * PIO send contexts
13366          */
13367         ret = init_sc_pools_and_sizes(dd);
13368         if (ret >= 0) { /* success */
13369                 dd->num_send_contexts = ret;
13370                 dd_dev_info(
13371                         dd,
13372                         "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13373                         send_contexts,
13374                         dd->num_send_contexts,
13375                         dd->sc_sizes[SC_KERNEL].count,
13376                         dd->sc_sizes[SC_ACK].count,
13377                         dd->sc_sizes[SC_USER].count,
13378                         dd->sc_sizes[SC_VL15].count);
13379                 ret = 0;        /* success */
13380         }
13381
13382         return ret;
13383 }
13384
13385 /*
13386  * Set the device/port partition key table. The MAD code
13387  * will ensure that, at least, the partial management
13388  * partition key is present in the table.
13389  */
13390 static void set_partition_keys(struct hfi1_pportdata *ppd)
13391 {
13392         struct hfi1_devdata *dd = ppd->dd;
13393         u64 reg = 0;
13394         int i;
13395
13396         dd_dev_info(dd, "Setting partition keys\n");
13397         for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13398                 reg |= (ppd->pkeys[i] &
13399                         RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13400                         ((i % 4) *
13401                          RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13402                 /* Each register holds 4 PKey values. */
13403                 if ((i % 4) == 3) {
13404                         write_csr(dd, RCV_PARTITION_KEY +
13405                                   ((i - 3) * 2), reg);
13406                         reg = 0;
13407                 }
13408         }
13409
13410         /* Always enable HW pkeys check when pkeys table is set */
13411         add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13412 }
13413
13414 /*
13415  * These CSRs and memories are uninitialized on reset and must be
13416  * written before reading to set the ECC/parity bits.
13417  *
13418  * NOTE: All user context CSRs that are not mmapped write-only
13419  * (e.g. the TID flows) must be initialized even if the driver never
13420  * reads them.
13421  */
13422 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13423 {
13424         int i, j;
13425
13426         /* CceIntMap */
13427         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13428                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13429
13430         /* SendCtxtCreditReturnAddr */
13431         for (i = 0; i < chip_send_contexts(dd); i++)
13432                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13433
13434         /* PIO Send buffers */
13435         /* SDMA Send buffers */
13436         /*
13437          * These are not normally read, and (presently) have no method
13438          * to be read, so are not pre-initialized
13439          */
13440
13441         /* RcvHdrAddr */
13442         /* RcvHdrTailAddr */
13443         /* RcvTidFlowTable */
13444         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13445                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13446                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13447                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13448                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13449         }
13450
13451         /* RcvArray */
13452         for (i = 0; i < chip_rcv_array_count(dd); i++)
13453                 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13454
13455         /* RcvQPMapTable */
13456         for (i = 0; i < 32; i++)
13457                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13458 }
13459
13460 /*
13461  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13462  */
13463 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13464                              u64 ctrl_bits)
13465 {
13466         unsigned long timeout;
13467         u64 reg;
13468
13469         /* is the condition present? */
13470         reg = read_csr(dd, CCE_STATUS);
13471         if ((reg & status_bits) == 0)
13472                 return;
13473
13474         /* clear the condition */
13475         write_csr(dd, CCE_CTRL, ctrl_bits);
13476
13477         /* wait for the condition to clear */
13478         timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13479         while (1) {
13480                 reg = read_csr(dd, CCE_STATUS);
13481                 if ((reg & status_bits) == 0)
13482                         return;
13483                 if (time_after(jiffies, timeout)) {
13484                         dd_dev_err(dd,
13485                                    "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13486                                    status_bits, reg & status_bits);
13487                         return;
13488                 }
13489                 udelay(1);
13490         }
13491 }
13492
13493 /* set CCE CSRs to chip reset defaults */
13494 static void reset_cce_csrs(struct hfi1_devdata *dd)
13495 {
13496         int i;
13497
13498         /* CCE_REVISION read-only */
13499         /* CCE_REVISION2 read-only */
13500         /* CCE_CTRL - bits clear automatically */
13501         /* CCE_STATUS read-only, use CceCtrl to clear */
13502         clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13503         clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13504         clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13505         for (i = 0; i < CCE_NUM_SCRATCH; i++)
13506                 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13507         /* CCE_ERR_STATUS read-only */
13508         write_csr(dd, CCE_ERR_MASK, 0);
13509         write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13510         /* CCE_ERR_FORCE leave alone */
13511         for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13512                 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13513         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13514         /* CCE_PCIE_CTRL leave alone */
13515         for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13516                 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13517                 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13518                           CCE_MSIX_TABLE_UPPER_RESETCSR);
13519         }
13520         for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13521                 /* CCE_MSIX_PBA read-only */
13522                 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13523                 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13524         }
13525         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13526                 write_csr(dd, CCE_INT_MAP, 0);
13527         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13528                 /* CCE_INT_STATUS read-only */
13529                 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13530                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13531                 /* CCE_INT_FORCE leave alone */
13532                 /* CCE_INT_BLOCKED read-only */
13533         }
13534         for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13535                 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13536 }
13537
13538 /* set MISC CSRs to chip reset defaults */
13539 static void reset_misc_csrs(struct hfi1_devdata *dd)
13540 {
13541         int i;
13542
13543         for (i = 0; i < 32; i++) {
13544                 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13545                 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13546                 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13547         }
13548         /*
13549          * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13550          * only be written in 128-byte chunks
13551          */
13552         /* init RSA engine to clear lingering errors */
13553         write_csr(dd, MISC_CFG_RSA_CMD, 1);
13554         write_csr(dd, MISC_CFG_RSA_MU, 0);
13555         write_csr(dd, MISC_CFG_FW_CTRL, 0);
13556         /* MISC_STS_8051_DIGEST read-only */
13557         /* MISC_STS_SBM_DIGEST read-only */
13558         /* MISC_STS_PCIE_DIGEST read-only */
13559         /* MISC_STS_FAB_DIGEST read-only */
13560         /* MISC_ERR_STATUS read-only */
13561         write_csr(dd, MISC_ERR_MASK, 0);
13562         write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13563         /* MISC_ERR_FORCE leave alone */
13564 }
13565
13566 /* set TXE CSRs to chip reset defaults */
13567 static void reset_txe_csrs(struct hfi1_devdata *dd)
13568 {
13569         int i;
13570
13571         /*
13572          * TXE Kernel CSRs
13573          */
13574         write_csr(dd, SEND_CTRL, 0);
13575         __cm_reset(dd, 0);      /* reset CM internal state */
13576         /* SEND_CONTEXTS read-only */
13577         /* SEND_DMA_ENGINES read-only */
13578         /* SEND_PIO_MEM_SIZE read-only */
13579         /* SEND_DMA_MEM_SIZE read-only */
13580         write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13581         pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13582         /* SEND_PIO_ERR_STATUS read-only */
13583         write_csr(dd, SEND_PIO_ERR_MASK, 0);
13584         write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13585         /* SEND_PIO_ERR_FORCE leave alone */
13586         /* SEND_DMA_ERR_STATUS read-only */
13587         write_csr(dd, SEND_DMA_ERR_MASK, 0);
13588         write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13589         /* SEND_DMA_ERR_FORCE leave alone */
13590         /* SEND_EGRESS_ERR_STATUS read-only */
13591         write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13592         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13593         /* SEND_EGRESS_ERR_FORCE leave alone */
13594         write_csr(dd, SEND_BTH_QP, 0);
13595         write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13596         write_csr(dd, SEND_SC2VLT0, 0);
13597         write_csr(dd, SEND_SC2VLT1, 0);
13598         write_csr(dd, SEND_SC2VLT2, 0);
13599         write_csr(dd, SEND_SC2VLT3, 0);
13600         write_csr(dd, SEND_LEN_CHECK0, 0);
13601         write_csr(dd, SEND_LEN_CHECK1, 0);
13602         /* SEND_ERR_STATUS read-only */
13603         write_csr(dd, SEND_ERR_MASK, 0);
13604         write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13605         /* SEND_ERR_FORCE read-only */
13606         for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13607                 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13608         for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13609                 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13610         for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13611                 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13612         for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13613                 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13614         for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13615                 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13616         write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13617         write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13618         /* SEND_CM_CREDIT_USED_STATUS read-only */
13619         write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13620         write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13621         write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13622         write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13623         write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13624         for (i = 0; i < TXE_NUM_DATA_VL; i++)
13625                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13626         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13627         /* SEND_CM_CREDIT_USED_VL read-only */
13628         /* SEND_CM_CREDIT_USED_VL15 read-only */
13629         /* SEND_EGRESS_CTXT_STATUS read-only */
13630         /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13631         write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13632         /* SEND_EGRESS_ERR_INFO read-only */
13633         /* SEND_EGRESS_ERR_SOURCE read-only */
13634
13635         /*
13636          * TXE Per-Context CSRs
13637          */
13638         for (i = 0; i < chip_send_contexts(dd); i++) {
13639                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13640                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13641                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13642                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13643                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13644                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13645                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13646                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13647                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13648                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13649                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13650                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13651         }
13652
13653         /*
13654          * TXE Per-SDMA CSRs
13655          */
13656         for (i = 0; i < chip_sdma_engines(dd); i++) {
13657                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13658                 /* SEND_DMA_STATUS read-only */
13659                 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13660                 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13661                 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13662                 /* SEND_DMA_HEAD read-only */
13663                 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13664                 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13665                 /* SEND_DMA_IDLE_CNT read-only */
13666                 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13667                 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13668                 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13669                 /* SEND_DMA_ENG_ERR_STATUS read-only */
13670                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13671                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13672                 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13673                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13674                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13675                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13676                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13677                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13678                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13679                 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13680         }
13681 }
13682
13683 /*
13684  * Expect on entry:
13685  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13686  */
13687 static void init_rbufs(struct hfi1_devdata *dd)
13688 {
13689         u64 reg;
13690         int count;
13691
13692         /*
13693          * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13694          * clear.
13695          */
13696         count = 0;
13697         while (1) {
13698                 reg = read_csr(dd, RCV_STATUS);
13699                 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13700                             | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13701                         break;
13702                 /*
13703                  * Give up after 1ms - maximum wait time.
13704                  *
13705                  * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13706                  * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13707                  *      136 KB / (66% * 250MB/s) = 844us
13708                  */
13709                 if (count++ > 500) {
13710                         dd_dev_err(dd,
13711                                    "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13712                                    __func__, reg);
13713                         break;
13714                 }
13715                 udelay(2); /* do not busy-wait the CSR */
13716         }
13717
13718         /* start the init - expect RcvCtrl to be 0 */
13719         write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13720
13721         /*
13722          * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13723          * period after the write before RcvStatus.RxRbufInitDone is valid.
13724          * The delay in the first run through the loop below is sufficient and
13725          * required before the first read of RcvStatus.RxRbufInitDone.
13726          */
13727         read_csr(dd, RCV_CTRL);
13728
13729         /* wait for the init to finish */
13730         count = 0;
13731         while (1) {
13732                 /* delay is required first time through - see above */
13733                 udelay(2); /* do not busy-wait the CSR */
13734                 reg = read_csr(dd, RCV_STATUS);
13735                 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13736                         break;
13737
13738                 /* give up after 100us - slowest possible at 33MHz is 73us */
13739                 if (count++ > 50) {
13740                         dd_dev_err(dd,
13741                                    "%s: RcvStatus.RxRbufInit not set, continuing\n",
13742                                    __func__);
13743                         break;
13744                 }
13745         }
13746 }
13747
13748 /* set RXE CSRs to chip reset defaults */
13749 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13750 {
13751         int i, j;
13752
13753         /*
13754          * RXE Kernel CSRs
13755          */
13756         write_csr(dd, RCV_CTRL, 0);
13757         init_rbufs(dd);
13758         /* RCV_STATUS read-only */
13759         /* RCV_CONTEXTS read-only */
13760         /* RCV_ARRAY_CNT read-only */
13761         /* RCV_BUF_SIZE read-only */
13762         write_csr(dd, RCV_BTH_QP, 0);
13763         write_csr(dd, RCV_MULTICAST, 0);
13764         write_csr(dd, RCV_BYPASS, 0);
13765         write_csr(dd, RCV_VL15, 0);
13766         /* this is a clear-down */
13767         write_csr(dd, RCV_ERR_INFO,
13768                   RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13769         /* RCV_ERR_STATUS read-only */
13770         write_csr(dd, RCV_ERR_MASK, 0);
13771         write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13772         /* RCV_ERR_FORCE leave alone */
13773         for (i = 0; i < 32; i++)
13774                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13775         for (i = 0; i < 4; i++)
13776                 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13777         for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13778                 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13779         for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13780                 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13781         for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13782                 clear_rsm_rule(dd, i);
13783         for (i = 0; i < 32; i++)
13784                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13785
13786         /*
13787          * RXE Kernel and User Per-Context CSRs
13788          */
13789         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13790                 /* kernel */
13791                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13792                 /* RCV_CTXT_STATUS read-only */
13793                 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13794                 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13795                 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13796                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13797                 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13798                 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13799                 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13800                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13801                 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13802                 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13803
13804                 /* user */
13805                 /* RCV_HDR_TAIL read-only */
13806                 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13807                 /* RCV_EGR_INDEX_TAIL read-only */
13808                 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13809                 /* RCV_EGR_OFFSET_TAIL read-only */
13810                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13811                         write_uctxt_csr(dd, i,
13812                                         RCV_TID_FLOW_TABLE + (8 * j), 0);
13813                 }
13814         }
13815 }
13816
13817 /*
13818  * Set sc2vl tables.
13819  *
13820  * They power on to zeros, so to avoid send context errors
13821  * they need to be set:
13822  *
13823  * SC 0-7 -> VL 0-7 (respectively)
13824  * SC 15  -> VL 15
13825  * otherwise
13826  *        -> VL 0
13827  */
13828 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13829 {
13830         int i;
13831         /* init per architecture spec, constrained by hardware capability */
13832
13833         /* HFI maps sent packets */
13834         write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13835                 0,
13836                 0, 0, 1, 1,
13837                 2, 2, 3, 3,
13838                 4, 4, 5, 5,
13839                 6, 6, 7, 7));
13840         write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13841                 1,
13842                 8, 0, 9, 0,
13843                 10, 0, 11, 0,
13844                 12, 0, 13, 0,
13845                 14, 0, 15, 15));
13846         write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13847                 2,
13848                 16, 0, 17, 0,
13849                 18, 0, 19, 0,
13850                 20, 0, 21, 0,
13851                 22, 0, 23, 0));
13852         write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13853                 3,
13854                 24, 0, 25, 0,
13855                 26, 0, 27, 0,
13856                 28, 0, 29, 0,
13857                 30, 0, 31, 0));
13858
13859         /* DC maps received packets */
13860         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13861                 15_0,
13862                 0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13863                 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13864         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13865                 31_16,
13866                 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13867                 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13868
13869         /* initialize the cached sc2vl values consistently with h/w */
13870         for (i = 0; i < 32; i++) {
13871                 if (i < 8 || i == 15)
13872                         *((u8 *)(dd->sc2vl) + i) = (u8)i;
13873                 else
13874                         *((u8 *)(dd->sc2vl) + i) = 0;
13875         }
13876 }
13877
13878 /*
13879  * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13880  * depend on the chip going through a power-on reset - a driver may be loaded
13881  * and unloaded many times.
13882  *
13883  * Do not write any CSR values to the chip in this routine - there may be
13884  * a reset following the (possible) FLR in this routine.
13885  *
13886  */
13887 static int init_chip(struct hfi1_devdata *dd)
13888 {
13889         int i;
13890         int ret = 0;
13891
13892         /*
13893          * Put the HFI CSRs in a known state.
13894          * Combine this with a DC reset.
13895          *
13896          * Stop the device from doing anything while we do a
13897          * reset.  We know there are no other active users of
13898          * the device since we are now in charge.  Turn off
13899          * all outbound and inbound traffic and make sure
13900          * the device does not generate any interrupts.
13901          */
13902
13903         /* disable send contexts and SDMA engines */
13904         write_csr(dd, SEND_CTRL, 0);
13905         for (i = 0; i < chip_send_contexts(dd); i++)
13906                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13907         for (i = 0; i < chip_sdma_engines(dd); i++)
13908                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13909         /* disable port (turn off RXE inbound traffic) and contexts */
13910         write_csr(dd, RCV_CTRL, 0);
13911         for (i = 0; i < chip_rcv_contexts(dd); i++)
13912                 write_csr(dd, RCV_CTXT_CTRL, 0);
13913         /* mask all interrupt sources */
13914         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13915                 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13916
13917         /*
13918          * DC Reset: do a full DC reset before the register clear.
13919          * A recommended length of time to hold is one CSR read,
13920          * so reread the CceDcCtrl.  Then, hold the DC in reset
13921          * across the clear.
13922          */
13923         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13924         (void)read_csr(dd, CCE_DC_CTRL);
13925
13926         if (use_flr) {
13927                 /*
13928                  * A FLR will reset the SPC core and part of the PCIe.
13929                  * The parts that need to be restored have already been
13930                  * saved.
13931                  */
13932                 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13933
13934                 /* do the FLR, the DC reset will remain */
13935                 pcie_flr(dd->pcidev);
13936
13937                 /* restore command and BARs */
13938                 ret = restore_pci_variables(dd);
13939                 if (ret) {
13940                         dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13941                                    __func__);
13942                         return ret;
13943                 }
13944
13945                 if (is_ax(dd)) {
13946                         dd_dev_info(dd, "Resetting CSRs with FLR\n");
13947                         pcie_flr(dd->pcidev);
13948                         ret = restore_pci_variables(dd);
13949                         if (ret) {
13950                                 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13951                                            __func__);
13952                                 return ret;
13953                         }
13954                 }
13955         } else {
13956                 dd_dev_info(dd, "Resetting CSRs with writes\n");
13957                 reset_cce_csrs(dd);
13958                 reset_txe_csrs(dd);
13959                 reset_rxe_csrs(dd);
13960                 reset_misc_csrs(dd);
13961         }
13962         /* clear the DC reset */
13963         write_csr(dd, CCE_DC_CTRL, 0);
13964
13965         /* Set the LED off */
13966         setextled(dd, 0);
13967
13968         /*
13969          * Clear the QSFP reset.
13970          * An FLR enforces a 0 on all out pins. The driver does not touch
13971          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low, holding
13972          * anything that is plugged in (and pays attention to RESET_N)
13973          * constantly in reset; optical cables are prime examples.
13974          * Set all pins high.
13975          * I2CCLK and I2CDAT will change per direction, and INT_N and
13976          * MODPRS_N are input only and their value is ignored.
13977          */
13978         write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13979         write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13980         init_chip_resources(dd);
13981         return ret;
13982 }
13983
13984 static void init_early_variables(struct hfi1_devdata *dd)
13985 {
13986         int i;
13987
13988         /* assign link credit variables */
13989         dd->vau = CM_VAU;
13990         dd->link_credits = CM_GLOBAL_CREDITS;
13991         if (is_ax(dd))
13992                 dd->link_credits--;
13993         dd->vcu = cu_to_vcu(hfi1_cu);
13994         /* enough room for 8 MAD packets plus header - 17K */
13995         dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13996         if (dd->vl15_init > dd->link_credits)
13997                 dd->vl15_init = dd->link_credits;
13998
13999         write_uninitialized_csrs_and_memories(dd);
14000
14001         if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14002                 for (i = 0; i < dd->num_pports; i++) {
14003                         struct hfi1_pportdata *ppd = &dd->pport[i];
14004
14005                         set_partition_keys(ppd);
14006                 }
14007         init_sc2vl_tables(dd);
14008 }
14009
14010 static void init_kdeth_qp(struct hfi1_devdata *dd)
14011 {
14012         /* user changed the KDETH_QP */
14013         if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14014                 /* out of range or illegal value */
14015                 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14016                 kdeth_qp = 0;
14017         }
14018         if (kdeth_qp == 0)      /* not set, or failed range check */
14019                 kdeth_qp = DEFAULT_KDETH_QP;
14020
14021         write_csr(dd, SEND_BTH_QP,
14022                   (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14023                   SEND_BTH_QP_KDETH_QP_SHIFT);
14024
14025         write_csr(dd, RCV_BTH_QP,
14026                   (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14027                   RCV_BTH_QP_KDETH_QP_SHIFT);
14028 }
14029
14030 /**
14031  * init_qpmap_table
14032  * @dd - device data
14033  * @first_ctxt - first context
14034  * @last_ctxt - last context
14035  *
14036  * This routine sets the qpn mapping table that
14037  * is indexed by qpn[8:1].
14038  *
14039  * The routine will round robin the 256 settings
14040  * from first_ctxt to last_ctxt.
14041  *
14042  * The first/last parameters look ahead to having specialized
14043  * receive contexts for mgmt and bypass.  Normal
14044  * verbs traffic is assumed to be on a range
14045  * of receive contexts.
14046  */
14047 static void init_qpmap_table(struct hfi1_devdata *dd,
14048                              u32 first_ctxt,
14049                              u32 last_ctxt)
14050 {
14051         u64 reg = 0;
14052         u64 regno = RCV_QP_MAP_TABLE;
14053         int i;
14054         u64 ctxt = first_ctxt;
14055
14056         for (i = 0; i < 256; i++) {
14057                 reg |= ctxt << (8 * (i % 8));
14058                 ctxt++;
14059                 if (ctxt > last_ctxt)
14060                         ctxt = first_ctxt;
14061                 if (i % 8 == 7) {
14062                         write_csr(dd, regno, reg);
14063                         reg = 0;
14064                         regno += 8;
14065                 }
14066         }
14067
14068         add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14069                         | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14070 }
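
/*
 * Example (illustrative values): init_qpmap_table(dd, 1, 3) fills the
 * 256-entry table with the repeating pattern 1, 2, 3, 1, 2, 3, ..., so
 * a packet whose qpn[8:1] value is 5 is steered to receive context 3.
 */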
14071
14072 struct rsm_map_table {
14073         u64 map[NUM_MAP_REGS];
14074         unsigned int used;
14075 };
14076
14077 struct rsm_rule_data {
14078         u8 offset;
14079         u8 pkt_type;
14080         u32 field1_off;
14081         u32 field2_off;
14082         u32 index1_off;
14083         u32 index1_width;
14084         u32 index2_off;
14085         u32 index2_width;
14086         u32 mask1;
14087         u32 value1;
14088         u32 mask2;
14089         u32 value2;
14090 };
14091
14092 /*
14093  * Return an initialized RMT map table for users to fill in.  OK if it
14094  * returns NULL, indicating no table.
14095  */
14096 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14097 {
14098         struct rsm_map_table *rmt;
14099         u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14100
14101         rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14102         if (rmt) {
14103                 memset(rmt->map, rxcontext, sizeof(rmt->map));
14104                 rmt->used = 0;
14105         }
14106
14107         return rmt;
14108 }
14109
14110 /*
14111  * Write the final RMT map table to the chip and free the table.  OK if
14112  * table is NULL.
14113  */
14114 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14115                                    struct rsm_map_table *rmt)
14116 {
14117         int i;
14118
14119         if (rmt) {
14120                 /* write table to chip */
14121                 for (i = 0; i < NUM_MAP_REGS; i++)
14122                         write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14123
14124                 /* enable RSM */
14125                 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14126         }
14127 }
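
/*
 * Typical flow (illustrative sketch only; the actual call site lives
 * elsewhere in the driver):
 *
 *	rmt = alloc_rsm_map_table(dd);
 *	init_qos(dd, rmt);
 *	init_user_fecn_handling(dd, rmt);
 *	complete_rsm_map_table(dd, rmt);
 *	kfree(rmt);
 */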
14128
14129 /*
14130  * Add a receive side mapping rule.
14131  */
14132 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14133                          struct rsm_rule_data *rrd)
14134 {
14135         write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14136                   (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14137                   1ull << rule_index | /* enable bit */
14138                   (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14139         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14140                   (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14141                   (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14142                   (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14143                   (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14144                   (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14145                   (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14146         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14147                   (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14148                   (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14149                   (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14150                   (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14151 }
14152
14153 /*
14154  * Clear a receive side mapping rule.
14155  */
14156 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14157 {
14158         write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14159         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14160         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14161 }
14162
14163 /* return the number of RSM map table entries that will be used for QOS */
14164 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14165                            unsigned int *np)
14166 {
14167         int i;
14168         unsigned int m, n;
14169         u8 max_by_vl = 0;
14170
14171         /* is QOS active at all? */
14172         if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14173             num_vls == 1 ||
14174             krcvqsset <= 1)
14175                 goto no_qos;
14176
14177         /* determine bits for qpn */
14178         for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14179                 if (krcvqs[i] > max_by_vl)
14180                         max_by_vl = krcvqs[i];
14181         if (max_by_vl > 32)
14182                 goto no_qos;
14183         m = ilog2(__roundup_pow_of_two(max_by_vl));
14184
14185         /* determine bits for vl */
14186         n = ilog2(__roundup_pow_of_two(num_vls));
14187
14188         /* reject if too much is used */
14189         if ((m + n) > 7)
14190                 goto no_qos;
14191
14192         if (mp)
14193                 *mp = m;
14194         if (np)
14195                 *np = n;
14196
14197         return 1 << (m + n);
14198
14199 no_qos:
14200         if (mp)
14201                 *mp = 0;
14202         if (np)
14203                 *np = 0;
14204         return 0;
14205 }
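
/*
 * Worked example (hypothetical values): with num_vls = 4, krcvqsset = 4
 * and krcvqs[] = {4, 4, 4, 4}, max_by_vl is 4, so m = 2 qpn bits and
 * n = 2 vl bits; m + n = 4 <= 7 and the routine reports 1 << 4 = 16 RMT
 * entries (assuming enough kernel receive queues are configured for QOS
 * to be active at all).
 */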
14206
14207 /**
14208  * init_qos - init RX qos
14209  * @dd - device data
14210  * @rmt - RSM map table
14211  *
14212  * This routine initializes Rule 0 and the RSM map table to implement
14213  * quality of service (qos).
14214  *
14215  * If all of the limit tests succeed, qos is applied based on the array
14216  * interpretation of krcvqs where entry 0 is VL0.
14217  *
14218  * The number of vl bits (n) and the number of qpn bits (m) are computed to
14219  * feed both the RSM map table and the single rule.
14220  */
14221 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14222 {
14223         struct rsm_rule_data rrd;
14224         unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14225         unsigned int rmt_entries;
14226         u64 reg;
14227
14228         if (!rmt)
14229                 goto bail;
14230         rmt_entries = qos_rmt_entries(dd, &m, &n);
14231         if (rmt_entries == 0)
14232                 goto bail;
14233         qpns_per_vl = 1 << m;
14234
14235         /* enough room in the map table? */
14236         rmt_entries = 1 << (m + n);
14237         if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14238                 goto bail;
14239
14240         /* add qos entries to the RSM map table */
14241         for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14242                 unsigned tctxt;
14243
14244                 for (qpn = 0, tctxt = ctxt;
14245                      krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14246                         unsigned idx, regoff, regidx;
14247
14248                         /* generate the index the hardware will produce */
14249                         idx = rmt->used + ((qpn << n) ^ i);
14250                         regoff = (idx % 8) * 8;
14251                         regidx = idx / 8;
14252                         /* replace default with context number */
14253                         reg = rmt->map[regidx];
14254                         reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14255                                 << regoff);
14256                         reg |= (u64)(tctxt++) << regoff;
14257                         rmt->map[regidx] = reg;
14258                         if (tctxt == ctxt + krcvqs[i])
14259                                 tctxt = ctxt;
14260                 }
14261                 ctxt += krcvqs[i];
14262         }
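
        /*
         * Index sketch (hypothetical values): with krcvqs[] = {2, 2} and
         * num_vls = 2, m = n = 1 and qpns_per_vl = 2, so, relative to
         * rmt->used, VL 0 fills map entries 0 and 2 while VL 1 fills
         * entries 1 and 3; the (qpn << n) ^ i index interleaves the VLs
         * while cycling through each VL's kernel receive contexts.
         */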
14263
14264         rrd.offset = rmt->used;
14265         rrd.pkt_type = 2;
14266         rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14267         rrd.field2_off = LRH_SC_MATCH_OFFSET;
14268         rrd.index1_off = LRH_SC_SELECT_OFFSET;
14269         rrd.index1_width = n;
14270         rrd.index2_off = QPN_SELECT_OFFSET;
14271         rrd.index2_width = m + n;
14272         rrd.mask1 = LRH_BTH_MASK;
14273         rrd.value1 = LRH_BTH_VALUE;
14274         rrd.mask2 = LRH_SC_MASK;
14275         rrd.value2 = LRH_SC_VALUE;
14276
14277         /* add rule 0 */
14278         add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14279
14280         /* mark RSM map entries as used */
14281         rmt->used += rmt_entries;
14282         /* map everything else to the mcast/err/vl15 context */
14283         init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14284         dd->qos_shift = n + 1;
14285         return;
14286 bail:
14287         dd->qos_shift = 1;
14288         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14289 }
14290
14291 static void init_user_fecn_handling(struct hfi1_devdata *dd,
14292                                     struct rsm_map_table *rmt)
14293 {
14294         struct rsm_rule_data rrd;
14295         u64 reg;
14296         int i, idx, regoff, regidx;
14297         u8 offset;
14298         u32 total_cnt;
14299
14300         /* there needs to be enough room in the map table */
14301         total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
14302         if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14303                 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14304                 return;
14305         }
14306
14307         /*
14308          * RSM will extract the destination context as an index into the
14309          * map table.  The destination contexts are a sequential block
14310          * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14311          * Map entries are accessed as offset + extracted value.  Adjust
14312          * the added offset so this sequence can be placed anywhere in
14313          * the table - as long as the entries themselves do not wrap.
14314          * There are only enough bits in offset for the table size, so
14315          * start with that to allow for a "negative" offset.
14316          */
14317         offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14318                                                 (int)dd->first_dyn_alloc_ctxt);
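
        /*
         * Numeric sketch (hypothetical values, assuming NUM_MAP_ENTRIES
         * is 256): with rmt->used = 48 and first_dyn_alloc_ctxt = 11,
         * offset = (u8)(256 + 48 - 11) = 37, so receive context 11
         * indexes map entry (37 + 11) mod 256 = 48, the first free RMT
         * entry.
         */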
14319
14320         for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14321                                 i < dd->num_rcv_contexts; i++, idx++) {
14322                 /* replace with identity mapping */
14323                 regoff = (idx % 8) * 8;
14324                 regidx = idx / 8;
14325                 reg = rmt->map[regidx];
14326                 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14327                 reg |= (u64)i << regoff;
14328                 rmt->map[regidx] = reg;
14329         }
14330
14331         /*
14332          * For RSM intercept of Expected FECN packets:
14333          * o packet type 0 - expected
14334          * o match on F (bit 95), using select/match 1, and
14335          * o match on SH (bit 133), using select/match 2.
14336          *
14337          * Use index 1 to extract the 8-bit receive context from DestQP
14338          * (start at bit 64).  Use that as the RSM map table index.
14339          */
14340         rrd.offset = offset;
14341         rrd.pkt_type = 0;
14342         rrd.field1_off = 95;
14343         rrd.field2_off = 133;
14344         rrd.index1_off = 64;
14345         rrd.index1_width = 8;
14346         rrd.index2_off = 0;
14347         rrd.index2_width = 0;
14348         rrd.mask1 = 1;
14349         rrd.value1 = 1;
14350         rrd.mask2 = 1;
14351         rrd.value2 = 1;
14352
14353         /* add rule 1 */
14354         add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14355
14356         rmt->used += total_cnt;
14357 }
14358
14359 /* Initialize RSM for VNIC */
14360 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14361 {
14362         u8 i, j;
14363         u8 ctx_id = 0;
14364         u64 reg;
14365         u32 regoff;
14366         struct rsm_rule_data rrd;
14367
14368         if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14369                 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14370                            dd->vnic.rmt_start);
14371                 return;
14372         }
14373
14374         dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14375                 dd->vnic.rmt_start,
14376                 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14377
14378         /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14379         regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14380         reg = read_csr(dd, regoff);
14381         for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14382                 /* Update map register with vnic context */
14383                 j = (dd->vnic.rmt_start + i) % 8;
14384                 reg &= ~(0xffllu << (j * 8));
14385                 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14386                 /* Wrap up vnic ctx index */
14387                 ctx_id %= dd->vnic.num_ctxt;
14388                 /* Write back map register */
14389                 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14390                         dev_dbg(&(dd)->pcidev->dev,
14391                                 "Vnic rsm map reg[%d] =0x%llx\n",
14392                                 regoff - RCV_RSM_MAP_TABLE, reg);
14393
14394                         write_csr(dd, regoff, reg);
14395                         regoff += 8;
14396                         if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14397                                 reg = read_csr(dd, regoff);
14398                 }
14399         }
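        /*
         * Byte-lane arithmetic used above, spelled out for illustration:
         * each 64-bit map register holds 8 one-byte context entries, so
         * entry (rmt_start + i) lives in register (rmt_start + i) / 8 at
         * byte lane j = (rmt_start + i) % 8, i.e. bits [8j+7:8j].  For
         * example, with rmt_start == 20 and i == 0, that is the third map
         * register (CSR offset RCV_RSM_MAP_TABLE + 16), byte lane 4.
         */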
14400
14401         /* Add rule for vnic */
14402         rrd.offset = dd->vnic.rmt_start;
14403         rrd.pkt_type = 4;
14404         /* Match 16B packets */
14405         rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14406         rrd.mask1 = L2_TYPE_MASK;
14407         rrd.value1 = L2_16B_VALUE;
14408         /* Match ETH L4 packets */
14409         rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14410         rrd.mask2 = L4_16B_TYPE_MASK;
14411         rrd.value2 = L4_16B_ETH_VALUE;
14412         /* Calc context from veswid and entropy */
14413         rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14414         rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14415         rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14416         rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14417         add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14418
14419         /* Enable RSM if not already enabled */
14420         add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14421 }
14422
14423 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14424 {
14425         clear_rsm_rule(dd, RSM_INS_VNIC);
14426
14427         /* Disable RSM if used only by vnic */
14428         if (dd->vnic.rmt_start == 0)
14429                 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14430 }
14431
14432 static void init_rxe(struct hfi1_devdata *dd)
14433 {
14434         struct rsm_map_table *rmt;
14435         u64 val;
14436
14437         /* enable all receive errors */
14438         write_csr(dd, RCV_ERR_MASK, ~0ull);
14439
14440         rmt = alloc_rsm_map_table(dd);
14441         /* set up QOS, including the QPN map table */
14442         init_qos(dd, rmt);
14443         init_user_fecn_handling(dd, rmt);
14444         complete_rsm_map_table(dd, rmt);
14445         /* record number of used rsm map entries for vnic */
14446         dd->vnic.rmt_start = rmt->used;
14447         kfree(rmt);
14448
14449         /*
14450          * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14451          * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14452          * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14453          * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14454          * Max_PayLoad_Size set to its minimum of 128.
14455          *
14456          * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14457          * (64 bytes).  Max_Payload_Size is possibly modified upward in
14458          * tune_pcie_caps() which is called after this routine.
14459          */
14460
14461         /* Have 16 bytes (4DW) of bypass header available in header queue */
14462         val = read_csr(dd, RCV_BYPASS);
14463         val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14464         val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14465                 RCV_BYPASS_HDR_SIZE_SHIFT);
14466         write_csr(dd, RCV_BYPASS, val);
14467 }
14468
14469 static void init_other(struct hfi1_devdata *dd)
14470 {
14471         /* enable all CCE errors */
14472         write_csr(dd, CCE_ERR_MASK, ~0ull);
14473         /* enable *some* Misc errors */
14474         write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14475         /* enable all DC errors, except LCB */
14476         write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14477         write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14478 }
14479
14480 /*
14481  * Fill out the given AU table using the given CU.  A CU is defined in terms
14482  * of AUs.  The table is an encoding: given the index, how many AUs does that
14483  * represent?
14484  *
14485  * NOTE: Assumes that the register layout is the same for the
14486  * local and remote tables.
14487  */
14488 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14489                                u32 csr0to3, u32 csr4to7)
14490 {
14491         write_csr(dd, csr0to3,
14492                   0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14493                   1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14494                   2ull * cu <<
14495                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14496                   4ull * cu <<
14497                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14498         write_csr(dd, csr4to7,
14499                   8ull * cu <<
14500                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14501                   16ull * cu <<
14502                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14503                   32ull * cu <<
14504                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14505                   64ull * cu <<
14506                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14507 }
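/*
 * For illustration: the table written above encodes, per index, the AU
 * counts 0, 1, 2*cu, 4*cu, 8*cu, 16*cu, 32*cu and 64*cu.  Assuming
 * vcu_to_cu() expands the encoded vcu to cu = 1 << vcu (an assumption,
 * not shown in this file), a vcu of 0 gives 0, 1, 2, 4, 8, 16, 32, 64.
 */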
14508
14509 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14510 {
14511         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14512                            SEND_CM_LOCAL_AU_TABLE4_TO7);
14513 }
14514
14515 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14516 {
14517         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14518                            SEND_CM_REMOTE_AU_TABLE4_TO7);
14519 }
14520
14521 static void init_txe(struct hfi1_devdata *dd)
14522 {
14523         int i;
14524
14525         /* enable all PIO, SDMA, general, and Egress errors */
14526         write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14527         write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14528         write_csr(dd, SEND_ERR_MASK, ~0ull);
14529         write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14530
14531         /* enable all per-context and per-SDMA engine errors */
14532         for (i = 0; i < chip_send_contexts(dd); i++)
14533                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14534         for (i = 0; i < chip_sdma_engines(dd); i++)
14535                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14536
14537         /* set the local CU to AU mapping */
14538         assign_local_cm_au_table(dd, dd->vcu);
14539
14540         /*
14541          * Set reasonable default for Credit Return Timer
14542          * Don't set on Simulator - causes it to choke.
14543          */
14544         if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14545                 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14546 }
14547
14548 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14549                        u16 jkey)
14550 {
14551         u8 hw_ctxt;
14552         u64 reg;
14553
14554         if (!rcd || !rcd->sc)
14555                 return -EINVAL;
14556
14557         hw_ctxt = rcd->sc->hw_context;
14558         reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14559                 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14560                  SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14561         /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14562         if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14563                 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14564         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14565         /*
14566          * Enable send-side J_KEY integrity check, unless this is A0 h/w
14567          */
14568         if (!is_ax(dd)) {
14569                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14570                 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14571                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14572         }
14573
14574         /* Enable J_KEY check on receive context. */
14575         reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14576                 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14577                  RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14578         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14579
14580         return 0;
14581 }
14582
14583 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14584 {
14585         u8 hw_ctxt;
14586         u64 reg;
14587
14588         if (!rcd || !rcd->sc)
14589                 return -EINVAL;
14590
14591         hw_ctxt = rcd->sc->hw_context;
14592         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14593         /*
14594          * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14595          * This check would not have been enabled for A0 h/w, see
14596          * hfi1_set_ctxt_jkey().
14597          */
14598         if (!is_ax(dd)) {
14599                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14600                 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14601                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14602         }
14603         /* Turn off the J_KEY on the receive side */
14604         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14605
14606         return 0;
14607 }
14608
14609 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14610                        u16 pkey)
14611 {
14612         u8 hw_ctxt;
14613         u64 reg;
14614
14615         if (!rcd || !rcd->sc)
14616                 return -EINVAL;
14617
14618         hw_ctxt = rcd->sc->hw_context;
14619         reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14620                 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14621         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14622         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14623         reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14624         reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14625         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14626
14627         return 0;
14628 }
14629
14630 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14631 {
14632         u8 hw_ctxt;
14633         u64 reg;
14634
14635         if (!ctxt || !ctxt->sc)
14636                 return -EINVAL;
14637
14638         hw_ctxt = ctxt->sc->hw_context;
14639         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14640         reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14641         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14642         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14643
14644         return 0;
14645 }
14646
14647 /*
14648  * Start cleaning up the chip.  Our clean up happens in multiple
14649  * stages and this is just the first.
14650  */
14651 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14652 {
14653         aspm_exit(dd);
14654         free_cntrs(dd);
14655         free_rcverr(dd);
14656         finish_chip_resources(dd);
14657 }
14658
14659 #define HFI_BASE_GUID(dev) \
14660         ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
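/*
 * The macro clears the per-HFI index bit from the GUID: the two HFIs on
 * one ASIC carry the same base GUID and differ in the bit at
 * GUID_HFI_INDEX_SHIFT, so HFI_BASE_GUID() compares equal for both.
 * init_asic_data() below uses that equality to find the peer device.
 */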
14661
14662 /*
14663  * Information can be shared between the two HFIs on the same ASIC
14664  * in the same OS.  This function finds the peer device and sets
14665  * up a shared structure.
14666  */
14667 static int init_asic_data(struct hfi1_devdata *dd)
14668 {
14669         unsigned long flags;
14670         struct hfi1_devdata *tmp, *peer = NULL;
14671         struct hfi1_asic_data *asic_data;
14672         int ret = 0;
14673
14674         /* pre-allocate the asic structure in case we are the first device */
14675         asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14676         if (!asic_data)
14677                 return -ENOMEM;
14678
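        /*
         * Ordering note: the allocation above uses GFP_KERNEL and may
         * sleep, so it must happen before hfi1_devs_lock is taken.  If a
         * peer device already owns an asic_data, the preallocation is
         * simply freed below.
         */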
14679         spin_lock_irqsave(&hfi1_devs_lock, flags);
14680         /* Find our peer device */
14681         list_for_each_entry(tmp, &hfi1_dev_list, list) {
14682                 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14683                     dd->unit != tmp->unit) {
14684                         peer = tmp;
14685                         break;
14686                 }
14687         }
14688
14689         if (peer) {
14690                 /* use already allocated structure */
14691                 dd->asic_data = peer->asic_data;
14692                 kfree(asic_data);
14693         } else {
14694                 dd->asic_data = asic_data;
14695                 mutex_init(&dd->asic_data->asic_resource_mutex);
14696         }
14697         dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14698         spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14699
14700         /* first one through - set up i2c devices */
14701         if (!peer)
14702                 ret = set_up_i2c(dd, dd->asic_data);
14703
14704         return ret;
14705 }
14706
14707 /*
14708  * Set dd->boardname.  Use a generic name if a name is not returned from
14709  * EFI variable space.
14710  *
14711  * Return 0 on success, -ENOMEM if space could not be allocated.
14712  */
14713 static int obtain_boardname(struct hfi1_devdata *dd)
14714 {
14715         /* generic board description */
14716         const char generic[] =
14717                 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14718         unsigned long size;
14719         int ret;
14720
14721         ret = read_hfi1_efi_var(dd, "description", &size,
14722                                 (void **)&dd->boardname);
14723         if (ret) {
14724                 dd_dev_info(dd, "Board description not found\n");
14725                 /* use generic description */
14726                 dd->boardname = kstrdup(generic, GFP_KERNEL);
14727                 if (!dd->boardname)
14728                         return -ENOMEM;
14729         }
14730         return 0;
14731 }
14732
14733 /*
14734  * Check the interrupt registers to make sure that they are mapped correctly.
14735  * It is intended to help the user identify any mismapping by the VMM when
14736  * the driver is running in a VM.  This function should only be called before
14737  * interrupts are set up properly.
14738  *
14739  * Return 0 on success, -EINVAL on failure.
14740  */
14741 static int check_int_registers(struct hfi1_devdata *dd)
14742 {
14743         u64 reg;
14744         u64 all_bits = ~(u64)0;
14745         u64 mask;
14746
14747         /* Clear CceIntMask[0] to avoid raising any interrupts */
14748         mask = read_csr(dd, CCE_INT_MASK);
14749         write_csr(dd, CCE_INT_MASK, 0ull);
14750         reg = read_csr(dd, CCE_INT_MASK);
14751         if (reg)
14752                 goto err_exit;
14753
14754         /* Clear all interrupt status bits */
14755         write_csr(dd, CCE_INT_CLEAR, all_bits);
14756         reg = read_csr(dd, CCE_INT_STATUS);
14757         if (reg)
14758                 goto err_exit;
14759
14760         /* Set all interrupt status bits */
14761         write_csr(dd, CCE_INT_FORCE, all_bits);
14762         reg = read_csr(dd, CCE_INT_STATUS);
14763         if (reg != all_bits)
14764                 goto err_exit;
14765
14766         /* Restore the interrupt mask */
14767         write_csr(dd, CCE_INT_CLEAR, all_bits);
14768         write_csr(dd, CCE_INT_MASK, mask);
14769
14770         return 0;
14771 err_exit:
14772         write_csr(dd, CCE_INT_MASK, mask);
14773         dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14774         return -EINVAL;
14775 }
14776
14777 /**
14778  * hfi1_init_dd() - Initialize most of the dd structure.
14779  * @dd: the hfi1_devdata structure for the device being initialized
14781  *
14782  * This is global, and is called directly at init to set up the
14783  * chip-specific function pointers for later use.
14784  */
14785 int hfi1_init_dd(struct hfi1_devdata *dd)
14786 {
14787         struct pci_dev *pdev = dd->pcidev;
14788         struct hfi1_pportdata *ppd;
14789         u64 reg;
14790         int i, ret;
14791         static const char * const inames[] = { /* implementation names */
14792                 "RTL silicon",
14793                 "RTL VCS simulation",
14794                 "RTL FPGA emulation",
14795                 "Functional simulator"
14796         };
14797         struct pci_dev *parent = pdev->bus->self;
14798         u32 sdma_engines = chip_sdma_engines(dd);
14799
14800         ppd = dd->pport;
14801         for (i = 0; i < dd->num_pports; i++, ppd++) {
14802                 int vl;
14803                 /* init common fields */
14804                 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14805                 /* DC supports 4 link widths */
14806                 ppd->link_width_supported =
14807                         OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14808                         OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14809                 ppd->link_width_downgrade_supported =
14810                         ppd->link_width_supported;
14811                 /* start out enabling only 4X */
14812                 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14813                 ppd->link_width_downgrade_enabled =
14814                                         ppd->link_width_downgrade_supported;
14815                 /* link width active is 0 when link is down */
14816                 /* link width downgrade active is 0 when link is down */
14817
14818                 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14819                     num_vls > HFI1_MAX_VLS_SUPPORTED) {
14820                         dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
14821                                    num_vls, HFI1_MAX_VLS_SUPPORTED);
14822                         num_vls = HFI1_MAX_VLS_SUPPORTED;
14823                 }
14824                 ppd->vls_supported = num_vls;
14825                 ppd->vls_operational = ppd->vls_supported;
14826                 /* Set the default MTU. */
14827                 for (vl = 0; vl < num_vls; vl++)
14828                         dd->vld[vl].mtu = hfi1_max_mtu;
14829                 dd->vld[15].mtu = MAX_MAD_PACKET;
14830                 /*
14831                  * Set the initial values to reasonable defaults; they will
14832                  * be set for real when the link is up.
14833                  */
14834                 ppd->overrun_threshold = 0x4;
14835                 ppd->phy_error_threshold = 0xf;
14836                 ppd->port_crc_mode_enabled = link_crc_mask;
14837                 /* initialize supported LTP CRC mode */
14838                 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14839                 /* initialize enabled LTP CRC mode */
14840                 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14841                 /* start in offline */
14842                 ppd->host_link_state = HLS_DN_OFFLINE;
14843                 init_vl_arb_caches(ppd);
14844         }
14845
14846         /*
14847          * Do remaining PCIe setup and save PCIe values in dd.
14848          * Any error printing is already done by the init code.
14849          * On return, we have the chip mapped.
14850          */
14851         ret = hfi1_pcie_ddinit(dd, pdev);
14852         if (ret < 0)
14853                 goto bail_free;
14854
14855         /* Save PCI space registers to rewrite after device reset */
14856         ret = save_pci_variables(dd);
14857         if (ret < 0)
14858                 goto bail_cleanup;
14859
14860         dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14861                         & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14862         dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14863                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
14864
14865         /*
14866          * Check interrupt registers mapping if the driver has no access to
14867          * the upstream component. In this case, it is likely that the driver
14868          * is running in a VM.
14869          */
14870         if (!parent) {
14871                 ret = check_int_registers(dd);
14872                 if (ret)
14873                         goto bail_cleanup;
14874         }
14875
14876         /*
14877          * obtain the hardware ID - NOT related to unit, which is a
14878          * software enumeration
14879          */
14880         reg = read_csr(dd, CCE_REVISION2);
14881         dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14882                                         & CCE_REVISION2_HFI_ID_MASK;
14883         /* the variable size will remove unwanted bits */
14884         dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14885         dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14886         dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14887                     dd->icode < ARRAY_SIZE(inames) ?
14888                     inames[dd->icode] : "unknown", (int)dd->irev);
14889
14890         /* speeds the hardware can support */
14891         dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14892         /* speeds allowed to run at */
14893         dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14894         /* give a reasonable active value, will be set on link up */
14895         dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14896
14897         /* fix up link widths for emulation _p */
14898         ppd = dd->pport;
14899         if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14900                 ppd->link_width_supported =
14901                         ppd->link_width_enabled =
14902                         ppd->link_width_downgrade_supported =
14903                         ppd->link_width_downgrade_enabled =
14904                                 OPA_LINK_WIDTH_1X;
14905         }
14906         /* ensure num_vls isn't larger than the number of sdma engines */
14907         if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
14908                 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14909                            num_vls, sdma_engines);
14910                 num_vls = sdma_engines;
14911                 ppd->vls_supported = sdma_engines;
14912                 ppd->vls_operational = ppd->vls_supported;
14913         }
14914
14915         /*
14916          * Convert the ns parameter to the 64 * cclocks used in the CSR.
14917          * Clamp the value if it is larger than the field can hold.  If timeout is
14918          * non-zero, then the calculated field will be at least 1.
14919          *
14920          * Must be after icode is set up - the cclock rate depends
14921          * on knowing the hardware being used.
14922          */
14923         dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14924         if (dd->rcv_intr_timeout_csr >
14925                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14926                 dd->rcv_intr_timeout_csr =
14927                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14928         else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14929                 dd->rcv_intr_timeout_csr = 1;
14930
14931         /* needs to be done before we look for the peer device */
14932         read_guid(dd);
14933
14934         /* set up shared ASIC data with peer device */
14935         ret = init_asic_data(dd);
14936         if (ret)
14937                 goto bail_cleanup;
14938
14939         /* obtain chip sizes, reset chip CSRs */
14940         ret = init_chip(dd);
14941         if (ret)
14942                 goto bail_cleanup;
14943
14944         /* read in the PCIe link speed information */
14945         ret = pcie_speeds(dd);
14946         if (ret)
14947                 goto bail_cleanup;
14948
14949         /* call before get_platform_config(), after init_chip_resources() */
14950         ret = eprom_init(dd);
14951         if (ret)
14952                 goto bail_free_rcverr;
14953
14954         /* Needs to be called before hfi1_firmware_init */
14955         get_platform_config(dd);
14956
14957         /* read in firmware */
14958         ret = hfi1_firmware_init(dd);
14959         if (ret)
14960                 goto bail_cleanup;
14961
14962         /*
14963          * In general, the PCIe Gen3 transition must occur after the
14964          * chip has been idled (so it won't initiate any PCIe transactions
14965          * e.g. an interrupt) and before the driver changes any registers
14966          * (the transition will reset the registers).
14967          *
14968          * In particular, place this call after:
14969          * - init_chip()     - the chip will not initiate any PCIe transactions
14970          * - pcie_speeds()   - reads the current link speed
14971          * - hfi1_firmware_init() - the needed firmware is ready to be
14972          *                          downloaded
14973          */
14974         ret = do_pcie_gen3_transition(dd);
14975         if (ret)
14976                 goto bail_cleanup;
14977
14978         /*
14979          * This should probably occur in hfi1_pcie_init(), but historically
14980          * occurs after the do_pcie_gen3_transition() code.
14981          */
14982         tune_pcie_caps(dd);
14983
14984         /* start setting dd values and adjusting CSRs */
14985         init_early_variables(dd);
14986
14987         parse_platform_config(dd);
14988
14989         ret = obtain_boardname(dd);
14990         if (ret)
14991                 goto bail_cleanup;
14992
14993         snprintf(dd->boardversion, BOARD_VERS_MAX,
14994                  "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14995                  HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14996                  (u32)dd->majrev,
14997                  (u32)dd->minrev,
14998                  (dd->revision >> CCE_REVISION_SW_SHIFT)
14999                     & CCE_REVISION_SW_MASK);
15000
15001         ret = set_up_context_variables(dd);
15002         if (ret)
15003                 goto bail_cleanup;
15004
15005         /* set initial RXE CSRs */
15006         init_rxe(dd);
15007         /* set initial TXE CSRs */
15008         init_txe(dd);
15009         /* set initial non-RXE, non-TXE CSRs */
15010         init_other(dd);
15011         /* set up KDETH QP prefix in both RX and TX CSRs */
15012         init_kdeth_qp(dd);
15013
15014         ret = hfi1_dev_affinity_init(dd);
15015         if (ret)
15016                 goto bail_cleanup;
15017
15018         /* send contexts must be set up before receive contexts */
15019         ret = init_send_contexts(dd);
15020         if (ret)
15021                 goto bail_cleanup;
15022
15023         ret = hfi1_create_kctxts(dd);
15024         if (ret)
15025                 goto bail_cleanup;
15026
15027         /*
15028          * Initialize aspm, to be done after gen3 transition and setting up
15029          * contexts and before enabling interrupts
15030          */
15031         aspm_init(dd);
15032
15033         ret = init_pervl_scs(dd);
15034         if (ret)
15035                 goto bail_cleanup;
15036
15037         /* sdma init */
15038         for (i = 0; i < dd->num_pports; ++i) {
15039                 ret = sdma_init(dd, i);
15040                 if (ret)
15041                         goto bail_cleanup;
15042         }
15043
15044         /* use contexts created by hfi1_create_kctxts */
15045         ret = set_up_interrupts(dd);
15046         if (ret)
15047                 goto bail_cleanup;
15048
15049         ret = hfi1_comp_vectors_set_up(dd);
15050         if (ret)
15051                 goto bail_clear_intr;
15052
15053         /* set up LCB access - must be after set_up_interrupts() */
15054         init_lcb_access(dd);
15055
15056         /*
15057          * Serial number is created from the base guid:
15058          * [27:24] = base guid [38:35]
15059          * [23: 0] = base guid [23: 0]
15060          */
15061         snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15062                  (dd->base_guid & 0xFFFFFF) |
15063                      ((dd->base_guid >> 11) & 0xF000000));
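        /*
         * Worked example of the masking above: bit 35 of the base GUID,
         * shifted right by 11, lands at bit 24 of the serial (bit 38 lands
         * at bit 27), matching the layout described in the comment.  If
         * base GUID bits [38:35] are 0x1 and bits [23:0] are 0x123456, the
         * serial becomes "0x01123456".
         */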
15064
15065         dd->oui1 = dd->base_guid >> 56 & 0xFF;
15066         dd->oui2 = dd->base_guid >> 48 & 0xFF;
15067         dd->oui3 = dd->base_guid >> 40 & 0xFF;
15068
15069         ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15070         if (ret)
15071                 goto bail_clear_intr;
15072
15073         thermal_init(dd);
15074
15075         ret = init_cntrs(dd);
15076         if (ret)
15077                 goto bail_clear_intr;
15078
15079         ret = init_rcverr(dd);
15080         if (ret)
15081                 goto bail_free_cntrs;
15082
15083         init_completion(&dd->user_comp);
15084
15085         /* The user refcount starts with one to indicate an active device */
15086         atomic_set(&dd->user_refcount, 1);
15087
15088         goto bail;
15089
15090 bail_free_rcverr:
15091         free_rcverr(dd);
15092 bail_free_cntrs:
15093         free_cntrs(dd);
15094 bail_clear_intr:
15095         hfi1_comp_vectors_clean_up(dd);
15096         msix_clean_up_interrupts(dd);
15097 bail_cleanup:
15098         hfi1_pcie_ddcleanup(dd);
15099 bail_free:
15100         hfi1_free_devdata(dd);
15101 bail:
15102         return ret;
15103 }
15104
15105 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15106                         u32 dw_len)
15107 {
15108         u32 delta_cycles;
15109         u32 current_egress_rate = ppd->current_egress_rate;
15110         /* rates here are in units of 10^6 bits/sec */
15111
15112         if (desired_egress_rate == -1)
15113                 return 0; /* shouldn't happen */
15114
15115         if (desired_egress_rate >= current_egress_rate)
15116                 return 0; /* we can't help it go faster, only slower */
15117
15118         delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15119                         egress_cycles(dw_len * 4, current_egress_rate);
15120
15121         return (u16)delta_cycles;
15122 }
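/*
 * Illustration of delay_cycles() (egress_cycles() is defined elsewhere;
 * assumed here to return the fabric clock cycles needed to egress the
 * given byte count at the given rate in 10^6 bits/sec): if the link
 * currently egresses at 100000 but the desired static rate is 25000, the
 * packet takes roughly four times as many cycles at the slower rate, and
 * the returned delta is the extra cycles the PBC must stall to emulate it.
 */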
15123
15124 /**
15125  * create_pbc - build a pbc for transmission
15126  * @flags: special case flags or-ed in built pbc
15127  * @srate_mbs: static rate, in units of 10^6 bits/sec
15128  * @vl: vl
15129  * @dw_len: dword length (header words + data words + pbc words)
15130  *
15131  * Create a PBC with the given flags, rate, VL, and length.
15132  *
15133  * NOTE: The PBC created will not insert any HCRC - all callers but one are
15134  * for verbs, which does not use this PSM feature.  The lone other caller
15135  * is for the diagnostic interface which calls this if the user does not
15136  * supply their own PBC.
15137  */
15138 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15139                u32 dw_len)
15140 {
15141         u64 pbc, delay = 0;
15142
15143         if (unlikely(srate_mbs))
15144                 delay = delay_cycles(ppd, srate_mbs, dw_len);
15145
15146         pbc = flags
15147                 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15148                 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15149                 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15150                 | (dw_len & PBC_LENGTH_DWS_MASK)
15151                         << PBC_LENGTH_DWS_SHIFT;
15152
15153         return pbc;
15154 }
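/*
 * Hedged usage sketch (values are illustrative, not from an actual
 * caller): a send with no special flags, no static rate limiting, VL 0
 * and a 32-dword packet would use
 *
 *      u64 pbc = create_pbc(ppd, 0, 0, 0, 32);
 *
 * which packs PBC_IHCRC_NONE into the HCRC field, 0 into the VL field,
 * 32 into the length-in-dwords field, and a zero static-rate delay.
 */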
15155
15156 #define SBUS_THERMAL    0x4f
15157 #define SBUS_THERM_MONITOR_MODE 0x1
15158
15159 #define THERM_FAILURE(dev, ret, reason) \
15160         dd_dev_err((dd),                                                \
15161                    "Thermal sensor initialization failed: %s (%d)\n",   \
15162                    (reason), (ret))
15163
15164 /*
15165  * Initialize the thermal sensor.
15166  *
15167  * After initialization, enable polling of thermal sensor through
15168  * the SBus interface.  For this to work, the SBus Master firmware
15169  * has to be loaded, because the HW polling logic uses SBus
15170  * interrupts, which the default firmware does not support.
15171  * Otherwise, no data will be returned through the
15172  * ASIC_STS_THERM CSR.
15173  */
15174 static int thermal_init(struct hfi1_devdata *dd)
15175 {
15176         int ret = 0;
15177
15178         if (dd->icode != ICODE_RTL_SILICON ||
15179             check_chip_resource(dd, CR_THERM_INIT, NULL))
15180                 return ret;
15181
15182         ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15183         if (ret) {
15184                 THERM_FAILURE(dd, ret, "Acquire SBus");
15185                 return ret;
15186         }
15187
15188         dd_dev_info(dd, "Initializing thermal sensor\n");
15189         /* Disable polling of thermal readings */
15190         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15191         msleep(100);
15192         /* Thermal Sensor Initialization */
15193         /*    Step 1: Reset the Thermal SBus Receiver */
15194         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15195                                 RESET_SBUS_RECEIVER, 0);
15196         if (ret) {
15197                 THERM_FAILURE(dd, ret, "Bus Reset");
15198                 goto done;
15199         }
15200         /*    Step 2: Set Reset bit in Thermal block */
15201         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15202                                 WRITE_SBUS_RECEIVER, 0x1);
15203         if (ret) {
15204                 THERM_FAILURE(dd, ret, "Therm Block Reset");
15205                 goto done;
15206         }
15207         /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
15208         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15209                                 WRITE_SBUS_RECEIVER, 0x32);
15210         if (ret) {
15211                 THERM_FAILURE(dd, ret, "Write Clock Div");
15212                 goto done;
15213         }
15214         /*    Step 4: Select temperature mode */
15215         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15216                                 WRITE_SBUS_RECEIVER,
15217                                 SBUS_THERM_MONITOR_MODE);
15218         if (ret) {
15219                 THERM_FAILURE(dd, ret, "Write Mode Sel");
15220                 goto done;
15221         }
15222         /*    Step 5: De-assert block reset and start conversion */
15223         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15224                                 WRITE_SBUS_RECEIVER, 0x2);
15225         if (ret) {
15226                 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15227                 goto done;
15228         }
15229         /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15230         msleep(22);
15231
15232         /* Enable polling of thermal readings */
15233         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15234
15235         /* Set initialized flag */
15236         ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15237         if (ret)
15238                 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15239
15240 done:
15241         release_chip_resource(dd, CR_SBUS);
15242         return ret;
15243 }
15244
15245 static void handle_temp_err(struct hfi1_devdata *dd)
15246 {
15247         struct hfi1_pportdata *ppd = &dd->pport[0];
15248         /*
15249          * Thermal Critical Interrupt
15250          * Put the device into forced freeze mode, take link down to
15251          * offline, and put DC into reset.
15252          */
15253         dd_dev_emerg(dd,
15254                      "Critical temperature reached! Forcing device into freeze mode!\n");
15255         dd->flags |= HFI1_FORCED_FREEZE;
15256         start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15257         /*
15258          * Shut DC down as much and as quickly as possible.
15259          *
15260          * Step 1: Take the link down to OFFLINE. This will cause the
15261          *         8051 to put the Serdes in reset. However, we don't want to
15262          *         go through the entire link state machine since we want to
15263          *         shutdown ASAP. Furthermore, this is not a graceful shutdown
15264          *         but rather an attempt to save the chip.
15265          *         Code below is almost the same as quiet_serdes() but avoids
15266          *         all the extra work and the sleeps.
15267          */
15268         ppd->driver_link_ready = 0;
15269         ppd->link_enabled = 0;
15270         set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15271                                 PLS_OFFLINE);
15272         /*
15273          * Step 2: Shutdown LCB and 8051
15274          *         After shutdown, do not restore DC_CFG_RESET value.
15275          */
15276         dc_shutdown(dd);
15277 }