IB/hfi1: Move constant to the right in bitwise operations
drivers/staging/rdma/hfi1/chip.c
1 /*
2  * Copyright(c) 2015, 2016 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 /*
49  * This file contains all of the code that is specific to the HFI chip
50  */
51
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56
57 #include "hfi.h"
58 #include "trace.h"
59 #include "mad.h"
60 #include "pio.h"
61 #include "sdma.h"
62 #include "eprom.h"
63 #include "efivar.h"
64 #include "platform.h"
65 #include "aspm.h"
66
67 #define NUM_IB_PORTS 1
68
69 uint kdeth_qp;
70 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
71 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
72
73 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
74 module_param(num_vls, uint, S_IRUGO);
75 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
76
77 /*
78  * Default time to aggregate two 10K packets from the idle state
79  * (timer not running). The timer starts at the end of the first packet,
80  * so only the time for one 10K packet and header plus a bit extra is needed.
81  * 10 * 1024 + 64 header bytes = 10304 bytes
82  * 10304 bytes / 12.5 GB/s = 824.32 ns
83  */
84 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
85 module_param(rcv_intr_timeout, uint, S_IRUGO);
86 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
87
88 uint rcv_intr_count = 16; /* same as qib */
89 module_param(rcv_intr_count, uint, S_IRUGO);
90 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
91
92 ushort link_crc_mask = SUPPORTED_CRCS;
93 module_param(link_crc_mask, ushort, S_IRUGO);
94 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
95
96 uint loopback;
97 module_param_named(loopback, loopback, uint, S_IRUGO);
98 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
99
100 /* Other driver tunables */
101 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
102 static ushort crc_14b_sideband = 1;
103 static uint use_flr = 1;
104 uint quick_linkup; /* skip LNI */
105
106 struct flag_table {
107         u64 flag;       /* the flag */
108         char *str;      /* description string */
109         u16 extra;      /* extra information */
110         u16 unused0;
111         u32 unused1;
112 };
113
114 /* str must be a string constant */
115 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
116 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
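/*
 * For example, FLAG_ENTRY0("CceCsrParityErr",
 * CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK) (the first CCE entry below)
 * initializes a flag_table entry as
 * {CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0},
 * i.e. .flag = the status bit mask, .str = its name, .extra = 0.
 */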
117
118 /* Send Error Consequences */
119 #define SEC_WRITE_DROPPED       0x1
120 #define SEC_PACKET_DROPPED      0x2
121 #define SEC_SC_HALTED           0x4     /* per-context only */
122 #define SEC_SPC_FREEZE          0x8     /* per-HFI only */
123
124 #define MIN_KERNEL_KCTXTS         2
125 #define FIRST_KERNEL_KCTXT        1
126 #define NUM_MAP_REGS             32
127
128 /* Bit offset into the GUID which carries HFI id information */
129 #define GUID_HFI_INDEX_SHIFT     39
130
131 /* extract the emulation revision */
132 #define emulator_rev(dd) ((dd)->irev >> 8)
133 /* parallel and serial emulation versions are 3 and 4 respectively */
134 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
135 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
136
137 /* RSM fields */
138
139 /* packet type */
140 #define IB_PACKET_TYPE         2ull
141 #define QW_SHIFT               6ull
142 /* QPN[7..1] */
143 #define QPN_WIDTH              7ull
144
145 /* LRH.BTH: QW 0, OFFSET 48 - for match */
146 #define LRH_BTH_QW             0ull
147 #define LRH_BTH_BIT_OFFSET     48ull
148 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
149 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
150 #define LRH_BTH_SELECT
151 #define LRH_BTH_MASK           3ull
152 #define LRH_BTH_VALUE          2ull
153
154 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
155 #define LRH_SC_QW              0ull
156 #define LRH_SC_BIT_OFFSET      56ull
157 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
158 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
159 #define LRH_SC_MASK            128ull
160 #define LRH_SC_VALUE           0ull
161
162 /* SC[n..0] QW 0, OFFSET 60 - for select */
163 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
164
165 /* QPN[m+n:1] QW 1, OFFSET 1 */
166 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
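/*
 * The (QW << QW_SHIFT) | offset encoding packs the quad-word index above
 * the bit offset, so the match/select offsets above work out to:
 *   LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 *   LRH_SC_MATCH_OFFSET  = (0 << 6) | 56 = 56
 *   LRH_SC_SELECT_OFFSET = (0 << 6) | 60 = 60
 *   QPN_SELECT_OFFSET    = (1 << 6) |  1 = 65
 */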
167
168 /* defines to build power on SC2VL table */
169 #define SC2VL_VAL( \
170         num, \
171         sc0, sc0val, \
172         sc1, sc1val, \
173         sc2, sc2val, \
174         sc3, sc3val, \
175         sc4, sc4val, \
176         sc5, sc5val, \
177         sc6, sc6val, \
178         sc7, sc7val) \
179 ( \
180         ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
181         ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
182         ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
183         ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
184         ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
185         ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
186         ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
187         ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
188 )
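/*
 * For example (the values here are illustrative only, not the power-on
 * table), SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7)
 * builds a SendSC2VLt0 CSR value by OR-ing each SC's VL, shifted into
 * that SC's field via the SEND_SC2VLT0_SC<n>_SHIFT definitions.
 */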
189
190 #define DC_SC_VL_VAL( \
191         range, \
192         e0, e0val, \
193         e1, e1val, \
194         e2, e2val, \
195         e3, e3val, \
196         e4, e4val, \
197         e5, e5val, \
198         e6, e6val, \
199         e7, e7val, \
200         e8, e8val, \
201         e9, e9val, \
202         e10, e10val, \
203         e11, e11val, \
204         e12, e12val, \
205         e13, e13val, \
206         e14, e14val, \
207         e15, e15val) \
208 ( \
209         ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
210         ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
211         ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
212         ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
213         ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
214         ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
215         ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
216         ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
217         ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
218         ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
219         ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
220         ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
221         ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
222         ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
223         ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
224         ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
225 )
226
227 /* all CceStatus sub-block freeze bits */
228 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
229                         | CCE_STATUS_RXE_FROZE_SMASK \
230                         | CCE_STATUS_TXE_FROZE_SMASK \
231                         | CCE_STATUS_TXE_PIO_FROZE_SMASK)
232 /* all CceStatus sub-block TXE pause bits */
233 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
234                         | CCE_STATUS_TXE_PAUSED_SMASK \
235                         | CCE_STATUS_SDMA_PAUSED_SMASK)
236 /* all CceStatus sub-block RXE pause bits */
237 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
238
239 /*
240  * CCE Error flags.
241  */
242 static struct flag_table cce_err_status_flags[] = {
243 /* 0*/  FLAG_ENTRY0("CceCsrParityErr",
244                 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
245 /* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
246                 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
247 /* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
248                 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
249 /* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
250                 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
251 /* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
252                 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
253 /* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
254                 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
255 /* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
256                 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
257 /* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
258                 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
259 /* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
260                 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
261 /* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
262             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
263 /*10*/  FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
264             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
265 /*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
266             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
267 /*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
268                 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
269 /*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
270                 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
271 /*14*/  FLAG_ENTRY0("PcicRetrySotMemCorErr",
272                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
273 /*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
274                 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
275 /*16*/  FLAG_ENTRY0("PcicPostDatQCorErr",
276                 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
277 /*17*/  FLAG_ENTRY0("PcicCplHdQCorErr",
278                 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
279 /*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
280                 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
281 /*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
282                 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
283 /*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
284                 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
285 /*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
286                 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
287 /*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
288                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
289 /*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
290                 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
291 /*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
292                 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
293 /*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
294                 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
295 /*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
296                 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
297 /*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
298                 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
299 /*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
300                 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
301 /*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
302                 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
303 /*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
304                 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
305 /*31*/  FLAG_ENTRY0("LATriggered",
306                 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
307 /*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
308                 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
309 /*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
310                 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
311 /*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
312                 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
313 /*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
314                 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
315 /*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
316                 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
317 /*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
318                 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
319 /*38*/  FLAG_ENTRY0("CceIntMapCorErr",
320                 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
321 /*39*/  FLAG_ENTRY0("CceIntMapUncErr",
322                 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
323 /*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
324                 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
325 /*41-63 reserved*/
326 };
327
328 /*
329  * Misc Error flags
330  */
331 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
332 static struct flag_table misc_err_status_flags[] = {
333 /* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
334 /* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
335 /* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
336 /* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
337 /* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
338 /* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
339 /* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
340 /* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
341 /* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
342 /* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
343 /*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
344 /*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
345 /*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
346 };
347
348 /*
349  * TXE PIO Error flags and consequences
350  */
351 static struct flag_table pio_err_status_flags[] = {
352 /* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
353         SEC_WRITE_DROPPED,
354         SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
355 /* 1*/  FLAG_ENTRY("PioWriteAddrParity",
356         SEC_SPC_FREEZE,
357         SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
358 /* 2*/  FLAG_ENTRY("PioCsrParity",
359         SEC_SPC_FREEZE,
360         SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
361 /* 3*/  FLAG_ENTRY("PioSbMemFifo0",
362         SEC_SPC_FREEZE,
363         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
364 /* 4*/  FLAG_ENTRY("PioSbMemFifo1",
365         SEC_SPC_FREEZE,
366         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
367 /* 5*/  FLAG_ENTRY("PioPccFifoParity",
368         SEC_SPC_FREEZE,
369         SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
370 /* 6*/  FLAG_ENTRY("PioPecFifoParity",
371         SEC_SPC_FREEZE,
372         SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
373 /* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
374         SEC_SPC_FREEZE,
375         SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
376 /* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
377         SEC_SPC_FREEZE,
378         SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
379 /* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
380         SEC_SPC_FREEZE,
381         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
382 /*10*/  FLAG_ENTRY("PioSmPktResetParity",
383         SEC_SPC_FREEZE,
384         SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
385 /*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
386         SEC_SPC_FREEZE,
387         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
388 /*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
389         SEC_SPC_FREEZE,
390         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
391 /*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
392         0,
393         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
394 /*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
395         0,
396         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
397 /*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
398         SEC_SPC_FREEZE,
399         SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
400 /*16*/  FLAG_ENTRY("PioPpmcPblFifo",
401         SEC_SPC_FREEZE,
402         SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
403 /*17*/  FLAG_ENTRY("PioInitSmIn",
404         0,
405         SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
406 /*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
407         SEC_SPC_FREEZE,
408         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
409 /*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
410         SEC_SPC_FREEZE,
411         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
412 /*20*/  FLAG_ENTRY("PioHostAddrMemCor",
413         0,
414         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
415 /*21*/  FLAG_ENTRY("PioWriteDataParity",
416         SEC_SPC_FREEZE,
417         SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
418 /*22*/  FLAG_ENTRY("PioStateMachine",
419         SEC_SPC_FREEZE,
420         SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
421 /*23*/  FLAG_ENTRY("PioWriteQwValidParity",
422         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
423         SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
424 /*24*/  FLAG_ENTRY("PioBlockQwCountParity",
425         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
426         SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
427 /*25*/  FLAG_ENTRY("PioVlfVlLenParity",
428         SEC_SPC_FREEZE,
429         SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
430 /*26*/  FLAG_ENTRY("PioVlfSopParity",
431         SEC_SPC_FREEZE,
432         SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
433 /*27*/  FLAG_ENTRY("PioVlFifoParity",
434         SEC_SPC_FREEZE,
435         SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
436 /*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
437         SEC_SPC_FREEZE,
438         SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
439 /*29*/  FLAG_ENTRY("PioPpmcSopLen",
440         SEC_SPC_FREEZE,
441         SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
442 /*30-31 reserved*/
443 /*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
444         SEC_SPC_FREEZE,
445         SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
446 /*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
447         SEC_SPC_FREEZE,
448         SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
449 /*34*/  FLAG_ENTRY("PioPccSopHeadParity",
450         SEC_SPC_FREEZE,
451         SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
452 /*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
453         SEC_SPC_FREEZE,
454         SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
455 /*36-63 reserved*/
456 };
457
458 /* TXE PIO errors that cause an SPC freeze */
459 #define ALL_PIO_FREEZE_ERR \
460         (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
461         | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
462         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
463         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
464         | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
465         | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
466         | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
467         | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
468         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
469         | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
470         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
471         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
472         | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
473         | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
474         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
475         | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
476         | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
477         | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
478         | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
479         | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
480         | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
481         | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
482         | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
483         | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
484         | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
485         | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
486         | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
487         | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
488         | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
489
490 /*
491  * TXE SDMA Error flags
492  */
493 static struct flag_table sdma_err_status_flags[] = {
494 /* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
495                 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
496 /* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
497                 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
498 /* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
499                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
500 /* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
501                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
502 /*04-63 reserved*/
503 };
504
505 /* TXE SDMA errors that cause an SPC freeze */
506 #define ALL_SDMA_FREEZE_ERR  \
507                 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
508                 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
509                 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
510
511 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
512 #define PORT_DISCARD_EGRESS_ERRS \
513         (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
514         | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
515         | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
516
517 /*
518  * TXE Egress Error flags
519  */
520 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
521 static struct flag_table egress_err_status_flags[] = {
522 /* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
523 /* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
524 /* 2 reserved */
525 /* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
526                 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
527 /* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
528 /* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
529 /* 6 reserved */
530 /* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
531                 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
532 /* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
533                 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
534 /* 9-10 reserved */
535 /*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
536                 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
537 /*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
538 /*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
539 /*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
540 /*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
541 /*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
542                 SEES(TX_SDMA0_DISALLOWED_PACKET)),
543 /*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
544                 SEES(TX_SDMA1_DISALLOWED_PACKET)),
545 /*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
546                 SEES(TX_SDMA2_DISALLOWED_PACKET)),
547 /*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
548                 SEES(TX_SDMA3_DISALLOWED_PACKET)),
549 /*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
550                 SEES(TX_SDMA4_DISALLOWED_PACKET)),
551 /*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
552                 SEES(TX_SDMA5_DISALLOWED_PACKET)),
553 /*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
554                 SEES(TX_SDMA6_DISALLOWED_PACKET)),
555 /*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
556                 SEES(TX_SDMA7_DISALLOWED_PACKET)),
557 /*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
558                 SEES(TX_SDMA8_DISALLOWED_PACKET)),
559 /*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
560                 SEES(TX_SDMA9_DISALLOWED_PACKET)),
561 /*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
562                 SEES(TX_SDMA10_DISALLOWED_PACKET)),
563 /*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
564                 SEES(TX_SDMA11_DISALLOWED_PACKET)),
565 /*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
566                 SEES(TX_SDMA12_DISALLOWED_PACKET)),
567 /*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
568                 SEES(TX_SDMA13_DISALLOWED_PACKET)),
569 /*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
570                 SEES(TX_SDMA14_DISALLOWED_PACKET)),
571 /*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
572                 SEES(TX_SDMA15_DISALLOWED_PACKET)),
573 /*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
574                 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
575 /*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
576                 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
577 /*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
578                 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
579 /*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
580                 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
581 /*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
582                 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
583 /*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
584                 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
585 /*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
586                 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
587 /*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
588                 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
589 /*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
590                 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
591 /*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
592 /*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
593 /*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
594 /*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
595 /*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
596 /*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
597 /*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
598 /*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
599 /*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
600 /*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
601 /*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
602 /*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
603 /*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
604 /*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
605 /*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
606 /*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
607 /*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
608 /*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
609 /*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
610 /*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
611 /*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
612 /*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
613                 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
614 /*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
615                 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
616 };
617
618 /*
619  * TXE Egress Error Info flags
620  */
621 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
622 static struct flag_table egress_err_info_flags[] = {
623 /* 0*/  FLAG_ENTRY0("Reserved", 0ull),
624 /* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
625 /* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
626 /* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
627 /* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
628 /* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
629 /* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
630 /* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
631 /* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
632 /* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
633 /*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
634 /*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
635 /*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
636 /*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
637 /*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
638 /*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
639 /*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
640 /*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
641 /*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
642 /*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
643 /*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
644 /*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
645 };
646
647 /* TXE Egress errors that cause an SPC freeze */
648 #define ALL_TXE_EGRESS_FREEZE_ERR \
649         (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
650         | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
651         | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
652         | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
653         | SEES(TX_LAUNCH_CSR_PARITY) \
654         | SEES(TX_SBRD_CTL_CSR_PARITY) \
655         | SEES(TX_CONFIG_PARITY) \
656         | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
657         | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
658         | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
659         | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
660         | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
661         | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
662         | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
663         | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
664         | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
665         | SEES(TX_CREDIT_RETURN_PARITY))
666
667 /*
668  * TXE Send error flags
669  */
670 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
671 static struct flag_table send_err_status_flags[] = {
672 /* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
673 /* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
674 /* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
675 };
676
677 /*
678  * TXE Send Context Error flags and consequences
679  */
680 static struct flag_table sc_err_status_flags[] = {
681 /* 0*/  FLAG_ENTRY("InconsistentSop",
682                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
683                 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
684 /* 1*/  FLAG_ENTRY("DisallowedPacket",
685                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
686                 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
687 /* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
688                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
689                 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
690 /* 3*/  FLAG_ENTRY("WriteOverflow",
691                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
692                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
693 /* 4*/  FLAG_ENTRY("WriteOutOfBounds",
694                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
695                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
696 /* 5-63 reserved*/
697 };
698
699 /*
700  * RXE Receive Error flags
701  */
702 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
703 static struct flag_table rxe_err_status_flags[] = {
704 /* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
705 /* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
706 /* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
707 /* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
708 /* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
709 /* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
710 /* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
711 /* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
712 /* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
713 /* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
714 /*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
715 /*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
716 /*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
717 /*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
718 /*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
719 /*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
720 /*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
721                 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
722 /*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
723 /*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
724 /*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
725                 RXES(RBUF_BLOCK_LIST_READ_UNC)),
726 /*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
727                 RXES(RBUF_BLOCK_LIST_READ_COR)),
728 /*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
729                 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
730 /*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
731                 RXES(RBUF_CSR_QENT_CNT_PARITY)),
732 /*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
733                 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
734 /*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
735                 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
736 /*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
737 /*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
738 /*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
739                 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
740 /*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
741 /*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
742 /*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
743 /*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
744 /*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
745 /*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
746 /*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
747 /*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
748                 RXES(RBUF_FL_INITDONE_PARITY)),
749 /*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
750                 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
751 /*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
752 /*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
753 /*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
754 /*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
755                 RXES(LOOKUP_DES_PART1_UNC_COR)),
756 /*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
757                 RXES(LOOKUP_DES_PART2_PARITY)),
758 /*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
759 /*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
760 /*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
761 /*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
762 /*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
763 /*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
764 /*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
765 /*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
766 /*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
767 /*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
768 /*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
769 /*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
770 /*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
771 /*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
772 /*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
773 /*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
774 /*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
775 /*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
776 /*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
777 /*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
778 /*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
779 /*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
780 };
781
782 /* RXE errors that will trigger an SPC freeze */
783 #define ALL_RXE_FREEZE_ERR  \
784         (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
785         | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
786         | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
787         | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
788         | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
789         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
790         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
791         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
792         | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
793         | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
794         | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
795         | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
796         | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
797         | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
798         | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
799         | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
800         | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
801         | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
802         | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
803         | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
804         | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
805         | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
806         | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
807         | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
808         | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
809         | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
810         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
811         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
812         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
813         | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
814         | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
815         | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
816         | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
817         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
818         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
819         | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
820         | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
821         | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
822         | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
823         | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
824         | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
825         | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
826         | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
827         | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
828
829 #define RXE_FREEZE_ABORT_MASK \
830         (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
831         RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
832         RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
833
834 /*
835  * DCC Error Flags
836  */
837 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
838 static struct flag_table dcc_err_flags[] = {
839         FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
840         FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
841         FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
842         FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
843         FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
844         FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
845         FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
846         FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
847         FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
848         FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
849         FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
850         FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
851         FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
852         FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
853         FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
854         FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
855         FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
856         FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
857         FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
858         FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
859         FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
860         FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
861         FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
862         FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
863         FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
864         FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
865         FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
866         FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
867         FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
868         FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
869         FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
870         FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
871         FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
872         FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
873         FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
874         FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
875         FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
876         FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
877         FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
878         FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
879         FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
880         FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
881         FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
882         FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
883         FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
884         FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
885 };
886
887 /*
888  * LCB error flags
889  */
890 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
891 static struct flag_table lcb_err_flags[] = {
892 /* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
893 /* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
894 /* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
895 /* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
896                 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
897 /* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
898 /* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
899 /* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
900 /* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
901 /* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
902 /* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
903 /*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
904 /*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
905 /*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
906 /*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
907                 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
908 /*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
909 /*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
910 /*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
911 /*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
912 /*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
913 /*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
914                 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
915 /*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
916 /*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
917 /*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
918 /*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
919 /*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
920 /*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
921 /*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
922                 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
923 /*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
924 /*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
925                 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
926 /*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
927                 LCBE(REDUNDANT_FLIT_PARITY_ERR))
928 };
929
930 /*
931  * DC8051 Error Flags
932  */
933 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
934 static struct flag_table dc8051_err_flags[] = {
935         FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
936         FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
937         FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
938         FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
939         FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
940         FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
941         FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
942         FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
943         FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
944                     D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
945         FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
946 };
947
948 /*
949  * DC8051 Information Error flags
950  *
951  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
952  */
953 static struct flag_table dc8051_info_err_flags[] = {
954         FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
955         FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
956         FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
957         FLAG_ENTRY0("Serdes internal loopback failure",
958                     FAILED_SERDES_INTERNAL_LOOPBACK),
959         FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
960         FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
961         FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
962         FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
963         FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
964         FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
965         FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
966         FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT)
967 };
968
969 /*
970  * DC8051 Information Host Information flags
971  *
972  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
973  */
974 static struct flag_table dc8051_info_host_msg_flags[] = {
975         FLAG_ENTRY0("Host request done", 0x0001),
976         FLAG_ENTRY0("BC SMA message", 0x0002),
977         FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
978         FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
979         FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
980         FLAG_ENTRY0("External device config request", 0x0020),
981         FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
982         FLAG_ENTRY0("LinkUp achieved", 0x0080),
983         FLAG_ENTRY0("Link going down", 0x0100),
984 };
985
986 static u32 encoded_size(u32 size);
987 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
988 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
989 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
990                                u8 *continuous);
991 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
992                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
993 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
994                                       u8 *remote_tx_rate, u16 *link_widths);
995 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
996                                      u8 *flag_bits, u16 *link_widths);
997 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
998                                   u8 *device_rev);
999 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
1000 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1001 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1002                             u8 *tx_polarity_inversion,
1003                             u8 *rx_polarity_inversion, u8 *max_rate);
1004 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1005                                 unsigned int context, u64 err_status);
1006 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1007 static void handle_dcc_err(struct hfi1_devdata *dd,
1008                            unsigned int context, u64 err_status);
1009 static void handle_lcb_err(struct hfi1_devdata *dd,
1010                            unsigned int context, u64 err_status);
1011 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1012 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1013 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1014 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1015 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1016 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1017 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1018 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1019 static void set_partition_keys(struct hfi1_pportdata *);
1020 static const char *link_state_name(u32 state);
1021 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1022                                           u32 state);
1023 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1024                            u64 *out_data);
1025 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1026 static int thermal_init(struct hfi1_devdata *dd);
1027
1028 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1029                                   int msecs);
1030 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1031 static void handle_temp_err(struct hfi1_devdata *);
1032 static void dc_shutdown(struct hfi1_devdata *);
1033 static void dc_start(struct hfi1_devdata *);
1034
1035 /*
1036  * Error interrupt table entry.  This is used as input to the interrupt
1037  * "clear down" routine used for all second tier error interrupt register.
1038  * Second tier interrupt registers have a single bit representing them
1039  * in the top-level CceIntStatus.
1040  */
1041 struct err_reg_info {
1042         u32 status;             /* status CSR offset */
1043         u32 clear;              /* clear CSR offset */
1044         u32 mask;               /* mask CSR offset */
1045         void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1046         const char *desc;
1047 };
1048
1049 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
1050 #define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
1051 #define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
1052
1053 /*
1054  * Helpers for building HFI and DC error interrupt table entries.  Different
1055  * helpers are needed because of inconsistent register names.
1056  */
1057 #define EE(reg, handler, desc) \
1058         { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1059                 handler, desc }
1060 #define DC_EE1(reg, handler, desc) \
1061         { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1062 #define DC_EE2(reg, handler, desc) \
1063         { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
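/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") below expands to
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" },
 * while DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err") expands to
 * { DCC_ERR_FLG, DCC_ERR_FLG_CLR, DCC_ERR_FLG_EN, handle_dcc_err, "DCC Err" }.
 */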
1064
1065 /*
1066  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1067  * another register containing more information.
1068  */
1069 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1070 /* 0*/  EE(CCE_ERR,             handle_cce_err,    "CceErr"),
1071 /* 1*/  EE(RCV_ERR,             handle_rxe_err,    "RxeErr"),
1072 /* 2*/  EE(MISC_ERR,    handle_misc_err,   "MiscErr"),
1073 /* 3*/  { 0, 0, 0, NULL }, /* reserved */
1074 /* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1075 /* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1076 /* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1077 /* 7*/  EE(SEND_ERR,    handle_txe_err,    "TxeErr")
1078         /* the rest are reserved */
1079 };
1080
1081 /*
1082  * Index into the Various section of the interrupt sources
1083  * corresponding to the Critical Temperature interrupt.
1084  */
1085 #define TCRIT_INT_SOURCE 4
1086
1087 /*
1088  * SDMA error interrupt entry - refers to another register containing more
1089  * information.
1090  */
1091 static const struct err_reg_info sdma_eng_err =
1092         EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1093
1094 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1095 /* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1096 /* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1097 /* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1098 /* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1099 /* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1100         /* rest are reserved */
1101 };
1102
1103 /*
1104  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1105  * register cannot be derived from the MTU value because 10K is not
1106  * a power of 2. Therefore, we need a constant. Everything else can
1107  * be calculated.
1108  */
1109 #define DCC_CFG_PORT_MTU_CAP_10240 7
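
/*
 * Worked example of why the constant is needed (illustrative only; the
 * register is actually programmed elsewhere in this file): an ilog2()
 * based encoding such as
 *
 *      enc = ilog2(mtu >> 8) + 1;
 *
 * gives 4 for 2048, 5 for 4096 and 6 for 8192, but 10240 is not a power
 * of 2, so the same formula would also yield 6 (ilog2(40) + 1) and
 * collide with the 8192 encoding.  Hence the dedicated value 7 above.
 */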
1110
1111 /*
1112  * Table of the DC grouping of error interrupts.  Each entry refers to
1113  * another register containing more information.
1114  */
1115 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1116 /* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1117 /* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1118 /* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1119 /* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1120         /* the rest are reserved */
1121 };
1122
1123 struct cntr_entry {
1124         /*
1125          * counter name
1126          */
1127         char *name;
1128
1129         /*
1130          * csr to read for this counter (if applicable)
1131          */
1132         u64 csr;
1133
1134         /*
1135          * offset into dd or ppd to store the counter's value
1136          */
1137         int offset;
1138
1139         /*
1140          * flags
1141          */
1142         u8 flags;
1143
1144         /*
1145          * accessor for stat element, context either dd or ppd
1146          */
1147         u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1148                        int mode, u64 data);
1149 };
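
/*
 * A minimal usage sketch (not actual driver code), assuming an entry
 * "e" from one of the counter tables built with the helpers below and a
 * device context "dd".  Reading uses CNTR_MODE_R; writing 0 with
 * CNTR_MODE_W resets the counter:
 *
 *      u64 val = e->rw_cntr(e, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
 *
 *      e->rw_cntr(e, dd, CNTR_INVALID_VL, CNTR_MODE_W, 0);
 *
 * Per-VL counters pass a real VL index instead of CNTR_INVALID_VL, and
 * port counters take a ppd rather than a dd as their context.
 */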
1150
1151 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1152 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1153
1154 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1155 { \
1156         name, \
1157         csr, \
1158         offset, \
1159         flags, \
1160         accessor \
1161 }
1162
1163 /* 32bit RXE */
1164 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1165 CNTR_ELEM(#name, \
1166           (counter * 8 + RCV_COUNTER_ARRAY32), \
1167           0, flags | CNTR_32BIT, \
1168           port_access_u32_csr)
1169
1170 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1171 CNTR_ELEM(#name, \
1172           (counter * 8 + RCV_COUNTER_ARRAY32), \
1173           0, flags | CNTR_32BIT, \
1174           dev_access_u32_csr)
1175
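/*
 * Example of what these wrappers produce (the names here are
 * placeholders, not real table entries):
 *
 *      RXE32_DEV_CNTR_ELEM(RxFooCnt, FOO_CNT, 0)
 *
 * expands to the cntr_entry initializer
 *
 *      { "RxFooCnt", (FOO_CNT * 8 + RCV_COUNTER_ARRAY32),
 *        0, 0 | CNTR_32BIT, dev_access_u32_csr }
 *
 * i.e. the CSR address is computed from the counter index and the entry
 * is flagged as a 32-bit hardware counter.
 */
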
1176 /* 64bit RXE */
1177 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1178 CNTR_ELEM(#name, \
1179           (counter * 8 + RCV_COUNTER_ARRAY64), \
1180           0, flags, \
1181           port_access_u64_csr)
1182
1183 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1184 CNTR_ELEM(#name, \
1185           (counter * 8 + RCV_COUNTER_ARRAY64), \
1186           0, flags, \
1187           dev_access_u64_csr)
1188
1189 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1190 #define OVR_ELM(ctx) \
1191 CNTR_ELEM("RcvHdrOvr" #ctx, \
1192           (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1193           0, CNTR_NORMAL, port_access_u64_csr)
1194
1195 /* 32bit TXE */
1196 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1197 CNTR_ELEM(#name, \
1198           (counter * 8 + SEND_COUNTER_ARRAY32), \
1199           0, flags | CNTR_32BIT, \
1200           port_access_u32_csr)
1201
1202 /* 64bit TXE */
1203 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1204 CNTR_ELEM(#name, \
1205           (counter * 8 + SEND_COUNTER_ARRAY64), \
1206           0, flags, \
1207           port_access_u64_csr)
1208
1209 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1210 CNTR_ELEM(#name, \
1211           (counter * 8 + SEND_COUNTER_ARRAY64), \
1212           0, \
1213           flags, \
1214           dev_access_u64_csr)
1215
1216 /* CCE */
1217 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1218 CNTR_ELEM(#name, \
1219           (counter * 8 + CCE_COUNTER_ARRAY32), \
1220           0, flags | CNTR_32BIT, \
1221           dev_access_u32_csr)
1222
1223 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1224 CNTR_ELEM(#name, \
1225           (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1226           0, flags | CNTR_32BIT, \
1227           dev_access_u32_csr)
1228
1229 /* DC */
1230 #define DC_PERF_CNTR(name, counter, flags) \
1231 CNTR_ELEM(#name, \
1232           counter, \
1233           0, \
1234           flags, \
1235           dev_access_u64_csr)
1236
1237 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1238 CNTR_ELEM(#name, \
1239           counter, \
1240           0, \
1241           flags, \
1242           dc_access_lcb_cntr)
1243
1244 /* ibp counters */
1245 #define SW_IBP_CNTR(name, cntr) \
1246 CNTR_ELEM(#name, \
1247           0, \
1248           0, \
1249           CNTR_SYNTH, \
1250           access_ibp_##cntr)
1251
1252 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1253 {
1254         u64 val;
1255
1256         if (dd->flags & HFI1_PRESENT) {
1257                 val = readq((void __iomem *)dd->kregbase + offset);
1258                 return val;
1259         }
1260         return -1;
1261 }
1262
1263 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1264 {
1265         if (dd->flags & HFI1_PRESENT)
1266                 writeq(value, (void __iomem *)dd->kregbase + offset);
1267 }
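
/*
 * Typical read-modify-write pattern built on the two accessors above
 * (sketch only; the field mask/shift names are placeholders, not real
 * CSR fields):
 *
 *      u64 reg = read_csr(dd, SOME_CSR);
 *
 *      reg &= ~SOME_FIELD_SMASK;
 *      reg |= (u64)new_val << SOME_FIELD_SHIFT;
 *      write_csr(dd, SOME_CSR, reg);
 */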
1268
1269 void __iomem *get_csr_addr(
1270         struct hfi1_devdata *dd,
1271         u32 offset)
1272 {
1273         return (void __iomem *)dd->kregbase + offset;
1274 }
1275
1276 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1277                                  int mode, u64 value)
1278 {
1279         u64 ret;
1280
1281         if (mode == CNTR_MODE_R) {
1282                 ret = read_csr(dd, csr);
1283         } else if (mode == CNTR_MODE_W) {
1284                 write_csr(dd, csr, value);
1285                 ret = value;
1286         } else {
1287                 dd_dev_err(dd, "Invalid cntr register access mode");
1288                 return 0;
1289         }
1290
1291         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1292         return ret;
1293 }
1294
1295 /* Dev Access */
1296 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1297                               void *context, int vl, int mode, u64 data)
1298 {
1299         struct hfi1_devdata *dd = context;
1300         u64 csr = entry->csr;
1301
1302         if (entry->flags & CNTR_SDMA) {
1303                 if (vl == CNTR_INVALID_VL)
1304                         return 0;
1305                 csr += 0x100 * vl;
1306         } else {
1307                 if (vl != CNTR_INVALID_VL)
1308                         return 0;
1309         }
1310         return read_write_csr(dd, csr, mode, data);
1311 }
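
/*
 * Note on the stride above: for CNTR_SDMA entries the "vl" argument is
 * presumably reused as an SDMA engine index, and the per-engine copies
 * of the counter CSR are spaced 0x100 bytes apart (engine 3 reads
 * base + 0x300).  The per-VL counters handled below use an 8-byte
 * stride instead, i.e. one 64-bit CSR per VL.
 */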
1312
1313 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1314                               void *context, int idx, int mode, u64 data)
1315 {
1316         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1317
1318         if (dd->per_sdma && idx < dd->num_sdma)
1319                 return dd->per_sdma[idx].err_cnt;
1320         return 0;
1321 }
1322
1323 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1324                               void *context, int idx, int mode, u64 data)
1325 {
1326         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1327
1328         if (dd->per_sdma && idx < dd->num_sdma)
1329                 return dd->per_sdma[idx].sdma_int_cnt;
1330         return 0;
1331 }
1332
1333 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1334                                    void *context, int idx, int mode, u64 data)
1335 {
1336         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1337
1338         if (dd->per_sdma && idx < dd->num_sdma)
1339                 return dd->per_sdma[idx].idle_int_cnt;
1340         return 0;
1341 }
1342
1343 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1344                                        void *context, int idx, int mode,
1345                                        u64 data)
1346 {
1347         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1348
1349         if (dd->per_sdma && idx < dd->num_sdma)
1350                 return dd->per_sdma[idx].progress_int_cnt;
1351         return 0;
1352 }
1353
1354 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1355                               int vl, int mode, u64 data)
1356 {
1357         struct hfi1_devdata *dd = context;
1358
1359         u64 val = 0;
1360         u64 csr = entry->csr;
1361
1362         if (entry->flags & CNTR_VL) {
1363                 if (vl == CNTR_INVALID_VL)
1364                         return 0;
1365                 csr += 8 * vl;
1366         } else {
1367                 if (vl != CNTR_INVALID_VL)
1368                         return 0;
1369         }
1370
1371         val = read_write_csr(dd, csr, mode, data);
1372         return val;
1373 }
1374
1375 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1376                               int vl, int mode, u64 data)
1377 {
1378         struct hfi1_devdata *dd = context;
1379         u32 csr = entry->csr;
1380         int ret = 0;
1381
1382         if (vl != CNTR_INVALID_VL)
1383                 return 0;
1384         if (mode == CNTR_MODE_R)
1385                 ret = read_lcb_csr(dd, csr, &data);
1386         else if (mode == CNTR_MODE_W)
1387                 ret = write_lcb_csr(dd, csr, data);
1388
1389         if (ret) {
1390                 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1391                 return 0;
1392         }
1393
1394         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1395         return data;
1396 }
1397
1398 /* Port Access */
1399 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1400                                int vl, int mode, u64 data)
1401 {
1402         struct hfi1_pportdata *ppd = context;
1403
1404         if (vl != CNTR_INVALID_VL)
1405                 return 0;
1406         return read_write_csr(ppd->dd, entry->csr, mode, data);
1407 }
1408
1409 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1410                                void *context, int vl, int mode, u64 data)
1411 {
1412         struct hfi1_pportdata *ppd = context;
1413         u64 val;
1414         u64 csr = entry->csr;
1415
1416         if (entry->flags & CNTR_VL) {
1417                 if (vl == CNTR_INVALID_VL)
1418                         return 0;
1419                 csr += 8 * vl;
1420         } else {
1421                 if (vl != CNTR_INVALID_VL)
1422                         return 0;
1423         }
1424         val = read_write_csr(ppd->dd, csr, mode, data);
1425         return val;
1426 }
1427
1428 /* Software defined */
1429 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1430                                 u64 data)
1431 {
1432         u64 ret;
1433
1434         if (mode == CNTR_MODE_R) {
1435                 ret = *cntr;
1436         } else if (mode == CNTR_MODE_W) {
1437                 *cntr = data;
1438                 ret = data;
1439         } else {
1440                 dd_dev_err(dd, "Invalid cntr sw access mode");
1441                 return 0;
1442         }
1443
1444         hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1445
1446         return ret;
1447 }
1448
1449 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1450                                  int vl, int mode, u64 data)
1451 {
1452         struct hfi1_pportdata *ppd = context;
1453
1454         if (vl != CNTR_INVALID_VL)
1455                 return 0;
1456         return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1457 }
1458
1459 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1460                                  int vl, int mode, u64 data)
1461 {
1462         struct hfi1_pportdata *ppd = context;
1463
1464         if (vl != CNTR_INVALID_VL)
1465                 return 0;
1466         return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1467 }
1468
1469 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1470                                        void *context, int vl, int mode,
1471                                        u64 data)
1472 {
1473         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1474
1475         if (vl != CNTR_INVALID_VL)
1476                 return 0;
1477         return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1478 }
1479
1480 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1481                                    void *context, int vl, int mode, u64 data)
1482 {
1483         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1484         u64 zero = 0;
1485         u64 *counter;
1486
1487         if (vl == CNTR_INVALID_VL)
1488                 counter = &ppd->port_xmit_discards;
1489         else if (vl >= 0 && vl < C_VL_COUNT)
1490                 counter = &ppd->port_xmit_discards_vl[vl];
1491         else
1492                 counter = &zero;
1493
1494         return read_write_sw(ppd->dd, counter, mode, data);
1495 }
1496
1497 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1498                                        void *context, int vl, int mode,
1499                                        u64 data)
1500 {
1501         struct hfi1_pportdata *ppd = context;
1502
1503         if (vl != CNTR_INVALID_VL)
1504                 return 0;
1505
1506         return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1507                              mode, data);
1508 }
1509
1510 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1511                                       void *context, int vl, int mode, u64 data)
1512 {
1513         struct hfi1_pportdata *ppd = context;
1514
1515         if (vl != CNTR_INVALID_VL)
1516                 return 0;
1517
1518         return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1519                              mode, data);
1520 }
1521
1522 u64 get_all_cpu_total(u64 __percpu *cntr)
1523 {
1524         int cpu;
1525         u64 counter = 0;
1526
1527         for_each_possible_cpu(cpu)
1528                 counter += *per_cpu_ptr(cntr, cpu);
1529         return counter;
1530 }
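
/*
 * The per-CPU counters summed above are meant to be bumped locklessly
 * on hot paths; a sketch, assuming a counter allocated with
 * alloc_percpu(u64):
 *
 *      this_cpu_inc(*dd->int_counter);
 *
 * get_all_cpu_total() then folds every CPU's contribution together only
 * when the value is actually read.
 */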
1531
1532 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1533                           u64 __percpu *cntr,
1534                           int vl, int mode, u64 data)
1535 {
1536         u64 ret = 0;
1537
1538         if (vl != CNTR_INVALID_VL)
1539                 return 0;
1540
1541         if (mode == CNTR_MODE_R) {
1542                 ret = get_all_cpu_total(cntr) - *z_val;
1543         } else if (mode == CNTR_MODE_W) {
1544                 /* A write can only zero the counter */
1545                 if (data == 0)
1546                         *z_val = get_all_cpu_total(cntr);
1547                 else
1548                         dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1549         } else {
1550                 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1551                 return 0;
1552         }
1553
1554         return ret;
1555 }
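
/*
 * Worked example of the zeroing trick above (hypothetical numbers):
 * per-CPU counters are never cleared directly, "zeroing" just snapshots
 * the current total into *z_val so later reads report the delta.
 *
 *      total = 1000, *z_val = 0     ->  read returns 1000
 *      write 0                      ->  *z_val becomes 1000
 *      total grows to 1250          ->  read returns 250
 */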
1556
1557 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1558                               void *context, int vl, int mode, u64 data)
1559 {
1560         struct hfi1_devdata *dd = context;
1561
1562         return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1563                               mode, data);
1564 }
1565
1566 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1567                                    void *context, int vl, int mode, u64 data)
1568 {
1569         struct hfi1_devdata *dd = context;
1570
1571         return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1572                               mode, data);
1573 }
1574
1575 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1576                               void *context, int vl, int mode, u64 data)
1577 {
1578         struct hfi1_devdata *dd = context;
1579
1580         return dd->verbs_dev.n_piowait;
1581 }
1582
1583 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1584                                void *context, int vl, int mode, u64 data)
1585 {
1586         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1587
1588         return dd->verbs_dev.n_piodrain;
1589 }
1590
1591 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1592                               void *context, int vl, int mode, u64 data)
1593 {
1594         struct hfi1_devdata *dd = context;
1595
1596         return dd->verbs_dev.n_txwait;
1597 }
1598
1599 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1600                                void *context, int vl, int mode, u64 data)
1601 {
1602         struct hfi1_devdata *dd = context;
1603
1604         return dd->verbs_dev.n_kmem_wait;
1605 }
1606
1607 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1608                                    void *context, int vl, int mode, u64 data)
1609 {
1610         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1611
1612         return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1613                               mode, data);
1614 }
1615
1616 /* Software counters for the error status bits within MISC_ERR_STATUS */
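/*
 * Each accessor below reports one slot of dd->misc_err_status_cnt[];
 * slot N is expected to track bit N of MISC_ERR_STATUS.  A sketch of
 * how an error handler would keep such an array up to date (not
 * necessarily the exact code used elsewhere in this file):
 *
 *      for (i = 0; i < ARRAY_SIZE(dd->misc_err_status_cnt); i++)
 *              if (reg & (1ull << i))
 *                      dd->misc_err_status_cnt[i]++;
 */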
1617 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1618                                              void *context, int vl, int mode,
1619                                              u64 data)
1620 {
1621         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1622
1623         return dd->misc_err_status_cnt[12];
1624 }
1625
1626 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1627                                           void *context, int vl, int mode,
1628                                           u64 data)
1629 {
1630         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1631
1632         return dd->misc_err_status_cnt[11];
1633 }
1634
1635 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1636                                                void *context, int vl, int mode,
1637                                                u64 data)
1638 {
1639         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1640
1641         return dd->misc_err_status_cnt[10];
1642 }
1643
1644 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1645                                                  void *context, int vl,
1646                                                  int mode, u64 data)
1647 {
1648         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1649
1650         return dd->misc_err_status_cnt[9];
1651 }
1652
1653 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1654                                            void *context, int vl, int mode,
1655                                            u64 data)
1656 {
1657         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1658
1659         return dd->misc_err_status_cnt[8];
1660 }
1661
1662 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1663                                 const struct cntr_entry *entry,
1664                                 void *context, int vl, int mode, u64 data)
1665 {
1666         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1667
1668         return dd->misc_err_status_cnt[7];
1669 }
1670
1671 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1672                                                 void *context, int vl,
1673                                                 int mode, u64 data)
1674 {
1675         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1676
1677         return dd->misc_err_status_cnt[6];
1678 }
1679
1680 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1681                                               void *context, int vl, int mode,
1682                                               u64 data)
1683 {
1684         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1685
1686         return dd->misc_err_status_cnt[5];
1687 }
1688
1689 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1690                                             void *context, int vl, int mode,
1691                                             u64 data)
1692 {
1693         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1694
1695         return dd->misc_err_status_cnt[4];
1696 }
1697
1698 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1699                                                  void *context, int vl,
1700                                                  int mode, u64 data)
1701 {
1702         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1703
1704         return dd->misc_err_status_cnt[3];
1705 }
1706
1707 static u64 access_misc_csr_write_bad_addr_err_cnt(
1708                                 const struct cntr_entry *entry,
1709                                 void *context, int vl, int mode, u64 data)
1710 {
1711         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1712
1713         return dd->misc_err_status_cnt[2];
1714 }
1715
1716 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1717                                                  void *context, int vl,
1718                                                  int mode, u64 data)
1719 {
1720         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1721
1722         return dd->misc_err_status_cnt[1];
1723 }
1724
1725 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1726                                           void *context, int vl, int mode,
1727                                           u64 data)
1728 {
1729         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1730
1731         return dd->misc_err_status_cnt[0];
1732 }
1733
1734 /*
1735  * Software counter for the aggregate of
1736  * individual CceErrStatus counters
1737  */
1738 static u64 access_sw_cce_err_status_aggregated_cnt(
1739                                 const struct cntr_entry *entry,
1740                                 void *context, int vl, int mode, u64 data)
1741 {
1742         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1743
1744         return dd->sw_cce_err_status_aggregate;
1745 }
1746
1747 /*
1748  * Software counters corresponding to each of the
1749  * error status bits within CceErrStatus
1750  */
1751 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1752                                               void *context, int vl, int mode,
1753                                               u64 data)
1754 {
1755         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1756
1757         return dd->cce_err_status_cnt[40];
1758 }
1759
1760 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1761                                           void *context, int vl, int mode,
1762                                           u64 data)
1763 {
1764         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1765
1766         return dd->cce_err_status_cnt[39];
1767 }
1768
1769 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1770                                           void *context, int vl, int mode,
1771                                           u64 data)
1772 {
1773         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1774
1775         return dd->cce_err_status_cnt[38];
1776 }
1777
1778 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1779                                              void *context, int vl, int mode,
1780                                              u64 data)
1781 {
1782         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1783
1784         return dd->cce_err_status_cnt[37];
1785 }
1786
1787 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1788                                              void *context, int vl, int mode,
1789                                              u64 data)
1790 {
1791         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1792
1793         return dd->cce_err_status_cnt[36];
1794 }
1795
1796 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1797                                 const struct cntr_entry *entry,
1798                                 void *context, int vl, int mode, u64 data)
1799 {
1800         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1801
1802         return dd->cce_err_status_cnt[35];
1803 }
1804
1805 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1806                                 const struct cntr_entry *entry,
1807                                 void *context, int vl, int mode, u64 data)
1808 {
1809         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1810
1811         return dd->cce_err_status_cnt[34];
1812 }
1813
1814 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1815                                                  void *context, int vl,
1816                                                  int mode, u64 data)
1817 {
1818         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1819
1820         return dd->cce_err_status_cnt[33];
1821 }
1822
1823 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1824                                                 void *context, int vl, int mode,
1825                                                 u64 data)
1826 {
1827         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1828
1829         return dd->cce_err_status_cnt[32];
1830 }
1831
1832 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1833                                    void *context, int vl, int mode, u64 data)
1834 {
1835         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1836
1837         return dd->cce_err_status_cnt[31];
1838 }
1839
1840 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1841                                                void *context, int vl, int mode,
1842                                                u64 data)
1843 {
1844         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1845
1846         return dd->cce_err_status_cnt[30];
1847 }
1848
1849 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1850                                               void *context, int vl, int mode,
1851                                               u64 data)
1852 {
1853         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1854
1855         return dd->cce_err_status_cnt[29];
1856 }
1857
1858 static u64 access_pcic_transmit_back_parity_err_cnt(
1859                                 const struct cntr_entry *entry,
1860                                 void *context, int vl, int mode, u64 data)
1861 {
1862         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1863
1864         return dd->cce_err_status_cnt[28];
1865 }
1866
1867 static u64 access_pcic_transmit_front_parity_err_cnt(
1868                                 const struct cntr_entry *entry,
1869                                 void *context, int vl, int mode, u64 data)
1870 {
1871         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1872
1873         return dd->cce_err_status_cnt[27];
1874 }
1875
1876 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1877                                              void *context, int vl, int mode,
1878                                              u64 data)
1879 {
1880         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1881
1882         return dd->cce_err_status_cnt[26];
1883 }
1884
1885 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1886                                             void *context, int vl, int mode,
1887                                             u64 data)
1888 {
1889         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1890
1891         return dd->cce_err_status_cnt[25];
1892 }
1893
1894 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1895                                               void *context, int vl, int mode,
1896                                               u64 data)
1897 {
1898         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1899
1900         return dd->cce_err_status_cnt[24];
1901 }
1902
1903 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1904                                              void *context, int vl, int mode,
1905                                              u64 data)
1906 {
1907         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1908
1909         return dd->cce_err_status_cnt[23];
1910 }
1911
1912 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1913                                                  void *context, int vl,
1914                                                  int mode, u64 data)
1915 {
1916         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1917
1918         return dd->cce_err_status_cnt[22];
1919 }
1920
1921 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1922                                          void *context, int vl, int mode,
1923                                          u64 data)
1924 {
1925         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1926
1927         return dd->cce_err_status_cnt[21];
1928 }
1929
1930 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1931                                 const struct cntr_entry *entry,
1932                                 void *context, int vl, int mode, u64 data)
1933 {
1934         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1935
1936         return dd->cce_err_status_cnt[20];
1937 }
1938
1939 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1940                                                  void *context, int vl,
1941                                                  int mode, u64 data)
1942 {
1943         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1944
1945         return dd->cce_err_status_cnt[19];
1946 }
1947
1948 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1949                                              void *context, int vl, int mode,
1950                                              u64 data)
1951 {
1952         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1953
1954         return dd->cce_err_status_cnt[18];
1955 }
1956
1957 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1958                                             void *context, int vl, int mode,
1959                                             u64 data)
1960 {
1961         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1962
1963         return dd->cce_err_status_cnt[17];
1964 }
1965
1966 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1967                                               void *context, int vl, int mode,
1968                                               u64 data)
1969 {
1970         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1971
1972         return dd->cce_err_status_cnt[16];
1973 }
1974
1975 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1976                                              void *context, int vl, int mode,
1977                                              u64 data)
1978 {
1979         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1980
1981         return dd->cce_err_status_cnt[15];
1982 }
1983
1984 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1985                                                  void *context, int vl,
1986                                                  int mode, u64 data)
1987 {
1988         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1989
1990         return dd->cce_err_status_cnt[14];
1991 }
1992
1993 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1994                                              void *context, int vl, int mode,
1995                                              u64 data)
1996 {
1997         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1998
1999         return dd->cce_err_status_cnt[13];
2000 }
2001
2002 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2003                                 const struct cntr_entry *entry,
2004                                 void *context, int vl, int mode, u64 data)
2005 {
2006         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2007
2008         return dd->cce_err_status_cnt[12];
2009 }
2010
2011 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2012                                 const struct cntr_entry *entry,
2013                                 void *context, int vl, int mode, u64 data)
2014 {
2015         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2016
2017         return dd->cce_err_status_cnt[11];
2018 }
2019
2020 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2021                                 const struct cntr_entry *entry,
2022                                 void *context, int vl, int mode, u64 data)
2023 {
2024         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2025
2026         return dd->cce_err_status_cnt[10];
2027 }
2028
2029 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2030                                 const struct cntr_entry *entry,
2031                                 void *context, int vl, int mode, u64 data)
2032 {
2033         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2034
2035         return dd->cce_err_status_cnt[9];
2036 }
2037
2038 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2039                                 const struct cntr_entry *entry,
2040                                 void *context, int vl, int mode, u64 data)
2041 {
2042         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2043
2044         return dd->cce_err_status_cnt[8];
2045 }
2046
2047 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2048                                                  void *context, int vl,
2049                                                  int mode, u64 data)
2050 {
2051         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2052
2053         return dd->cce_err_status_cnt[7];
2054 }
2055
2056 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2057                                 const struct cntr_entry *entry,
2058                                 void *context, int vl, int mode, u64 data)
2059 {
2060         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2061
2062         return dd->cce_err_status_cnt[6];
2063 }
2064
2065 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2066                                                void *context, int vl, int mode,
2067                                                u64 data)
2068 {
2069         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2070
2071         return dd->cce_err_status_cnt[5];
2072 }
2073
2074 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2075                                           void *context, int vl, int mode,
2076                                           u64 data)
2077 {
2078         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2079
2080         return dd->cce_err_status_cnt[4];
2081 }
2082
2083 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2084                                 const struct cntr_entry *entry,
2085                                 void *context, int vl, int mode, u64 data)
2086 {
2087         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2088
2089         return dd->cce_err_status_cnt[3];
2090 }
2091
2092 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2093                                                  void *context, int vl,
2094                                                  int mode, u64 data)
2095 {
2096         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2097
2098         return dd->cce_err_status_cnt[2];
2099 }
2100
2101 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2102                                                 void *context, int vl,
2103                                                 int mode, u64 data)
2104 {
2105         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2106
2107         return dd->cce_err_status_cnt[1];
2108 }
2109
2110 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2111                                          void *context, int vl, int mode,
2112                                          u64 data)
2113 {
2114         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2115
2116         return dd->cce_err_status_cnt[0];
2117 }
2118
2119 /*
2120  * Software counters corresponding to each of the
2121  * error status bits within RcvErrStatus
2122  */
2123 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2124                                         void *context, int vl, int mode,
2125                                         u64 data)
2126 {
2127         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2128
2129         return dd->rcv_err_status_cnt[63];
2130 }
2131
2132 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2133                                                 void *context, int vl,
2134                                                 int mode, u64 data)
2135 {
2136         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2137
2138         return dd->rcv_err_status_cnt[62];
2139 }
2140
2141 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2142                                                void *context, int vl, int mode,
2143                                                u64 data)
2144 {
2145         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2146
2147         return dd->rcv_err_status_cnt[61];
2148 }
2149
2150 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2151                                          void *context, int vl, int mode,
2152                                          u64 data)
2153 {
2154         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2155
2156         return dd->rcv_err_status_cnt[60];
2157 }
2158
2159 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2160                                                  void *context, int vl,
2161                                                  int mode, u64 data)
2162 {
2163         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2164
2165         return dd->rcv_err_status_cnt[59];
2166 }
2167
2168 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2169                                                  void *context, int vl,
2170                                                  int mode, u64 data)
2171 {
2172         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2173
2174         return dd->rcv_err_status_cnt[58];
2175 }
2176
2177 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2178                                             void *context, int vl, int mode,
2179                                             u64 data)
2180 {
2181         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2182
2183         return dd->rcv_err_status_cnt[57];
2184 }
2185
2186 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2187                                            void *context, int vl, int mode,
2188                                            u64 data)
2189 {
2190         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2191
2192         return dd->rcv_err_status_cnt[56];
2193 }
2194
2195 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2196                                            void *context, int vl, int mode,
2197                                            u64 data)
2198 {
2199         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2200
2201         return dd->rcv_err_status_cnt[55];
2202 }
2203
2204 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2205                                 const struct cntr_entry *entry,
2206                                 void *context, int vl, int mode, u64 data)
2207 {
2208         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2209
2210         return dd->rcv_err_status_cnt[54];
2211 }
2212
2213 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2214                                 const struct cntr_entry *entry,
2215                                 void *context, int vl, int mode, u64 data)
2216 {
2217         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2218
2219         return dd->rcv_err_status_cnt[53];
2220 }
2221
2222 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2223                                                  void *context, int vl,
2224                                                  int mode, u64 data)
2225 {
2226         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2227
2228         return dd->rcv_err_status_cnt[52];
2229 }
2230
2231 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2232                                                  void *context, int vl,
2233                                                  int mode, u64 data)
2234 {
2235         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2236
2237         return dd->rcv_err_status_cnt[51];
2238 }
2239
2240 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2241                                                  void *context, int vl,
2242                                                  int mode, u64 data)
2243 {
2244         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2245
2246         return dd->rcv_err_status_cnt[50];
2247 }
2248
2249 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2250                                                  void *context, int vl,
2251                                                  int mode, u64 data)
2252 {
2253         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2254
2255         return dd->rcv_err_status_cnt[49];
2256 }
2257
2258 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2259                                                  void *context, int vl,
2260                                                  int mode, u64 data)
2261 {
2262         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2263
2264         return dd->rcv_err_status_cnt[48];
2265 }
2266
2267 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2268                                                  void *context, int vl,
2269                                                  int mode, u64 data)
2270 {
2271         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2272
2273         return dd->rcv_err_status_cnt[47];
2274 }
2275
2276 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2277                                          void *context, int vl, int mode,
2278                                          u64 data)
2279 {
2280         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2281
2282         return dd->rcv_err_status_cnt[46];
2283 }
2284
2285 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2286                                 const struct cntr_entry *entry,
2287                                 void *context, int vl, int mode, u64 data)
2288 {
2289         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2290
2291         return dd->rcv_err_status_cnt[45];
2292 }
2293
2294 static u64 access_rx_lookup_csr_parity_err_cnt(
2295                                 const struct cntr_entry *entry,
2296                                 void *context, int vl, int mode, u64 data)
2297 {
2298         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2299
2300         return dd->rcv_err_status_cnt[44];
2301 }
2302
2303 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2304                                 const struct cntr_entry *entry,
2305                                 void *context, int vl, int mode, u64 data)
2306 {
2307         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2308
2309         return dd->rcv_err_status_cnt[43];
2310 }
2311
2312 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2313                                 const struct cntr_entry *entry,
2314                                 void *context, int vl, int mode, u64 data)
2315 {
2316         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2317
2318         return dd->rcv_err_status_cnt[42];
2319 }
2320
2321 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2322                                 const struct cntr_entry *entry,
2323                                 void *context, int vl, int mode, u64 data)
2324 {
2325         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2326
2327         return dd->rcv_err_status_cnt[41];
2328 }
2329
2330 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2331                                 const struct cntr_entry *entry,
2332                                 void *context, int vl, int mode, u64 data)
2333 {
2334         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2335
2336         return dd->rcv_err_status_cnt[40];
2337 }
2338
2339 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2340                                 const struct cntr_entry *entry,
2341                                 void *context, int vl, int mode, u64 data)
2342 {
2343         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2344
2345         return dd->rcv_err_status_cnt[39];
2346 }
2347
2348 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2349                                 const struct cntr_entry *entry,
2350                                 void *context, int vl, int mode, u64 data)
2351 {
2352         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2353
2354         return dd->rcv_err_status_cnt[38];
2355 }
2356
2357 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2358                                 const struct cntr_entry *entry,
2359                                 void *context, int vl, int mode, u64 data)
2360 {
2361         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2362
2363         return dd->rcv_err_status_cnt[37];
2364 }
2365
2366 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2367                                 const struct cntr_entry *entry,
2368                                 void *context, int vl, int mode, u64 data)
2369 {
2370         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2371
2372         return dd->rcv_err_status_cnt[36];
2373 }
2374
2375 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2376                                 const struct cntr_entry *entry,
2377                                 void *context, int vl, int mode, u64 data)
2378 {
2379         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2380
2381         return dd->rcv_err_status_cnt[35];
2382 }
2383
2384 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2385                                 const struct cntr_entry *entry,
2386                                 void *context, int vl, int mode, u64 data)
2387 {
2388         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2389
2390         return dd->rcv_err_status_cnt[34];
2391 }
2392
2393 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2394                                 const struct cntr_entry *entry,
2395                                 void *context, int vl, int mode, u64 data)
2396 {
2397         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2398
2399         return dd->rcv_err_status_cnt[33];
2400 }
2401
2402 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2403                                         void *context, int vl, int mode,
2404                                         u64 data)
2405 {
2406         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2407
2408         return dd->rcv_err_status_cnt[32];
2409 }
2410
2411 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2412                                        void *context, int vl, int mode,
2413                                        u64 data)
2414 {
2415         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2416
2417         return dd->rcv_err_status_cnt[31];
2418 }
2419
2420 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2421                                           void *context, int vl, int mode,
2422                                           u64 data)
2423 {
2424         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2425
2426         return dd->rcv_err_status_cnt[30];
2427 }
2428
2429 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2430                                              void *context, int vl, int mode,
2431                                              u64 data)
2432 {
2433         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2434
2435         return dd->rcv_err_status_cnt[29];
2436 }
2437
2438 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2439                                                  void *context, int vl,
2440                                                  int mode, u64 data)
2441 {
2442         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2443
2444         return dd->rcv_err_status_cnt[28];
2445 }
2446
2447 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2448                                 const struct cntr_entry *entry,
2449                                 void *context, int vl, int mode, u64 data)
2450 {
2451         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2452
2453         return dd->rcv_err_status_cnt[27];
2454 }
2455
2456 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2457                                 const struct cntr_entry *entry,
2458                                 void *context, int vl, int mode, u64 data)
2459 {
2460         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2461
2462         return dd->rcv_err_status_cnt[26];
2463 }
2464
2465 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2466                                 const struct cntr_entry *entry,
2467                                 void *context, int vl, int mode, u64 data)
2468 {
2469         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2470
2471         return dd->rcv_err_status_cnt[25];
2472 }
2473
2474 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2475                                 const struct cntr_entry *entry,
2476                                 void *context, int vl, int mode, u64 data)
2477 {
2478         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2479
2480         return dd->rcv_err_status_cnt[24];
2481 }
2482
2483 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2484                                 const struct cntr_entry *entry,
2485                                 void *context, int vl, int mode, u64 data)
2486 {
2487         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2488
2489         return dd->rcv_err_status_cnt[23];
2490 }
2491
2492 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2493                                 const struct cntr_entry *entry,
2494                                 void *context, int vl, int mode, u64 data)
2495 {
2496         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2497
2498         return dd->rcv_err_status_cnt[22];
2499 }
2500
2501 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2502                                 const struct cntr_entry *entry,
2503                                 void *context, int vl, int mode, u64 data)
2504 {
2505         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2506
2507         return dd->rcv_err_status_cnt[21];
2508 }
2509
2510 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2511                                 const struct cntr_entry *entry,
2512                                 void *context, int vl, int mode, u64 data)
2513 {
2514         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2515
2516         return dd->rcv_err_status_cnt[20];
2517 }
2518
2519 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2520                                 const struct cntr_entry *entry,
2521                                 void *context, int vl, int mode, u64 data)
2522 {
2523         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2524
2525         return dd->rcv_err_status_cnt[19];
2526 }
2527
2528 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2529                                                  void *context, int vl,
2530                                                  int mode, u64 data)
2531 {
2532         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2533
2534         return dd->rcv_err_status_cnt[18];
2535 }
2536
2537 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2538                                                  void *context, int vl,
2539                                                  int mode, u64 data)
2540 {
2541         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2542
2543         return dd->rcv_err_status_cnt[17];
2544 }
2545
2546 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2547                                 const struct cntr_entry *entry,
2548                                 void *context, int vl, int mode, u64 data)
2549 {
2550         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2551
2552         return dd->rcv_err_status_cnt[16];
2553 }
2554
2555 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2556                                 const struct cntr_entry *entry,
2557                                 void *context, int vl, int mode, u64 data)
2558 {
2559         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2560
2561         return dd->rcv_err_status_cnt[15];
2562 }
2563
2564 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2565                                                 void *context, int vl,
2566                                                 int mode, u64 data)
2567 {
2568         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2569
2570         return dd->rcv_err_status_cnt[14];
2571 }
2572
2573 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2574                                                 void *context, int vl,
2575                                                 int mode, u64 data)
2576 {
2577         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2578
2579         return dd->rcv_err_status_cnt[13];
2580 }
2581
2582 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2583                                               void *context, int vl, int mode,
2584                                               u64 data)
2585 {
2586         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2587
2588         return dd->rcv_err_status_cnt[12];
2589 }
2590
2591 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2592                                           void *context, int vl, int mode,
2593                                           u64 data)
2594 {
2595         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2596
2597         return dd->rcv_err_status_cnt[11];
2598 }
2599
2600 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2601                                           void *context, int vl, int mode,
2602                                           u64 data)
2603 {
2604         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2605
2606         return dd->rcv_err_status_cnt[10];
2607 }
2608
2609 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2610                                                void *context, int vl, int mode,
2611                                                u64 data)
2612 {
2613         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2614
2615         return dd->rcv_err_status_cnt[9];
2616 }
2617
2618 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2619                                             void *context, int vl, int mode,
2620                                             u64 data)
2621 {
2622         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2623
2624         return dd->rcv_err_status_cnt[8];
2625 }
2626
2627 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2628                                 const struct cntr_entry *entry,
2629                                 void *context, int vl, int mode, u64 data)
2630 {
2631         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2632
2633         return dd->rcv_err_status_cnt[7];
2634 }
2635
2636 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2637                                 const struct cntr_entry *entry,
2638                                 void *context, int vl, int mode, u64 data)
2639 {
2640         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2641
2642         return dd->rcv_err_status_cnt[6];
2643 }
2644
2645 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2646                                           void *context, int vl, int mode,
2647                                           u64 data)
2648 {
2649         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2650
2651         return dd->rcv_err_status_cnt[5];
2652 }
2653
2654 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2655                                           void *context, int vl, int mode,
2656                                           u64 data)
2657 {
2658         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2659
2660         return dd->rcv_err_status_cnt[4];
2661 }
2662
2663 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2664                                          void *context, int vl, int mode,
2665                                          u64 data)
2666 {
2667         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2668
2669         return dd->rcv_err_status_cnt[3];
2670 }
2671
2672 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2673                                          void *context, int vl, int mode,
2674                                          u64 data)
2675 {
2676         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2677
2678         return dd->rcv_err_status_cnt[2];
2679 }
2680
2681 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2682                                             void *context, int vl, int mode,
2683                                             u64 data)
2684 {
2685         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2686
2687         return dd->rcv_err_status_cnt[1];
2688 }
2689
2690 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2691                                          void *context, int vl, int mode,
2692                                          u64 data)
2693 {
2694         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2695
2696         return dd->rcv_err_status_cnt[0];
2697 }
2698
2699 /*
2700  * Software counters corresponding to each of the
2701  * error status bits within SendPioErrStatus
2702  */
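/*
 * Each accessor below simply returns the software shadow count kept in
 * dd->send_pio_err_status_cnt[]; the array index presumably corresponds
 * to the bit position of that error within the SendPioErrStatus CSR
 * (bit 35 down to bit 0).
 */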
2703 static u64 access_pio_pec_sop_head_parity_err_cnt(
2704                                 const struct cntr_entry *entry,
2705                                 void *context, int vl, int mode, u64 data)
2706 {
2707         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2708
2709         return dd->send_pio_err_status_cnt[35];
2710 }
2711
2712 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2713                                 const struct cntr_entry *entry,
2714                                 void *context, int vl, int mode, u64 data)
2715 {
2716         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2717
2718         return dd->send_pio_err_status_cnt[34];
2719 }
2720
2721 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2722                                 const struct cntr_entry *entry,
2723                                 void *context, int vl, int mode, u64 data)
2724 {
2725         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2726
2727         return dd->send_pio_err_status_cnt[33];
2728 }
2729
2730 static u64 access_pio_current_free_cnt_parity_err_cnt(
2731                                 const struct cntr_entry *entry,
2732                                 void *context, int vl, int mode, u64 data)
2733 {
2734         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2735
2736         return dd->send_pio_err_status_cnt[32];
2737 }
2738
2739 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2740                                           void *context, int vl, int mode,
2741                                           u64 data)
2742 {
2743         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2744
2745         return dd->send_pio_err_status_cnt[31];
2746 }
2747
2748 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2749                                           void *context, int vl, int mode,
2750                                           u64 data)
2751 {
2752         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2753
2754         return dd->send_pio_err_status_cnt[30];
2755 }
2756
2757 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2758                                            void *context, int vl, int mode,
2759                                            u64 data)
2760 {
2761         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2762
2763         return dd->send_pio_err_status_cnt[29];
2764 }
2765
2766 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2767                                 const struct cntr_entry *entry,
2768                                 void *context, int vl, int mode, u64 data)
2769 {
2770         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2771
2772         return dd->send_pio_err_status_cnt[28];
2773 }
2774
2775 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2776                                              void *context, int vl, int mode,
2777                                              u64 data)
2778 {
2779         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2780
2781         return dd->send_pio_err_status_cnt[27];
2782 }
2783
2784 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2785                                              void *context, int vl, int mode,
2786                                              u64 data)
2787 {
2788         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2789
2790         return dd->send_pio_err_status_cnt[26];
2791 }
2792
2793 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2794                                                 void *context, int vl,
2795                                                 int mode, u64 data)
2796 {
2797         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2798
2799         return dd->send_pio_err_status_cnt[25];
2800 }
2801
2802 static u64 access_pio_block_qw_count_parity_err_cnt(
2803                                 const struct cntr_entry *entry,
2804                                 void *context, int vl, int mode, u64 data)
2805 {
2806         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2807
2808         return dd->send_pio_err_status_cnt[24];
2809 }
2810
2811 static u64 access_pio_write_qw_valid_parity_err_cnt(
2812                                 const struct cntr_entry *entry,
2813                                 void *context, int vl, int mode, u64 data)
2814 {
2815         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2816
2817         return dd->send_pio_err_status_cnt[23];
2818 }
2819
2820 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2821                                             void *context, int vl, int mode,
2822                                             u64 data)
2823 {
2824         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2825
2826         return dd->send_pio_err_status_cnt[22];
2827 }
2828
2829 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2830                                                 void *context, int vl,
2831                                                 int mode, u64 data)
2832 {
2833         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2834
2835         return dd->send_pio_err_status_cnt[21];
2836 }
2837
2838 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2839                                                 void *context, int vl,
2840                                                 int mode, u64 data)
2841 {
2842         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2843
2844         return dd->send_pio_err_status_cnt[20];
2845 }
2846
2847 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2848                                                 void *context, int vl,
2849                                                 int mode, u64 data)
2850 {
2851         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2852
2853         return dd->send_pio_err_status_cnt[19];
2854 }
2855
2856 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2857                                 const struct cntr_entry *entry,
2858                                 void *context, int vl, int mode, u64 data)
2859 {
2860         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2861
2862         return dd->send_pio_err_status_cnt[18];
2863 }
2864
2865 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2866                                          void *context, int vl, int mode,
2867                                          u64 data)
2868 {
2869         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2870
2871         return dd->send_pio_err_status_cnt[17];
2872 }
2873
2874 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2875                                             void *context, int vl, int mode,
2876                                             u64 data)
2877 {
2878         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2879
2880         return dd->send_pio_err_status_cnt[16];
2881 }
2882
2883 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2884                                 const struct cntr_entry *entry,
2885                                 void *context, int vl, int mode, u64 data)
2886 {
2887         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2888
2889         return dd->send_pio_err_status_cnt[15];
2890 }
2891
2892 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2893                                 const struct cntr_entry *entry,
2894                                 void *context, int vl, int mode, u64 data)
2895 {
2896         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2897
2898         return dd->send_pio_err_status_cnt[14];
2899 }
2900
2901 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2902                                 const struct cntr_entry *entry,
2903                                 void *context, int vl, int mode, u64 data)
2904 {
2905         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2906
2907         return dd->send_pio_err_status_cnt[13];
2908 }
2909
2910 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2911                                 const struct cntr_entry *entry,
2912                                 void *context, int vl, int mode, u64 data)
2913 {
2914         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2915
2916         return dd->send_pio_err_status_cnt[12];
2917 }
2918
2919 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2920                                 const struct cntr_entry *entry,
2921                                 void *context, int vl, int mode, u64 data)
2922 {
2923         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2924
2925         return dd->send_pio_err_status_cnt[11];
2926 }
2927
2928 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2929                                 const struct cntr_entry *entry,
2930                                 void *context, int vl, int mode, u64 data)
2931 {
2932         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2933
2934         return dd->send_pio_err_status_cnt[10];
2935 }
2936
2937 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2938                                 const struct cntr_entry *entry,
2939                                 void *context, int vl, int mode, u64 data)
2940 {
2941         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2942
2943         return dd->send_pio_err_status_cnt[9];
2944 }
2945
2946 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2947                                 const struct cntr_entry *entry,
2948                                 void *context, int vl, int mode, u64 data)
2949 {
2950         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2951
2952         return dd->send_pio_err_status_cnt[8];
2953 }
2954
2955 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2956                                 const struct cntr_entry *entry,
2957                                 void *context, int vl, int mode, u64 data)
2958 {
2959         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2960
2961         return dd->send_pio_err_status_cnt[7];
2962 }
2963
2964 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2965                                               void *context, int vl, int mode,
2966                                               u64 data)
2967 {
2968         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2969
2970         return dd->send_pio_err_status_cnt[6];
2971 }
2972
2973 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2974                                               void *context, int vl, int mode,
2975                                               u64 data)
2976 {
2977         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2978
2979         return dd->send_pio_err_status_cnt[5];
2980 }
2981
2982 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2983                                            void *context, int vl, int mode,
2984                                            u64 data)
2985 {
2986         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2987
2988         return dd->send_pio_err_status_cnt[4];
2989 }
2990
2991 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2992                                            void *context, int vl, int mode,
2993                                            u64 data)
2994 {
2995         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2996
2997         return dd->send_pio_err_status_cnt[3];
2998 }
2999
3000 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3001                                          void *context, int vl, int mode,
3002                                          u64 data)
3003 {
3004         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3005
3006         return dd->send_pio_err_status_cnt[2];
3007 }
3008
3009 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3010                                                 void *context, int vl,
3011                                                 int mode, u64 data)
3012 {
3013         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3014
3015         return dd->send_pio_err_status_cnt[1];
3016 }
3017
3018 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3019                                              void *context, int vl, int mode,
3020                                              u64 data)
3021 {
3022         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3023
3024         return dd->send_pio_err_status_cnt[0];
3025 }
3026
3027 /*
3028  * Software counters corresponding to each of the
3029  * error status bits within SendDmaErrStatus
3030  */
3031 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3032                                 const struct cntr_entry *entry,
3033                                 void *context, int vl, int mode, u64 data)
3034 {
3035         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3036
3037         return dd->send_dma_err_status_cnt[3];
3038 }
3039
3040 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3041                                 const struct cntr_entry *entry,
3042                                 void *context, int vl, int mode, u64 data)
3043 {
3044         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3045
3046         return dd->send_dma_err_status_cnt[2];
3047 }
3048
3049 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3050                                           void *context, int vl, int mode,
3051                                           u64 data)
3052 {
3053         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3054
3055         return dd->send_dma_err_status_cnt[1];
3056 }
3057
3058 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3059                                        void *context, int vl, int mode,
3060                                        u64 data)
3061 {
3062         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3063
3064         return dd->send_dma_err_status_cnt[0];
3065 }
3066
3067 /*
3068  * Software counters corresponding to each of the
3069  * error status bits within SendEgressErrStatus
3070  */
3071 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3072                                 const struct cntr_entry *entry,
3073                                 void *context, int vl, int mode, u64 data)
3074 {
3075         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3076
3077         return dd->send_egress_err_status_cnt[63];
3078 }
3079
3080 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3081                                 const struct cntr_entry *entry,
3082                                 void *context, int vl, int mode, u64 data)
3083 {
3084         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3085
3086         return dd->send_egress_err_status_cnt[62];
3087 }
3088
3089 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3090                                              void *context, int vl, int mode,
3091                                              u64 data)
3092 {
3093         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3094
3095         return dd->send_egress_err_status_cnt[61];
3096 }
3097
3098 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3099                                                  void *context, int vl,
3100                                                  int mode, u64 data)
3101 {
3102         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3103
3104         return dd->send_egress_err_status_cnt[60];
3105 }
3106
3107 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3108                                 const struct cntr_entry *entry,
3109                                 void *context, int vl, int mode, u64 data)
3110 {
3111         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3112
3113         return dd->send_egress_err_status_cnt[59];
3114 }
3115
3116 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3117                                         void *context, int vl, int mode,
3118                                         u64 data)
3119 {
3120         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3121
3122         return dd->send_egress_err_status_cnt[58];
3123 }
3124
3125 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3126                                             void *context, int vl, int mode,
3127                                             u64 data)
3128 {
3129         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3130
3131         return dd->send_egress_err_status_cnt[57];
3132 }
3133
3134 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3135                                               void *context, int vl, int mode,
3136                                               u64 data)
3137 {
3138         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3139
3140         return dd->send_egress_err_status_cnt[56];
3141 }
3142
3143 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3144                                               void *context, int vl, int mode,
3145                                               u64 data)
3146 {
3147         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3148
3149         return dd->send_egress_err_status_cnt[55];
3150 }
3151
3152 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3153                                               void *context, int vl, int mode,
3154                                               u64 data)
3155 {
3156         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3157
3158         return dd->send_egress_err_status_cnt[54];
3159 }
3160
3161 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3162                                               void *context, int vl, int mode,
3163                                               u64 data)
3164 {
3165         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3166
3167         return dd->send_egress_err_status_cnt[53];
3168 }
3169
3170 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3171                                               void *context, int vl, int mode,
3172                                               u64 data)
3173 {
3174         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3175
3176         return dd->send_egress_err_status_cnt[52];
3177 }
3178
3179 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3180                                               void *context, int vl, int mode,
3181                                               u64 data)
3182 {
3183         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3184
3185         return dd->send_egress_err_status_cnt[51];
3186 }
3187
3188 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3189                                               void *context, int vl, int mode,
3190                                               u64 data)
3191 {
3192         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3193
3194         return dd->send_egress_err_status_cnt[50];
3195 }
3196
3197 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3198                                               void *context, int vl, int mode,
3199                                               u64 data)
3200 {
3201         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3202
3203         return dd->send_egress_err_status_cnt[49];
3204 }
3205
3206 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3207                                               void *context, int vl, int mode,
3208                                               u64 data)
3209 {
3210         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3211
3212         return dd->send_egress_err_status_cnt[48];
3213 }
3214
3215 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3216                                               void *context, int vl, int mode,
3217                                               u64 data)
3218 {
3219         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3220
3221         return dd->send_egress_err_status_cnt[47];
3222 }
3223
3224 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3225                                             void *context, int vl, int mode,
3226                                             u64 data)
3227 {
3228         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3229
3230         return dd->send_egress_err_status_cnt[46];
3231 }
3232
3233 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3234                                              void *context, int vl, int mode,
3235                                              u64 data)
3236 {
3237         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3238
3239         return dd->send_egress_err_status_cnt[45];
3240 }
3241
3242 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3243                                                  void *context, int vl,
3244                                                  int mode, u64 data)
3245 {
3246         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3247
3248         return dd->send_egress_err_status_cnt[44];
3249 }
3250
3251 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3252                                 const struct cntr_entry *entry,
3253                                 void *context, int vl, int mode, u64 data)
3254 {
3255         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3256
3257         return dd->send_egress_err_status_cnt[43];
3258 }
3259
3260 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3261                                         void *context, int vl, int mode,
3262                                         u64 data)
3263 {
3264         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3265
3266         return dd->send_egress_err_status_cnt[42];
3267 }
3268
3269 static u64 access_tx_credit_return_partiy_err_cnt(
3270                                 const struct cntr_entry *entry,
3271                                 void *context, int vl, int mode, u64 data)
3272 {
3273         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3274
3275         return dd->send_egress_err_status_cnt[41];
3276 }
3277
3278 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3279                                 const struct cntr_entry *entry,
3280                                 void *context, int vl, int mode, u64 data)
3281 {
3282         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3283
3284         return dd->send_egress_err_status_cnt[40];
3285 }
3286
3287 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3288                                 const struct cntr_entry *entry,
3289                                 void *context, int vl, int mode, u64 data)
3290 {
3291         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3292
3293         return dd->send_egress_err_status_cnt[39];
3294 }
3295
3296 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3297                                 const struct cntr_entry *entry,
3298                                 void *context, int vl, int mode, u64 data)
3299 {
3300         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3301
3302         return dd->send_egress_err_status_cnt[38];
3303 }
3304
3305 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3306                                 const struct cntr_entry *entry,
3307                                 void *context, int vl, int mode, u64 data)
3308 {
3309         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3310
3311         return dd->send_egress_err_status_cnt[37];
3312 }
3313
3314 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3315                                 const struct cntr_entry *entry,
3316                                 void *context, int vl, int mode, u64 data)
3317 {
3318         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3319
3320         return dd->send_egress_err_status_cnt[36];
3321 }
3322
3323 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3324                                 const struct cntr_entry *entry,
3325                                 void *context, int vl, int mode, u64 data)
3326 {
3327         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3328
3329         return dd->send_egress_err_status_cnt[35];
3330 }
3331
3332 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3333                                 const struct cntr_entry *entry,
3334                                 void *context, int vl, int mode, u64 data)
3335 {
3336         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3337
3338         return dd->send_egress_err_status_cnt[34];
3339 }
3340
3341 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3342                                 const struct cntr_entry *entry,
3343                                 void *context, int vl, int mode, u64 data)
3344 {
3345         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3346
3347         return dd->send_egress_err_status_cnt[33];
3348 }
3349
3350 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3351                                 const struct cntr_entry *entry,
3352                                 void *context, int vl, int mode, u64 data)
3353 {
3354         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3355
3356         return dd->send_egress_err_status_cnt[32];
3357 }
3358
3359 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3360                                 const struct cntr_entry *entry,
3361                                 void *context, int vl, int mode, u64 data)
3362 {
3363         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3364
3365         return dd->send_egress_err_status_cnt[31];
3366 }
3367
3368 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3369                                 const struct cntr_entry *entry,
3370                                 void *context, int vl, int mode, u64 data)
3371 {
3372         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3373
3374         return dd->send_egress_err_status_cnt[30];
3375 }
3376
3377 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3378                                 const struct cntr_entry *entry,
3379                                 void *context, int vl, int mode, u64 data)
3380 {
3381         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3382
3383         return dd->send_egress_err_status_cnt[29];
3384 }
3385
3386 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3387                                 const struct cntr_entry *entry,
3388                                 void *context, int vl, int mode, u64 data)
3389 {
3390         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3391
3392         return dd->send_egress_err_status_cnt[28];
3393 }
3394
3395 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3396                                 const struct cntr_entry *entry,
3397                                 void *context, int vl, int mode, u64 data)
3398 {
3399         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3400
3401         return dd->send_egress_err_status_cnt[27];
3402 }
3403
3404 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3405                                 const struct cntr_entry *entry,
3406                                 void *context, int vl, int mode, u64 data)
3407 {
3408         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3409
3410         return dd->send_egress_err_status_cnt[26];
3411 }
3412
3413 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3414                                 const struct cntr_entry *entry,
3415                                 void *context, int vl, int mode, u64 data)
3416 {
3417         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3418
3419         return dd->send_egress_err_status_cnt[25];
3420 }
3421
3422 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3423                                 const struct cntr_entry *entry,
3424                                 void *context, int vl, int mode, u64 data)
3425 {
3426         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3427
3428         return dd->send_egress_err_status_cnt[24];
3429 }
3430
3431 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3432                                 const struct cntr_entry *entry,
3433                                 void *context, int vl, int mode, u64 data)
3434 {
3435         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3436
3437         return dd->send_egress_err_status_cnt[23];
3438 }
3439
3440 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3441                                 const struct cntr_entry *entry,
3442                                 void *context, int vl, int mode, u64 data)
3443 {
3444         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3445
3446         return dd->send_egress_err_status_cnt[22];
3447 }
3448
3449 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3450                                 const struct cntr_entry *entry,
3451                                 void *context, int vl, int mode, u64 data)
3452 {
3453         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3454
3455         return dd->send_egress_err_status_cnt[21];
3456 }
3457
3458 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3459                                 const struct cntr_entry *entry,
3460                                 void *context, int vl, int mode, u64 data)
3461 {
3462         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3463
3464         return dd->send_egress_err_status_cnt[20];
3465 }
3466
3467 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3468                                 const struct cntr_entry *entry,
3469                                 void *context, int vl, int mode, u64 data)
3470 {
3471         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3472
3473         return dd->send_egress_err_status_cnt[19];
3474 }
3475
3476 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3477                                 const struct cntr_entry *entry,
3478                                 void *context, int vl, int mode, u64 data)
3479 {
3480         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3481
3482         return dd->send_egress_err_status_cnt[18];
3483 }
3484
3485 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3486                                 const struct cntr_entry *entry,
3487                                 void *context, int vl, int mode, u64 data)
3488 {
3489         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3490
3491         return dd->send_egress_err_status_cnt[17];
3492 }
3493
3494 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3495                                 const struct cntr_entry *entry,
3496                                 void *context, int vl, int mode, u64 data)
3497 {
3498         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3499
3500         return dd->send_egress_err_status_cnt[16];
3501 }
3502
3503 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3504                                            void *context, int vl, int mode,
3505                                            u64 data)
3506 {
3507         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3508
3509         return dd->send_egress_err_status_cnt[15];
3510 }
3511
3512 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3513                                                  void *context, int vl,
3514                                                  int mode, u64 data)
3515 {
3516         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3517
3518         return dd->send_egress_err_status_cnt[14];
3519 }
3520
3521 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3522                                                void *context, int vl, int mode,
3523                                                u64 data)
3524 {
3525         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3526
3527         return dd->send_egress_err_status_cnt[13];
3528 }
3529
3530 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3531                                         void *context, int vl, int mode,
3532                                         u64 data)
3533 {
3534         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3535
3536         return dd->send_egress_err_status_cnt[12];
3537 }
3538
3539 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3540                                 const struct cntr_entry *entry,
3541                                 void *context, int vl, int mode, u64 data)
3542 {
3543         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3544
3545         return dd->send_egress_err_status_cnt[11];
3546 }
3547
3548 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3549                                              void *context, int vl, int mode,
3550                                              u64 data)
3551 {
3552         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3553
3554         return dd->send_egress_err_status_cnt[10];
3555 }
3556
3557 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3558                                             void *context, int vl, int mode,
3559                                             u64 data)
3560 {
3561         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3562
3563         return dd->send_egress_err_status_cnt[9];
3564 }
3565
3566 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3567                                 const struct cntr_entry *entry,
3568                                 void *context, int vl, int mode, u64 data)
3569 {
3570         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3571
3572         return dd->send_egress_err_status_cnt[8];
3573 }
3574
3575 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3576                                 const struct cntr_entry *entry,
3577                                 void *context, int vl, int mode, u64 data)
3578 {
3579         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3580
3581         return dd->send_egress_err_status_cnt[7];
3582 }
3583
3584 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3585                                             void *context, int vl, int mode,
3586                                             u64 data)
3587 {
3588         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3589
3590         return dd->send_egress_err_status_cnt[6];
3591 }
3592
3593 static u64 access_tx_incorrect_link_state_err_cnt(
3594                                 const struct cntr_entry *entry,
3595                                 void *context, int vl, int mode, u64 data)
3596 {
3597         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3598
3599         return dd->send_egress_err_status_cnt[5];
3600 }
3601
3602 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3603                                       void *context, int vl, int mode,
3604                                       u64 data)
3605 {
3606         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3607
3608         return dd->send_egress_err_status_cnt[4];
3609 }
3610
3611 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3612                                 const struct cntr_entry *entry,
3613                                 void *context, int vl, int mode, u64 data)
3614 {
3615         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3616
3617         return dd->send_egress_err_status_cnt[3];
3618 }
3619
3620 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3621                                             void *context, int vl, int mode,
3622                                             u64 data)
3623 {
3624         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3625
3626         return dd->send_egress_err_status_cnt[2];
3627 }
3628
3629 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3630                                 const struct cntr_entry *entry,
3631                                 void *context, int vl, int mode, u64 data)
3632 {
3633         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3634
3635         return dd->send_egress_err_status_cnt[1];
3636 }
3637
3638 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3639                                 const struct cntr_entry *entry,
3640                                 void *context, int vl, int mode, u64 data)
3641 {
3642         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3643
3644         return dd->send_egress_err_status_cnt[0];
3645 }
3646
3647 /*
3648  * Software counters corresponding to each of the
3649  * error status bits within SendErrStatus
3650  */
3651 static u64 access_send_csr_write_bad_addr_err_cnt(
3652                                 const struct cntr_entry *entry,
3653                                 void *context, int vl, int mode, u64 data)
3654 {
3655         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3656
3657         return dd->send_err_status_cnt[2];
3658 }
3659
3660 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3661                                                  void *context, int vl,
3662                                                  int mode, u64 data)
3663 {
3664         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3665
3666         return dd->send_err_status_cnt[1];
3667 }
3668
3669 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3670                                       void *context, int vl, int mode,
3671                                       u64 data)
3672 {
3673         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3674
3675         return dd->send_err_status_cnt[0];
3676 }
3677
3678 /*
3679  * Software counters corresponding to each of the
3680  * error status bits within SendCtxtErrStatus
3681  */
3682 static u64 access_pio_write_out_of_bounds_err_cnt(
3683                                 const struct cntr_entry *entry,
3684                                 void *context, int vl, int mode, u64 data)
3685 {
3686         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3687
3688         return dd->sw_ctxt_err_status_cnt[4];
3689 }
3690
3691 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3692                                              void *context, int vl, int mode,
3693                                              u64 data)
3694 {
3695         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3696
3697         return dd->sw_ctxt_err_status_cnt[3];
3698 }
3699
3700 static u64 access_pio_write_crosses_boundary_err_cnt(
3701                                 const struct cntr_entry *entry,
3702                                 void *context, int vl, int mode, u64 data)
3703 {
3704         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3705
3706         return dd->sw_ctxt_err_status_cnt[2];
3707 }
3708
3709 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3710                                                 void *context, int vl,
3711                                                 int mode, u64 data)
3712 {
3713         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3714
3715         return dd->sw_ctxt_err_status_cnt[1];
3716 }
3717
3718 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3719                                                void *context, int vl, int mode,
3720                                                u64 data)
3721 {
3722         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3723
3724         return dd->sw_ctxt_err_status_cnt[0];
3725 }
3726
3727 /*
3728  * Software counters corresponding to each of the
3729  * error status bits within SendDmaEngErrStatus
3730  */
3731 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3732                                 const struct cntr_entry *entry,
3733                                 void *context, int vl, int mode, u64 data)
3734 {
3735         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3736
3737         return dd->sw_send_dma_eng_err_status_cnt[23];
3738 }
3739
3740 static u64 access_sdma_header_storage_cor_err_cnt(
3741                                 const struct cntr_entry *entry,
3742                                 void *context, int vl, int mode, u64 data)
3743 {
3744         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3745
3746         return dd->sw_send_dma_eng_err_status_cnt[22];
3747 }
3748
3749 static u64 access_sdma_packet_tracking_cor_err_cnt(
3750                                 const struct cntr_entry *entry,
3751                                 void *context, int vl, int mode, u64 data)
3752 {
3753         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3754
3755         return dd->sw_send_dma_eng_err_status_cnt[21];
3756 }
3757
3758 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3759                                             void *context, int vl, int mode,
3760                                             u64 data)
3761 {
3762         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3763
3764         return dd->sw_send_dma_eng_err_status_cnt[20];
3765 }
3766
3767 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3768                                               void *context, int vl, int mode,
3769                                               u64 data)
3770 {
3771         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3772
3773         return dd->sw_send_dma_eng_err_status_cnt[19];
3774 }
3775
3776 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3777                                 const struct cntr_entry *entry,
3778                                 void *context, int vl, int mode, u64 data)
3779 {
3780         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3781
3782         return dd->sw_send_dma_eng_err_status_cnt[18];
3783 }
3784
3785 static u64 access_sdma_header_storage_unc_err_cnt(
3786                                 const struct cntr_entry *entry,
3787                                 void *context, int vl, int mode, u64 data)
3788 {
3789         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3790
3791         return dd->sw_send_dma_eng_err_status_cnt[17];
3792 }
3793
3794 static u64 access_sdma_packet_tracking_unc_err_cnt(
3795                                 const struct cntr_entry *entry,
3796                                 void *context, int vl, int mode, u64 data)
3797 {
3798         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3799
3800         return dd->sw_send_dma_eng_err_status_cnt[16];
3801 }
3802
3803 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3804                                             void *context, int vl, int mode,
3805                                             u64 data)
3806 {
3807         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3808
3809         return dd->sw_send_dma_eng_err_status_cnt[15];
3810 }
3811
3812 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3813                                               void *context, int vl, int mode,
3814                                               u64 data)
3815 {
3816         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3817
3818         return dd->sw_send_dma_eng_err_status_cnt[14];
3819 }
3820
3821 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3822                                        void *context, int vl, int mode,
3823                                        u64 data)
3824 {
3825         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3826
3827         return dd->sw_send_dma_eng_err_status_cnt[13];
3828 }
3829
3830 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3831                                              void *context, int vl, int mode,
3832                                              u64 data)
3833 {
3834         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3835
3836         return dd->sw_send_dma_eng_err_status_cnt[12];
3837 }
3838
3839 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3840                                               void *context, int vl, int mode,
3841                                               u64 data)
3842 {
3843         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3844
3845         return dd->sw_send_dma_eng_err_status_cnt[11];
3846 }
3847
3848 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3849                                              void *context, int vl, int mode,
3850                                              u64 data)
3851 {
3852         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3853
3854         return dd->sw_send_dma_eng_err_status_cnt[10];
3855 }
3856
3857 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3858                                           void *context, int vl, int mode,
3859                                           u64 data)
3860 {
3861         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3862
3863         return dd->sw_send_dma_eng_err_status_cnt[9];
3864 }
3865
3866 static u64 access_sdma_packet_desc_overflow_err_cnt(
3867                                 const struct cntr_entry *entry,
3868                                 void *context, int vl, int mode, u64 data)
3869 {
3870         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3871
3872         return dd->sw_send_dma_eng_err_status_cnt[8];
3873 }
3874
3875 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3876                                                void *context, int vl,
3877                                                int mode, u64 data)
3878 {
3879         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3880
3881         return dd->sw_send_dma_eng_err_status_cnt[7];
3882 }
3883
3884 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3885                                     void *context, int vl, int mode, u64 data)
3886 {
3887         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3888
3889         return dd->sw_send_dma_eng_err_status_cnt[6];
3890 }
3891
3892 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3893                                         void *context, int vl, int mode,
3894                                         u64 data)
3895 {
3896         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3897
3898         return dd->sw_send_dma_eng_err_status_cnt[5];
3899 }
3900
3901 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3902                                           void *context, int vl, int mode,
3903                                           u64 data)
3904 {
3905         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3906
3907         return dd->sw_send_dma_eng_err_status_cnt[4];
3908 }
3909
3910 static u64 access_sdma_tail_out_of_bounds_err_cnt(
3911                                 const struct cntr_entry *entry,
3912                                 void *context, int vl, int mode, u64 data)
3913 {
3914         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3915
3916         return dd->sw_send_dma_eng_err_status_cnt[3];
3917 }
3918
3919 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3920                                         void *context, int vl, int mode,
3921                                         u64 data)
3922 {
3923         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3924
3925         return dd->sw_send_dma_eng_err_status_cnt[2];
3926 }
3927
3928 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3929                                             void *context, int vl, int mode,
3930                                             u64 data)
3931 {
3932         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3933
3934         return dd->sw_send_dma_eng_err_status_cnt[1];
3935 }
3936
3937 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3938                                         void *context, int vl, int mode,
3939                                         u64 data)
3940 {
3941         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3942
3943         return dd->sw_send_dma_eng_err_status_cnt[0];
3944 }
3945
3946 #define def_access_sw_cpu(cntr) \
3947 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
3948                               void *context, int vl, int mode, u64 data)      \
3949 {                                                                             \
3950         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
3951         return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
3952                               ppd->ibport_data.rvp.cntr, vl,                  \
3953                               mode, data);                                    \
3954 }
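/*
 * For illustration: an instantiation such as def_access_sw_cpu(rc_acks)
 * below expands to roughly the following accessor, which hands the
 * per-CPU counter and its zero-baseline to read_write_cpu():
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *				      void *context, int vl, int mode, u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */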
3955
3956 def_access_sw_cpu(rc_acks);
3957 def_access_sw_cpu(rc_qacks);
3958 def_access_sw_cpu(rc_delayed_comp);
3959
3960 #define def_access_ibp_counter(cntr) \
3961 static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
3962                                 void *context, int vl, int mode, u64 data)    \
3963 {                                                                             \
3964         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
3965                                                                               \
3966         if (vl != CNTR_INVALID_VL)                                            \
3967                 return 0;                                                     \
3968                                                                               \
3969         return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
3970                              mode, data);                                     \
3971 }
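/*
 * For illustration: def_access_ibp_counter(loop_pkts) below expands to
 * roughly the following.  These port-level IB counters have no per-VL
 * variant, so any per-VL request returns 0 and only CNTR_INVALID_VL
 * reaches read_write_sw():
 *
 *	static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *					void *context, int vl, int mode, u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_loop_pkts,
 *				     mode, data);
 *	}
 */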
3972
3973 def_access_ibp_counter(loop_pkts);
3974 def_access_ibp_counter(rc_resends);
3975 def_access_ibp_counter(rnr_naks);
3976 def_access_ibp_counter(other_naks);
3977 def_access_ibp_counter(rc_timeouts);
3978 def_access_ibp_counter(pkt_drops);
3979 def_access_ibp_counter(dmawait);
3980 def_access_ibp_counter(rc_seqnak);
3981 def_access_ibp_counter(rc_dupreq);
3982 def_access_ibp_counter(rdma_seq);
3983 def_access_ibp_counter(unaligned);
3984 def_access_ibp_counter(seq_naks);
3985
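/*
 * Device counter table, indexed by the C_* device counter enums.  Each
 * element is built by one of the *_DEV_CNTR_ELEM/DC_PERF_CNTR helper
 * macros, naming the counter, the backing CSR, and its flags; CNTR_NORMAL
 * entries are read directly from hardware, while CNTR_SYNTH entries are
 * presumably synthesized (accumulated/widened in software) on top of the
 * CSR reads.
 */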
3986 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3987 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3988 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
3989                         CNTR_NORMAL),
3990 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3991                         CNTR_NORMAL),
3992 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3993                         RCV_TID_FLOW_GEN_MISMATCH_CNT,
3994                         CNTR_NORMAL),
3995 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
3996                         CNTR_NORMAL),
3997 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
3998                         RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
3999 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4000                         CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4001 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4002                         CNTR_NORMAL),
4003 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4004                         CNTR_NORMAL),
4005 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4006                         CNTR_NORMAL),
4007 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4008                         CNTR_NORMAL),
4009 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4010                         CNTR_NORMAL),
4011 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4012                         CNTR_NORMAL),
4013 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4014                         CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4015 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4016                         CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4017 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4018                               CNTR_SYNTH),
4019 [C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4020 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4021                                  CNTR_SYNTH),
4022 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4023                                   CNTR_SYNTH),
4024 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4025                                   CNTR_SYNTH),
4026 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4027                                    DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4028 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4029                                   DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4030                                   CNTR_SYNTH),
4031 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4032                                 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4033 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4034                                CNTR_SYNTH),
4035 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4036                               CNTR_SYNTH),
4037 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4038                                CNTR_SYNTH),
4039 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4040                                  CNTR_SYNTH),
4041 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4042                                 CNTR_SYNTH),
4043 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4044                                 CNTR_SYNTH),
4045 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4046                                CNTR_SYNTH),
4047 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4048                                  CNTR_SYNTH | CNTR_VL),
4049 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4050                                 CNTR_SYNTH | CNTR_VL),
4051 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4052 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4053                                  CNTR_SYNTH | CNTR_VL),
4054 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4055 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4056                                  CNTR_SYNTH | CNTR_VL),
4057 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4058                               CNTR_SYNTH),
4059 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4060                                  CNTR_SYNTH | CNTR_VL),
4061 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4062                                 CNTR_SYNTH),
4063 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4064                                    CNTR_SYNTH | CNTR_VL),
4065 [C_DC_TOTAL_CRC] =
4066         DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4067                          CNTR_SYNTH),
4068 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4069                                   CNTR_SYNTH),
4070 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4071                                   CNTR_SYNTH),
4072 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4073                                   CNTR_SYNTH),
4074 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4075                                   CNTR_SYNTH),
4076 [C_DC_CRC_MULT_LN] =
4077         DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4078                          CNTR_SYNTH),
4079 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4080                                     CNTR_SYNTH),
4081 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4082                                     CNTR_SYNTH),
4083 [C_DC_SEQ_CRC_CNT] =
4084         DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4085                          CNTR_SYNTH),
4086 [C_DC_ESC0_ONLY_CNT] =
4087         DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4088                          CNTR_SYNTH),
4089 [C_DC_ESC0_PLUS1_CNT] =
4090         DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4091                          CNTR_SYNTH),
4092 [C_DC_ESC0_PLUS2_CNT] =
4093         DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4094                          CNTR_SYNTH),
4095 [C_DC_REINIT_FROM_PEER_CNT] =
4096         DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4097                          CNTR_SYNTH),
4098 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4099                                   CNTR_SYNTH),
4100 [C_DC_MISC_FLG_CNT] =
4101         DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4102                          CNTR_SYNTH),
4103 [C_DC_PRF_GOOD_LTP_CNT] =
4104         DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4105 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4106         DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4107                          CNTR_SYNTH),
4108 [C_DC_PRF_RX_FLIT_CNT] =
4109         DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4110 [C_DC_PRF_TX_FLIT_CNT] =
4111         DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4112 [C_DC_PRF_CLK_CNTR] =
4113         DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4114 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4115         DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4116 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4117         DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4118                          CNTR_SYNTH),
4119 [C_DC_PG_STS_TX_SBE_CNT] =
4120         DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4121 [C_DC_PG_STS_TX_MBE_CNT] =
4122         DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4123                          CNTR_SYNTH),
4124 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4125                             access_sw_cpu_intr),
4126 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4127                             access_sw_cpu_rcv_limit),
4128 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4129                             access_sw_vtx_wait),
4130 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4131                             access_sw_pio_wait),
4132 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4133                             access_sw_pio_drain),
4134 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4135                             access_sw_kmem_wait),
4136 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4137                             access_sw_send_schedule),
4138 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4139                                       SEND_DMA_DESC_FETCHED_CNT, 0,
4140                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4141                                       dev_access_u32_csr),
4142 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4143                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4144                              access_sde_int_cnt),
4145 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4146                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4147                              access_sde_err_cnt),
4148 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4149                                   CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4150                                   access_sde_idle_int_cnt),
4151 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4152                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4153                                       access_sde_progress_int_cnt),
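/*
 * The remainder of this table shadows the chip's error-status registers
 * (MISC_ERR_STATUS, CceErrStatus, RcvErrStatus, SendPioErrStatus,
 * SendDmaErrStatus, SendEgressErrStatus, SendErrStatus, SendCtxtErrStatus
 * and SendDmaEngErrStatus) with one software counter per error bit.  None
 * of these entries has a CSR of its own; each access_*_err_cnt() callback
 * simply returns an element of a per-device count array, as the SendDmaEng
 * accessors above illustrate.
 */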
4154 /* MISC_ERR_STATUS */
4155 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4156                                 CNTR_NORMAL,
4157                                 access_misc_pll_lock_fail_err_cnt),
4158 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4159                                 CNTR_NORMAL,
4160                                 access_misc_mbist_fail_err_cnt),
4161 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4162                                 CNTR_NORMAL,
4163                                 access_misc_invalid_eep_cmd_err_cnt),
4164 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4165                                 CNTR_NORMAL,
4166                                 access_misc_efuse_done_parity_err_cnt),
4167 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4168                                 CNTR_NORMAL,
4169                                 access_misc_efuse_write_err_cnt),
4170 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4171                                 0, CNTR_NORMAL,
4172                                 access_misc_efuse_read_bad_addr_err_cnt),
4173 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4174                                 CNTR_NORMAL,
4175                                 access_misc_efuse_csr_parity_err_cnt),
4176 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4177                                 CNTR_NORMAL,
4178                                 access_misc_fw_auth_failed_err_cnt),
4179 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4180                                 CNTR_NORMAL,
4181                                 access_misc_key_mismatch_err_cnt),
4182 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4183                                 CNTR_NORMAL,
4184                                 access_misc_sbus_write_failed_err_cnt),
4185 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4186                                 CNTR_NORMAL,
4187                                 access_misc_csr_write_bad_addr_err_cnt),
4188 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4189                                 CNTR_NORMAL,
4190                                 access_misc_csr_read_bad_addr_err_cnt),
4191 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4192                                 CNTR_NORMAL,
4193                                 access_misc_csr_parity_err_cnt),
4194 /* CceErrStatus */
4195 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4196                                 CNTR_NORMAL,
4197                                 access_sw_cce_err_status_aggregated_cnt),
4198 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4199                                 CNTR_NORMAL,
4200                                 access_cce_msix_csr_parity_err_cnt),
4201 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4202                                 CNTR_NORMAL,
4203                                 access_cce_int_map_unc_err_cnt),
4204 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4205                                 CNTR_NORMAL,
4206                                 access_cce_int_map_cor_err_cnt),
4207 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4208                                 CNTR_NORMAL,
4209                                 access_cce_msix_table_unc_err_cnt),
4210 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4211                                 CNTR_NORMAL,
4212                                 access_cce_msix_table_cor_err_cnt),
4213 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4214                                 0, CNTR_NORMAL,
4215                                 access_cce_rxdma_conv_fifo_parity_err_cnt),
4216 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4217                                 0, CNTR_NORMAL,
4218                                 access_cce_rcpl_async_fifo_parity_err_cnt),
4219 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4220                                 CNTR_NORMAL,
4221                                 access_cce_seg_write_bad_addr_err_cnt),
4222 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4223                                 CNTR_NORMAL,
4224                                 access_cce_seg_read_bad_addr_err_cnt),
4225 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4226                                 CNTR_NORMAL,
4227                                 access_la_triggered_cnt),
4228 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4229                                 CNTR_NORMAL,
4230                                 access_cce_trgt_cpl_timeout_err_cnt),
4231 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4232                                 CNTR_NORMAL,
4233                                 access_pcic_receive_parity_err_cnt),
4234 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4235                                 CNTR_NORMAL,
4236                                 access_pcic_transmit_back_parity_err_cnt),
4237 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4238                                 0, CNTR_NORMAL,
4239                                 access_pcic_transmit_front_parity_err_cnt),
4240 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4241                                 CNTR_NORMAL,
4242                                 access_pcic_cpl_dat_q_unc_err_cnt),
4243 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4244                                 CNTR_NORMAL,
4245                                 access_pcic_cpl_hd_q_unc_err_cnt),
4246 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4247                                 CNTR_NORMAL,
4248                                 access_pcic_post_dat_q_unc_err_cnt),
4249 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4250                                 CNTR_NORMAL,
4251                                 access_pcic_post_hd_q_unc_err_cnt),
4252 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4253                                 CNTR_NORMAL,
4254                                 access_pcic_retry_sot_mem_unc_err_cnt),
4255 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4256                                 CNTR_NORMAL,
4257                                 access_pcic_retry_mem_unc_err),
4258 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4259                                 CNTR_NORMAL,
4260                                 access_pcic_n_post_dat_q_parity_err_cnt),
4261 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4262                                 CNTR_NORMAL,
4263                                 access_pcic_n_post_h_q_parity_err_cnt),
4264 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4265                                 CNTR_NORMAL,
4266                                 access_pcic_cpl_dat_q_cor_err_cnt),
4267 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4268                                 CNTR_NORMAL,
4269                                 access_pcic_cpl_hd_q_cor_err_cnt),
4270 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4271                                 CNTR_NORMAL,
4272                                 access_pcic_post_dat_q_cor_err_cnt),
4273 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4274                                 CNTR_NORMAL,
4275                                 access_pcic_post_hd_q_cor_err_cnt),
4276 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4277                                 CNTR_NORMAL,
4278                                 access_pcic_retry_sot_mem_cor_err_cnt),
4279 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4280                                 CNTR_NORMAL,
4281                                 access_pcic_retry_mem_cor_err_cnt),
4282 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4283                                 "CceCli1AsyncFifoDbgParityError", 0, 0,
4284                                 CNTR_NORMAL,
4285                                 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4286 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4287                                 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4288                                 CNTR_NORMAL,
4289                                 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4290                                 ),
4291 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4292                         "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4293                         CNTR_NORMAL,
4294                         access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4295 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4296                         "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4297                         CNTR_NORMAL,
4298                         access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4299 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4300                         0, CNTR_NORMAL,
4301                         access_cce_cli2_async_fifo_parity_err_cnt),
4302 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4303                         CNTR_NORMAL,
4304                         access_cce_csr_cfg_bus_parity_err_cnt),
4305 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4306                         0, CNTR_NORMAL,
4307                         access_cce_cli0_async_fifo_parity_err_cnt),
4308 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4309                         CNTR_NORMAL,
4310                         access_cce_rspd_data_parity_err_cnt),
4311 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4312                         CNTR_NORMAL,
4313                         access_cce_trgt_access_err_cnt),
4314 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4315                         0, CNTR_NORMAL,
4316                         access_cce_trgt_async_fifo_parity_err_cnt),
4317 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4318                         CNTR_NORMAL,
4319                         access_cce_csr_write_bad_addr_err_cnt),
4320 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4321                         CNTR_NORMAL,
4322                         access_cce_csr_read_bad_addr_err_cnt),
4323 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4324                         CNTR_NORMAL,
4325                         access_ccs_csr_parity_err_cnt),
4326
4327 /* RcvErrStatus */
4328 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4329                         CNTR_NORMAL,
4330                         access_rx_csr_parity_err_cnt),
4331 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4332                         CNTR_NORMAL,
4333                         access_rx_csr_write_bad_addr_err_cnt),
4334 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4335                         CNTR_NORMAL,
4336                         access_rx_csr_read_bad_addr_err_cnt),
4337 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4338                         CNTR_NORMAL,
4339                         access_rx_dma_csr_unc_err_cnt),
4340 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4341                         CNTR_NORMAL,
4342                         access_rx_dma_dq_fsm_encoding_err_cnt),
4343 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4344                         CNTR_NORMAL,
4345                         access_rx_dma_eq_fsm_encoding_err_cnt),
4346 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4347                         CNTR_NORMAL,
4348                         access_rx_dma_csr_parity_err_cnt),
4349 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4350                         CNTR_NORMAL,
4351                         access_rx_rbuf_data_cor_err_cnt),
4352 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4353                         CNTR_NORMAL,
4354                         access_rx_rbuf_data_unc_err_cnt),
4355 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4356                         CNTR_NORMAL,
4357                         access_rx_dma_data_fifo_rd_cor_err_cnt),
4358 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4359                         CNTR_NORMAL,
4360                         access_rx_dma_data_fifo_rd_unc_err_cnt),
4361 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4362                         CNTR_NORMAL,
4363                         access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4364 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4365                         CNTR_NORMAL,
4366                         access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4367 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4368                         CNTR_NORMAL,
4369                         access_rx_rbuf_desc_part2_cor_err_cnt),
4370 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4371                         CNTR_NORMAL,
4372                         access_rx_rbuf_desc_part2_unc_err_cnt),
4373 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4374                         CNTR_NORMAL,
4375                         access_rx_rbuf_desc_part1_cor_err_cnt),
4376 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4377                         CNTR_NORMAL,
4378                         access_rx_rbuf_desc_part1_unc_err_cnt),
4379 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4380                         CNTR_NORMAL,
4381                         access_rx_hq_intr_fsm_err_cnt),
4382 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4383                         CNTR_NORMAL,
4384                         access_rx_hq_intr_csr_parity_err_cnt),
4385 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4386                         CNTR_NORMAL,
4387                         access_rx_lookup_csr_parity_err_cnt),
4388 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4389                         CNTR_NORMAL,
4390                         access_rx_lookup_rcv_array_cor_err_cnt),
4391 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4392                         CNTR_NORMAL,
4393                         access_rx_lookup_rcv_array_unc_err_cnt),
4394 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4395                         0, CNTR_NORMAL,
4396                         access_rx_lookup_des_part2_parity_err_cnt),
4397 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4398                         0, CNTR_NORMAL,
4399                         access_rx_lookup_des_part1_unc_cor_err_cnt),
4400 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4401                         CNTR_NORMAL,
4402                         access_rx_lookup_des_part1_unc_err_cnt),
4403 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4404                         CNTR_NORMAL,
4405                         access_rx_rbuf_next_free_buf_cor_err_cnt),
4406 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4407                         CNTR_NORMAL,
4408                         access_rx_rbuf_next_free_buf_unc_err_cnt),
4409 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4410                         "RxRbufFlInitWrAddrParityErr", 0, 0,
4411                         CNTR_NORMAL,
4412                         access_rbuf_fl_init_wr_addr_parity_err_cnt),
4413 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4414                         0, CNTR_NORMAL,
4415                         access_rx_rbuf_fl_initdone_parity_err_cnt),
4416 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4417                         0, CNTR_NORMAL,
4418                         access_rx_rbuf_fl_write_addr_parity_err_cnt),
4419 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4420                         CNTR_NORMAL,
4421                         access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4422 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4423                         CNTR_NORMAL,
4424                         access_rx_rbuf_empty_err_cnt),
4425 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4426                         CNTR_NORMAL,
4427                         access_rx_rbuf_full_err_cnt),
4428 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4429                         CNTR_NORMAL,
4430                         access_rbuf_bad_lookup_err_cnt),
4431 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4432                         CNTR_NORMAL,
4433                         access_rbuf_ctx_id_parity_err_cnt),
4434 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4435                         CNTR_NORMAL,
4436                         access_rbuf_csr_qeopdw_parity_err_cnt),
4437 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4438                         "RxRbufCsrQNumOfPktParityErr", 0, 0,
4439                         CNTR_NORMAL,
4440                         access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4441 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4442                         "RxRbufCsrQTlPtrParityErr", 0, 0,
4443                         CNTR_NORMAL,
4444                         access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4445 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4446                         0, CNTR_NORMAL,
4447                         access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4448 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4449                         0, CNTR_NORMAL,
4450                         access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4451 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4452                         0, 0, CNTR_NORMAL,
4453                         access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4454 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4455                         0, CNTR_NORMAL,
4456                         access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4457 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4458                         "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4459                         CNTR_NORMAL,
4460                         access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4461 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4462                         0, CNTR_NORMAL,
4463                         access_rx_rbuf_block_list_read_cor_err_cnt),
4464 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4465                         0, CNTR_NORMAL,
4466                         access_rx_rbuf_block_list_read_unc_err_cnt),
4467 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4468                         CNTR_NORMAL,
4469                         access_rx_rbuf_lookup_des_cor_err_cnt),
4470 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4471                         CNTR_NORMAL,
4472                         access_rx_rbuf_lookup_des_unc_err_cnt),
4473 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4474                         "RxRbufLookupDesRegUncCorErr", 0, 0,
4475                         CNTR_NORMAL,
4476                         access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4477 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4478                         CNTR_NORMAL,
4479                         access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4480 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4481                         CNTR_NORMAL,
4482                         access_rx_rbuf_free_list_cor_err_cnt),
4483 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4484                         CNTR_NORMAL,
4485                         access_rx_rbuf_free_list_unc_err_cnt),
4486 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4487                         CNTR_NORMAL,
4488                         access_rx_rcv_fsm_encoding_err_cnt),
4489 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4490                         CNTR_NORMAL,
4491                         access_rx_dma_flag_cor_err_cnt),
4492 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4493                         CNTR_NORMAL,
4494                         access_rx_dma_flag_unc_err_cnt),
4495 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4496                         CNTR_NORMAL,
4497                         access_rx_dc_sop_eop_parity_err_cnt),
4498 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4499                         CNTR_NORMAL,
4500                         access_rx_rcv_csr_parity_err_cnt),
4501 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4502                         CNTR_NORMAL,
4503                         access_rx_rcv_qp_map_table_cor_err_cnt),
4504 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4505                         CNTR_NORMAL,
4506                         access_rx_rcv_qp_map_table_unc_err_cnt),
4507 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4508                         CNTR_NORMAL,
4509                         access_rx_rcv_data_cor_err_cnt),
4510 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4511                         CNTR_NORMAL,
4512                         access_rx_rcv_data_unc_err_cnt),
4513 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4514                         CNTR_NORMAL,
4515                         access_rx_rcv_hdr_cor_err_cnt),
4516 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4517                         CNTR_NORMAL,
4518                         access_rx_rcv_hdr_unc_err_cnt),
4519 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4520                         CNTR_NORMAL,
4521                         access_rx_dc_intf_parity_err_cnt),
4522 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4523                         CNTR_NORMAL,
4524                         access_rx_dma_csr_cor_err_cnt),
4525 /* SendPioErrStatus */
4526 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4527                         CNTR_NORMAL,
4528                         access_pio_pec_sop_head_parity_err_cnt),
4529 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4530                         CNTR_NORMAL,
4531                         access_pio_pcc_sop_head_parity_err_cnt),
4532 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4533                         0, 0, CNTR_NORMAL,
4534                         access_pio_last_returned_cnt_parity_err_cnt),
4535 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4536                         0, CNTR_NORMAL,
4537                         access_pio_current_free_cnt_parity_err_cnt),
4538 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4539                         CNTR_NORMAL,
4540                         access_pio_reserved_31_err_cnt),
4541 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4542                         CNTR_NORMAL,
4543                         access_pio_reserved_30_err_cnt),
4544 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4545                         CNTR_NORMAL,
4546                         access_pio_ppmc_sop_len_err_cnt),
4547 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4548                         CNTR_NORMAL,
4549                         access_pio_ppmc_bqc_mem_parity_err_cnt),
4550 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4551                         CNTR_NORMAL,
4552                         access_pio_vl_fifo_parity_err_cnt),
4553 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4554                         CNTR_NORMAL,
4555                         access_pio_vlf_sop_parity_err_cnt),
4556 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4557                         CNTR_NORMAL,
4558                         access_pio_vlf_v1_len_parity_err_cnt),
4559 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4560                         CNTR_NORMAL,
4561                         access_pio_block_qw_count_parity_err_cnt),
4562 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4563                         CNTR_NORMAL,
4564                         access_pio_write_qw_valid_parity_err_cnt),
4565 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4566                         CNTR_NORMAL,
4567                         access_pio_state_machine_err_cnt),
4568 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4569                         CNTR_NORMAL,
4570                         access_pio_write_data_parity_err_cnt),
4571 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4572                         CNTR_NORMAL,
4573                         access_pio_host_addr_mem_cor_err_cnt),
4574 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4575                         CNTR_NORMAL,
4576                         access_pio_host_addr_mem_unc_err_cnt),
4577 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4578                         CNTR_NORMAL,
4579                         access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4580 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4581                         CNTR_NORMAL,
4582                         access_pio_init_sm_in_err_cnt),
4583 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4584                         CNTR_NORMAL,
4585                         access_pio_ppmc_pbl_fifo_err_cnt),
4586 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4587                         0, CNTR_NORMAL,
4588                         access_pio_credit_ret_fifo_parity_err_cnt),
4589 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4590                         CNTR_NORMAL,
4591                         access_pio_v1_len_mem_bank1_cor_err_cnt),
4592 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4593                         CNTR_NORMAL,
4594                         access_pio_v1_len_mem_bank0_cor_err_cnt),
4595 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4596                         CNTR_NORMAL,
4597                         access_pio_v1_len_mem_bank1_unc_err_cnt),
4598 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4599                         CNTR_NORMAL,
4600                         access_pio_v1_len_mem_bank0_unc_err_cnt),
4601 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4602                         CNTR_NORMAL,
4603                         access_pio_sm_pkt_reset_parity_err_cnt),
4604 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4605                         CNTR_NORMAL,
4606                         access_pio_pkt_evict_fifo_parity_err_cnt),
4607 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4608                         "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4609                         CNTR_NORMAL,
4610                         access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4611 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4612                         CNTR_NORMAL,
4613                         access_pio_sbrdctl_crrel_parity_err_cnt),
4614 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4615                         CNTR_NORMAL,
4616                         access_pio_pec_fifo_parity_err_cnt),
4617 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4618                         CNTR_NORMAL,
4619                         access_pio_pcc_fifo_parity_err_cnt),
4620 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4621                         CNTR_NORMAL,
4622                         access_pio_sb_mem_fifo1_err_cnt),
4623 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4624                         CNTR_NORMAL,
4625                         access_pio_sb_mem_fifo0_err_cnt),
4626 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4627                         CNTR_NORMAL,
4628                         access_pio_csr_parity_err_cnt),
4629 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4630                         CNTR_NORMAL,
4631                         access_pio_write_addr_parity_err_cnt),
4632 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4633                         CNTR_NORMAL,
4634                         access_pio_write_bad_ctxt_err_cnt),
4635 /* SendDmaErrStatus */
4636 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4637                         0, CNTR_NORMAL,
4638                         access_sdma_pcie_req_tracking_cor_err_cnt),
4639 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4640                         0, CNTR_NORMAL,
4641                         access_sdma_pcie_req_tracking_unc_err_cnt),
4642 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4643                         CNTR_NORMAL,
4644                         access_sdma_csr_parity_err_cnt),
4645 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4646                         CNTR_NORMAL,
4647                         access_sdma_rpy_tag_err_cnt),
4648 /* SendEgressErrStatus */
4649 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4650                         CNTR_NORMAL,
4651                         access_tx_read_pio_memory_csr_unc_err_cnt),
4652 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4653                         0, CNTR_NORMAL,
4654                         access_tx_read_sdma_memory_csr_err_cnt),
4655 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4656                         CNTR_NORMAL,
4657                         access_tx_egress_fifo_cor_err_cnt),
4658 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4659                         CNTR_NORMAL,
4660                         access_tx_read_pio_memory_cor_err_cnt),
4661 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4662                         CNTR_NORMAL,
4663                         access_tx_read_sdma_memory_cor_err_cnt),
4664 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4665                         CNTR_NORMAL,
4666                         access_tx_sb_hdr_cor_err_cnt),
4667 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4668                         CNTR_NORMAL,
4669                         access_tx_credit_overrun_err_cnt),
4670 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4671                         CNTR_NORMAL,
4672                         access_tx_launch_fifo8_cor_err_cnt),
4673 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4674                         CNTR_NORMAL,
4675                         access_tx_launch_fifo7_cor_err_cnt),
4676 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4677                         CNTR_NORMAL,
4678                         access_tx_launch_fifo6_cor_err_cnt),
4679 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4680                         CNTR_NORMAL,
4681                         access_tx_launch_fifo5_cor_err_cnt),
4682 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4683                         CNTR_NORMAL,
4684                         access_tx_launch_fifo4_cor_err_cnt),
4685 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4686                         CNTR_NORMAL,
4687                         access_tx_launch_fifo3_cor_err_cnt),
4688 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4689                         CNTR_NORMAL,
4690                         access_tx_launch_fifo2_cor_err_cnt),
4691 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4692                         CNTR_NORMAL,
4693                         access_tx_launch_fifo1_cor_err_cnt),
4694 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4695                         CNTR_NORMAL,
4696                         access_tx_launch_fifo0_cor_err_cnt),
4697 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4698                         CNTR_NORMAL,
4699                         access_tx_credit_return_vl_err_cnt),
4700 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4701                         CNTR_NORMAL,
4702                         access_tx_hcrc_insertion_err_cnt),
4703 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4704                         CNTR_NORMAL,
4705                         access_tx_egress_fifo_unc_err_cnt),
4706 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4707                         CNTR_NORMAL,
4708                         access_tx_read_pio_memory_unc_err_cnt),
4709 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4710                         CNTR_NORMAL,
4711                         access_tx_read_sdma_memory_unc_err_cnt),
4712 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4713                         CNTR_NORMAL,
4714                         access_tx_sb_hdr_unc_err_cnt),
4715 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4716                         CNTR_NORMAL,
4717                         access_tx_credit_return_partiy_err_cnt),
4718 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4719                         0, 0, CNTR_NORMAL,
4720                         access_tx_launch_fifo8_unc_or_parity_err_cnt),
4721 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4722                         0, 0, CNTR_NORMAL,
4723                         access_tx_launch_fifo7_unc_or_parity_err_cnt),
4724 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4725                         0, 0, CNTR_NORMAL,
4726                         access_tx_launch_fifo6_unc_or_parity_err_cnt),
4727 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4728                         0, 0, CNTR_NORMAL,
4729                         access_tx_launch_fifo5_unc_or_parity_err_cnt),
4730 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4731                         0, 0, CNTR_NORMAL,
4732                         access_tx_launch_fifo4_unc_or_parity_err_cnt),
4733 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4734                         0, 0, CNTR_NORMAL,
4735                         access_tx_launch_fifo3_unc_or_parity_err_cnt),
4736 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4737                         0, 0, CNTR_NORMAL,
4738                         access_tx_launch_fifo2_unc_or_parity_err_cnt),
4739 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4740                         0, 0, CNTR_NORMAL,
4741                         access_tx_launch_fifo1_unc_or_parity_err_cnt),
4742 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4743                         0, 0, CNTR_NORMAL,
4744                         access_tx_launch_fifo0_unc_or_parity_err_cnt),
4745 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4746                         0, 0, CNTR_NORMAL,
4747                         access_tx_sdma15_disallowed_packet_err_cnt),
4748 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4749                         0, 0, CNTR_NORMAL,
4750                         access_tx_sdma14_disallowed_packet_err_cnt),
4751 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4752                         0, 0, CNTR_NORMAL,
4753                         access_tx_sdma13_disallowed_packet_err_cnt),
4754 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4755                         0, 0, CNTR_NORMAL,
4756                         access_tx_sdma12_disallowed_packet_err_cnt),
4757 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4758                         0, 0, CNTR_NORMAL,
4759                         access_tx_sdma11_disallowed_packet_err_cnt),
4760 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4761                         0, 0, CNTR_NORMAL,
4762                         access_tx_sdma10_disallowed_packet_err_cnt),
4763 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4764                         0, 0, CNTR_NORMAL,
4765                         access_tx_sdma9_disallowed_packet_err_cnt),
4766 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4767                         0, 0, CNTR_NORMAL,
4768                         access_tx_sdma8_disallowed_packet_err_cnt),
4769 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4770                         0, 0, CNTR_NORMAL,
4771                         access_tx_sdma7_disallowed_packet_err_cnt),
4772 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4773                         0, 0, CNTR_NORMAL,
4774                         access_tx_sdma6_disallowed_packet_err_cnt),
4775 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4776                         0, 0, CNTR_NORMAL,
4777                         access_tx_sdma5_disallowed_packet_err_cnt),
4778 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4779                         0, 0, CNTR_NORMAL,
4780                         access_tx_sdma4_disallowed_packet_err_cnt),
4781 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4782                         0, 0, CNTR_NORMAL,
4783                         access_tx_sdma3_disallowed_packet_err_cnt),
4784 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4785                         0, 0, CNTR_NORMAL,
4786                         access_tx_sdma2_disallowed_packet_err_cnt),
4787 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4788                         0, 0, CNTR_NORMAL,
4789                         access_tx_sdma1_disallowed_packet_err_cnt),
4790 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4791                         0, 0, CNTR_NORMAL,
4792                         access_tx_sdma0_disallowed_packet_err_cnt),
4793 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4794                         CNTR_NORMAL,
4795                         access_tx_config_parity_err_cnt),
4796 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4797                         CNTR_NORMAL,
4798                         access_tx_sbrd_ctl_csr_parity_err_cnt),
4799 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4800                         CNTR_NORMAL,
4801                         access_tx_launch_csr_parity_err_cnt),
4802 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4803                         CNTR_NORMAL,
4804                         access_tx_illegal_vl_err_cnt),
4805 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4806                         "TxSbrdCtlStateMachineParityErr", 0, 0,
4807                         CNTR_NORMAL,
4808                         access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4809 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4810                         CNTR_NORMAL,
4811                         access_egress_reserved_10_err_cnt),
4812 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4813                         CNTR_NORMAL,
4814                         access_egress_reserved_9_err_cnt),
4815 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4816                         0, 0, CNTR_NORMAL,
4817                         access_tx_sdma_launch_intf_parity_err_cnt),
4818 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4819                         CNTR_NORMAL,
4820                         access_tx_pio_launch_intf_parity_err_cnt),
4821 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4822                         CNTR_NORMAL,
4823                         access_egress_reserved_6_err_cnt),
4824 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4825                         CNTR_NORMAL,
4826                         access_tx_incorrect_link_state_err_cnt),
4827 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4828                         CNTR_NORMAL,
4829                         access_tx_linkdown_err_cnt),
4830 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4831                         "EgressFifoUnderrunOrParityErr", 0, 0,
4832                         CNTR_NORMAL,
4833                         access_tx_egress_fifi_underrun_or_parity_err_cnt),
4834 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4835                         CNTR_NORMAL,
4836                         access_egress_reserved_2_err_cnt),
4837 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4838                         CNTR_NORMAL,
4839                         access_tx_pkt_integrity_mem_unc_err_cnt),
4840 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4841                         CNTR_NORMAL,
4842                         access_tx_pkt_integrity_mem_cor_err_cnt),
4843 /* SendErrStatus */
4844 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4845                         CNTR_NORMAL,
4846                         access_send_csr_write_bad_addr_err_cnt),
4847 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4848                         CNTR_NORMAL,
4849                         access_send_csr_read_bad_addr_err_cnt),
4850 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4851                         CNTR_NORMAL,
4852                         access_send_csr_parity_cnt),
4853 /* SendCtxtErrStatus */
4854 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4855                         CNTR_NORMAL,
4856                         access_pio_write_out_of_bounds_err_cnt),
4857 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4858                         CNTR_NORMAL,
4859                         access_pio_write_overflow_err_cnt),
4860 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4861                         0, 0, CNTR_NORMAL,
4862                         access_pio_write_crosses_boundary_err_cnt),
4863 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4864                         CNTR_NORMAL,
4865                         access_pio_disallowed_packet_err_cnt),
4866 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4867                         CNTR_NORMAL,
4868                         access_pio_inconsistent_sop_err_cnt),
4869 /* SendDmaEngErrStatus */
4870 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4871                         0, 0, CNTR_NORMAL,
4872                         access_sdma_header_request_fifo_cor_err_cnt),
4873 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4874                         CNTR_NORMAL,
4875                         access_sdma_header_storage_cor_err_cnt),
4876 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4877                         CNTR_NORMAL,
4878                         access_sdma_packet_tracking_cor_err_cnt),
4879 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4880                         CNTR_NORMAL,
4881                         access_sdma_assembly_cor_err_cnt),
4882 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4883                         CNTR_NORMAL,
4884                         access_sdma_desc_table_cor_err_cnt),
4885 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4886                         0, 0, CNTR_NORMAL,
4887                         access_sdma_header_request_fifo_unc_err_cnt),
4888 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4889                         CNTR_NORMAL,
4890                         access_sdma_header_storage_unc_err_cnt),
4891 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4892                         CNTR_NORMAL,
4893                         access_sdma_packet_tracking_unc_err_cnt),
4894 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4895                         CNTR_NORMAL,
4896                         access_sdma_assembly_unc_err_cnt),
4897 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4898                         CNTR_NORMAL,
4899                         access_sdma_desc_table_unc_err_cnt),
4900 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4901                         CNTR_NORMAL,
4902                         access_sdma_timeout_err_cnt),
4903 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4904                         CNTR_NORMAL,
4905                         access_sdma_header_length_err_cnt),
4906 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4907                         CNTR_NORMAL,
4908                         access_sdma_header_address_err_cnt),
4909 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4910                         CNTR_NORMAL,
4911                         access_sdma_header_select_err_cnt),
4912 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4913                         CNTR_NORMAL,
4914                         access_sdma_reserved_9_err_cnt),
4915 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4916                         CNTR_NORMAL,
4917                         access_sdma_packet_desc_overflow_err_cnt),
4918 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4919                         CNTR_NORMAL,
4920                         access_sdma_length_mismatch_err_cnt),
4921 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4922                         CNTR_NORMAL,
4923                         access_sdma_halt_err_cnt),
4924 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4925                         CNTR_NORMAL,
4926                         access_sdma_mem_read_err_cnt),
4927 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4928                         CNTR_NORMAL,
4929                         access_sdma_first_desc_err_cnt),
4930 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4931                         CNTR_NORMAL,
4932                         access_sdma_tail_out_of_bounds_err_cnt),
4933 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4934                         CNTR_NORMAL,
4935                         access_sdma_too_long_err_cnt),
4936 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4937                         CNTR_NORMAL,
4938                         access_sdma_gen_mismatch_err_cnt),
4939 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4940                         CNTR_NORMAL,
4941                         access_sdma_wrong_dw_err_cnt),
4942 };
4943
4944 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4945 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4946                         CNTR_NORMAL),
4947 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4948                         CNTR_NORMAL),
4949 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4950                         CNTR_NORMAL),
4951 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4952                         CNTR_NORMAL),
4953 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4954                         CNTR_NORMAL),
4955 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4956                         CNTR_NORMAL),
4957 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4958                         CNTR_NORMAL),
4959 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4960 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4961 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4962 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4963                                       CNTR_SYNTH | CNTR_VL),
4964 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4965                                      CNTR_SYNTH | CNTR_VL),
4966 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4967                                       CNTR_SYNTH | CNTR_VL),
4968 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4969 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4970 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4971                              access_sw_link_dn_cnt),
4972 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4973                            access_sw_link_up_cnt),
4974 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4975                                  access_sw_unknown_frame_cnt),
4976 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4977                              access_sw_xmit_discards),
4978 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4979                                 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4980                                 access_sw_xmit_discards),
4981 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4982                                  access_xmit_constraint_errs),
4983 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4984                                 access_rcv_constraint_errs),
4985 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4986 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4987 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4988 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4989 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4990 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4991 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4992 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4993 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4994 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4995 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
4996 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
4997 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
4998                                access_sw_cpu_rc_acks),
4999 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5000                                 access_sw_cpu_rc_qacks),
5001 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5002                                        access_sw_cpu_rc_delayed_comp),
5003 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5004 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5005 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5006 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5007 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5008 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5009 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5010 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5011 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5012 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5013 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5014 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5015 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5016 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5017 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5018 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5019 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5020 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5021 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5022 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5023 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5024 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5025 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5026 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5027 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5028 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5029 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5030 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5031 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5032 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5033 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5034 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5035 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5036 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5037 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5038 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5039 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5040 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5041 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5042 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5043 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5044 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5045 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5046 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5047 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5048 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5049 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5050 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5051 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5052 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5053 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5054 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5055 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5056 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5057 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5058 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5059 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5060 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5061 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5062 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5063 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5064 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5065 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5066 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5067 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5068 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5069 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5070 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5071 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5072 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5073 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5074 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5075 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5076 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5077 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5078 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5079 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5080 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5081 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5082 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5083 };
5084
5085 /* ======================================================================== */
5086
5087 /* return true if this is chip revision a */
5088 int is_ax(struct hfi1_devdata *dd)
5089 {
5090         u8 chip_rev_minor =
5091                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5092                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5093         return (chip_rev_minor & 0xf0) == 0;
5094 }
5095
5096 /* return true if this is chip revision b */
5097 int is_bx(struct hfi1_devdata *dd)
5098 {
5099         u8 chip_rev_minor =
5100                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5101                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5102         return (chip_rev_minor & 0xf0) == 0x10;
5103 }
5104
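/*
 * Editorial note (not part of the original source): a worked example of the
 * revision decode above.  Only the upper nibble of the minor chip revision
 * matters here: a minor revision of 0x02 has upper nibble 0, so is_ax()
 * returns true, while 0x10 has upper nibble 1, so is_bx() returns true.
 */
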
5105 /*
5106  * Append string s to buffer buf.  Arguments curp and len are the current
5107  * Append string s to buffer buf.  Arguments curp and lenp are the current
5108  *
5109  * return 0 on success, 1 on out of room
5110  */
5111 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5112 {
5113         char *p = *curp;
5114         int len = *lenp;
5115         int result = 0; /* success */
5116         char c;
5117
5118         /* add a comma, if this is not the first string in the buffer */
5119         if (p != buf) {
5120                 if (len == 0) {
5121                         result = 1; /* out of room */
5122                         goto done;
5123                 }
5124                 *p++ = ',';
5125                 len--;
5126         }
5127
5128         /* copy the string */
5129         while ((c = *s++) != 0) {
5130                 if (len == 0) {
5131                         result = 1; /* out of room */
5132                         goto done;
5133                 }
5134                 *p++ = c;
5135                 len--;
5136         }
5137
5138 done:
5139         /* write return values */
5140         *curp = p;
5141         *lenp = len;
5142
5143         return result;
5144 }
5145
5146 /*
5147  * Using the given flag table, print a comma-separated string into
5148  * the buffer.  End in '*' if the buffer is too short.
5149  */
5150 static char *flag_string(char *buf, int buf_len, u64 flags,
5151                          struct flag_table *table, int table_size)
5152 {
5153         char extra[32];
5154         char *p = buf;
5155         int len = buf_len;
5156         int no_room = 0;
5157         int i;
5158
5159         /* make sure there are at least 2 so we can form "*" */
5160         if (len < 2)
5161                 return "";
5162
5163         len--;  /* leave room for a nul */
5164         for (i = 0; i < table_size; i++) {
5165                 if (flags & table[i].flag) {
5166                         no_room = append_str(buf, &p, &len, table[i].str);
5167                         if (no_room)
5168                                 break;
5169                         flags &= ~table[i].flag;
5170                 }
5171         }
5172
5173         /* any undocumented bits left? */
5174         if (!no_room && flags) {
5175                 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5176                 no_room = append_str(buf, &p, &len, extra);
5177         }
5178
5179         /* add '*' if we ran out of room */
5180         if (no_room) {
5181                 /* may need to back up to add space for a '*' */
5182                 if (len == 0)
5183                         --p;
5184                 *p++ = '*';
5185         }
5186
5187         /* add final nul - space already allocated above */
5188         *p = 0;
5189         return buf;
5190 }
5191
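/*
 * Editorial sketch (not part of the original source): an example of how
 * flag_string() decodes a raw error-status value against a flag table.
 * The table and values below are hypothetical; the real tables (e.g.
 * cce_err_status_flags) are defined elsewhere in this file.
 */
#if 0
static struct flag_table example_flags[] = {
	{ .flag = 1ull << 0, .str = "ExampleErr0" },
	{ .flag = 1ull << 3, .str = "ExampleErr3" },
};

static void example_flag_decode(struct hfi1_devdata *dd)
{
	char buf[64];

	/* bits 0 and 3 are named; the leftover bit 8 prints as "bits 0x100" */
	dd_dev_info(dd, "decoded: %s\n",
		    flag_string(buf, sizeof(buf), 0x109, example_flags,
				ARRAY_SIZE(example_flags)));
}
#endif
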
5192 /* first 8 CCE error interrupt source names */
5193 static const char * const cce_misc_names[] = {
5194         "CceErrInt",            /* 0 */
5195         "RxeErrInt",            /* 1 */
5196         "MiscErrInt",           /* 2 */
5197         "Reserved3",            /* 3 */
5198         "PioErrInt",            /* 4 */
5199         "SDmaErrInt",           /* 5 */
5200         "EgressErrInt",         /* 6 */
5201         "TxeErrInt"             /* 7 */
5202 };
5203
5204 /*
5205  * Return the miscellaneous error interrupt name.
5206  */
5207 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5208 {
5209         if (source < ARRAY_SIZE(cce_misc_names))
5210                 strncpy(buf, cce_misc_names[source], bsize);
5211         else
5212                 snprintf(buf, bsize, "Reserved%u",
5213                          source + IS_GENERAL_ERR_START);
5214
5215         return buf;
5216 }
5217
5218 /*
5219  * Return the SDMA engine error interrupt name.
5220  */
5221 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5222 {
5223         snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5224         return buf;
5225 }
5226
5227 /*
5228  * Return the send context error interrupt name.
5229  */
5230 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5231 {
5232         snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5233         return buf;
5234 }
5235
5236 static const char * const various_names[] = {
5237         "PbcInt",
5238         "GpioAssertInt",
5239         "Qsfp1Int",
5240         "Qsfp2Int",
5241         "TCritInt"
5242 };
5243
5244 /*
5245  * Return the various interrupt name.
5246  */
5247 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5248 {
5249         if (source < ARRAY_SIZE(various_names))
5250                 strncpy(buf, various_names[source], bsize);
5251         else
5252                 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5253         return buf;
5254 }
5255
5256 /*
5257  * Return the DC interrupt name.
5258  */
5259 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5260 {
5261         static const char * const dc_int_names[] = {
5262                 "common",
5263                 "lcb",
5264                 "8051",
5265                 "lbm"   /* local block merge */
5266         };
5267
5268         if (source < ARRAY_SIZE(dc_int_names))
5269                 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5270         else
5271                 snprintf(buf, bsize, "DCInt%u", source);
5272         return buf;
5273 }
5274
5275 static const char * const sdma_int_names[] = {
5276         "SDmaInt",
5277         "SdmaIdleInt",
5278         "SdmaProgressInt",
5279 };
5280
5281 /*
5282  * Return the SDMA engine interrupt name.
5283  */
5284 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5285 {
5286         /* what interrupt */
5287         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5288         /* which engine */
5289         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5290
5291         if (likely(what < 3))
5292                 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5293         else
5294                 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5295         return buf;
5296 }
5297
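/*
 * Editorial note (not part of the original source): a worked example of the
 * source decode above, assuming TXE_NUM_SDMA_ENGINES == 16.  A source value
 * of 17 gives what = 17 / 16 = 1 and which = 17 % 16 = 1, so the name is
 * "SdmaIdleInt1"; any source with what >= 3 is reported as invalid.
 */
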
5298 /*
5299  * Return the receive available interrupt name.
5300  */
5301 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5302 {
5303         snprintf(buf, bsize, "RcvAvailInt%u", source);
5304         return buf;
5305 }
5306
5307 /*
5308  * Return the receive urgent interrupt name.
5309  */
5310 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5311 {
5312         snprintf(buf, bsize, "RcvUrgentInt%u", source);
5313         return buf;
5314 }
5315
5316 /*
5317  * Return the send credit interrupt name.
5318  */
5319 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5320 {
5321         snprintf(buf, bsize, "SendCreditInt%u", source);
5322         return buf;
5323 }
5324
5325 /*
5326  * Return the reserved interrupt name.
5327  */
5328 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5329 {
5330         snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5331         return buf;
5332 }
5333
5334 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5335 {
5336         return flag_string(buf, buf_len, flags,
5337                            cce_err_status_flags,
5338                            ARRAY_SIZE(cce_err_status_flags));
5339 }
5340
5341 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5342 {
5343         return flag_string(buf, buf_len, flags,
5344                            rxe_err_status_flags,
5345                            ARRAY_SIZE(rxe_err_status_flags));
5346 }
5347
5348 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5349 {
5350         return flag_string(buf, buf_len, flags, misc_err_status_flags,
5351                            ARRAY_SIZE(misc_err_status_flags));
5352 }
5353
5354 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5355 {
5356         return flag_string(buf, buf_len, flags,
5357                            pio_err_status_flags,
5358                            ARRAY_SIZE(pio_err_status_flags));
5359 }
5360
5361 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5362 {
5363         return flag_string(buf, buf_len, flags,
5364                            sdma_err_status_flags,
5365                            ARRAY_SIZE(sdma_err_status_flags));
5366 }
5367
5368 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5369 {
5370         return flag_string(buf, buf_len, flags,
5371                            egress_err_status_flags,
5372                            ARRAY_SIZE(egress_err_status_flags));
5373 }
5374
5375 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5376 {
5377         return flag_string(buf, buf_len, flags,
5378                            egress_err_info_flags,
5379                            ARRAY_SIZE(egress_err_info_flags));
5380 }
5381
5382 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5383 {
5384         return flag_string(buf, buf_len, flags,
5385                            send_err_status_flags,
5386                            ARRAY_SIZE(send_err_status_flags));
5387 }
5388
5389 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5390 {
5391         char buf[96];
5392         int i = 0;
5393
5394         /*
5395          * For most these errors, there is nothing that can be done except
5396          * report or record it.
5397          */
5398         dd_dev_info(dd, "CCE Error: %s\n",
5399                     cce_err_status_string(buf, sizeof(buf), reg));
5400
5401         if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5402             is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5403                 /* this error requires a manual drop into SPC freeze mode */
5404                 /* then a fix up */
5405                 start_freeze_handling(dd->pport, FREEZE_SELF);
5406         }
5407
5408         for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5409                 if (reg & (1ull << i)) {
5410                         incr_cntr64(&dd->cce_err_status_cnt[i]);
5411                         /* maintain a counter over all cce_err_status errors */
5412                         incr_cntr64(&dd->sw_cce_err_status_aggregate);
5413                 }
5414         }
5415 }
5416
5417 /*
5418  * Check counters for receive errors that do not have an interrupt
5419  * associated with them.
5420  */
5421 #define RCVERR_CHECK_TIME 10
5422 static void update_rcverr_timer(unsigned long opaque)
5423 {
5424         struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5425         struct hfi1_pportdata *ppd = dd->pport;
5426         u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5427
5428         if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5429             ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5430                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5431                 set_link_down_reason(
5432                 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5433                 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5434                 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5435         }
5436         dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5437
5438         mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5439 }
5440
5441 static int init_rcverr(struct hfi1_devdata *dd)
5442 {
5443         setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5444         /* Assume the hardware counter has been reset */
5445         dd->rcv_ovfl_cnt = 0;
5446         return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5447 }
5448
5449 static void free_rcverr(struct hfi1_devdata *dd)
5450 {
5451         if (dd->rcverr_timer.data)
5452                 del_timer_sync(&dd->rcverr_timer);
5453         dd->rcverr_timer.data = 0;
5454 }
5455
5456 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5457 {
5458         char buf[96];
5459         int i = 0;
5460
5461         dd_dev_info(dd, "Receive Error: %s\n",
5462                     rxe_err_status_string(buf, sizeof(buf), reg));
5463
5464         if (reg & ALL_RXE_FREEZE_ERR) {
5465                 int flags = 0;
5466
5467                 /*
5468                  * Freeze mode recovery is disabled for the errors
5469                  * in RXE_FREEZE_ABORT_MASK
5470                  */
5471                 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5472                         flags = FREEZE_ABORT;
5473
5474                 start_freeze_handling(dd->pport, flags);
5475         }
5476
5477         for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5478                 if (reg & (1ull << i))
5479                         incr_cntr64(&dd->rcv_err_status_cnt[i]);
5480         }
5481 }
5482
5483 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5484 {
5485         char buf[96];
5486         int i = 0;
5487
5488         dd_dev_info(dd, "Misc Error: %s",
5489                     misc_err_status_string(buf, sizeof(buf), reg));
5490         for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5491                 if (reg & (1ull << i))
5492                         incr_cntr64(&dd->misc_err_status_cnt[i]);
5493         }
5494 }
5495
5496 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5497 {
5498         char buf[96];
5499         int i = 0;
5500
5501         dd_dev_info(dd, "PIO Error: %s\n",
5502                     pio_err_status_string(buf, sizeof(buf), reg));
5503
5504         if (reg & ALL_PIO_FREEZE_ERR)
5505                 start_freeze_handling(dd->pport, 0);
5506
5507         for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5508                 if (reg & (1ull << i))
5509                         incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5510         }
5511 }
5512
5513 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5514 {
5515         char buf[96];
5516         int i = 0;
5517
5518         dd_dev_info(dd, "SDMA Error: %s\n",
5519                     sdma_err_status_string(buf, sizeof(buf), reg));
5520
5521         if (reg & ALL_SDMA_FREEZE_ERR)
5522                 start_freeze_handling(dd->pport, 0);
5523
5524         for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5525                 if (reg & (1ull << i))
5526                         incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5527         }
5528 }
5529
5530 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5531 {
5532         incr_cntr64(&ppd->port_xmit_discards);
5533 }
5534
5535 static void count_port_inactive(struct hfi1_devdata *dd)
5536 {
5537         __count_port_discards(dd->pport);
5538 }
5539
5540 /*
5541  * We have had a "disallowed packet" error during egress. Determine the
5542  * integrity check that failed, and update the relevant error counter, etc.
5543  *
5544  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5545  * bit of state per integrity check, and so we can miss the reason for an
5546  * egress error if more than one packet fails the same integrity check
5547  * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5548  */
5549 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5550                                         int vl)
5551 {
5552         struct hfi1_pportdata *ppd = dd->pport;
5553         u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5554         u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5555         char buf[96];
5556
5557         /* clear down all observed info as quickly as possible after read */
5558         write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5559
5560         dd_dev_info(dd,
5561                     "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5562                     info, egress_err_info_string(buf, sizeof(buf), info), src);
5563
5564         /* Eventually add other counters for each bit */
5565         if (info & PORT_DISCARD_EGRESS_ERRS) {
5566                 int weight, i;
5567
5568                 /*
5569                  * Count all applicable bits as individual errors and
5570                  * attribute them to the packet that triggered this handler.
5571                  * This may not be completely accurate due to limitations
5572                  * on the available hardware error information.  There is
5573                  * a single information register and any number of error
5574                  * packets may have occurred and contributed to it before
5575                  * this routine is called.  This means that:
5576                  * a) If multiple packets with the same error occur before
5577                  *    this routine is called, earlier packets are missed.
5578                  *    There is only a single bit for each error type.
5579                  * b) Errors may not be attributed to the correct VL.
5580                  *    The driver is attributing all bits in the info register
5581                  *    to the packet that triggered this call, but bits
5582                  *    could be an accumulation of different packets with
5583                  *    different VLs.
5584                  * c) A single error packet may have multiple counts attached
5585                  *    to it.  There is no way for the driver to know if
5586                  *    multiple bits set in the info register are due to a
5587                  *    single packet or multiple packets.  The driver assumes
5588                  *    multiple packets.
5589                  */
5590                 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5591                 for (i = 0; i < weight; i++) {
5592                         __count_port_discards(ppd);
5593                         if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5594                                 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5595                         else if (vl == 15)
5596                                 incr_cntr64(&ppd->port_xmit_discards_vl
5597                                             [C_VL_15]);
5598                 }
5599         }
5600 }
5601
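/*
 * Editorial note (not part of the original source): a worked example of the
 * discard accounting above.  If PORT_DISCARD_EGRESS_ERRS selects three bits
 * that are set in the info register, hweight64() returns 3 and the loop adds
 * three discards to the port counter, all attributed to the single VL passed
 * in -- even though, as the comment explains, those bits may actually have
 * come from different packets on different VLs.
 */
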
5602 /*
5603  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5604  * register. Does it represent a 'port inactive' error?
5605  */
5606 static inline int port_inactive_err(u64 posn)
5607 {
5608         return (posn >= SEES(TX_LINKDOWN) &&
5609                 posn <= SEES(TX_INCORRECT_LINK_STATE));
5610 }
5611
5612 /*
5613  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5614  * register. Does it represent a 'disallowed packet' error?
5615  */
5616 static inline int disallowed_pkt_err(int posn)
5617 {
5618         return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5619                 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5620 }
5621
5622 /*
5623  * Input value is a bit position of one of the SDMA engine disallowed
5624  * packet errors.  Return which engine.  Use of this must be guarded by
5625  * disallowed_pkt_err().
5626  */
5627 static inline int disallowed_pkt_engine(int posn)
5628 {
5629         return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5630 }
5631
5632 /*
5633  * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5634  * be done.
5635  */
5636 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5637 {
5638         struct sdma_vl_map *m;
5639         int vl;
5640
5641         /* range check */
5642         if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5643                 return -1;
5644
5645         rcu_read_lock();
5646         m = rcu_dereference(dd->sdma_map);
5647         vl = m->engine_to_vl[engine];
5648         rcu_read_unlock();
5649
5650         return vl;
5651 }
5652
5653 /*
5654  * Translate the send context (software index) into a VL.  Return -1 if the
5655  * translation cannot be done.
5656  */
5657 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5658 {
5659         struct send_context_info *sci;
5660         struct send_context *sc;
5661         int i;
5662
5663         sci = &dd->send_contexts[sw_index];
5664
5665         /* there is no information for user (PSM) and ack contexts */
5666         if (sci->type != SC_KERNEL)
5667                 return -1;
5668
5669         sc = sci->sc;
5670         if (!sc)
5671                 return -1;
5672         if (dd->vld[15].sc == sc)
5673                 return 15;
5674         for (i = 0; i < num_vls; i++)
5675                 if (dd->vld[i].sc == sc)
5676                         return i;
5677
5678         return -1;
5679 }
5680
5681 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5682 {
5683         u64 reg_copy = reg, handled = 0;
5684         char buf[96];
5685         int i = 0;
5686
5687         if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5688                 start_freeze_handling(dd->pport, 0);
5689         else if (is_ax(dd) &&
5690                  (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5691                  (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5692                 start_freeze_handling(dd->pport, 0);
5693
5694         while (reg_copy) {
5695                 int posn = fls64(reg_copy);
5696                 /* fls64() returns a 1-based offset; we want it zero-based */
5697                 int shift = posn - 1;
5698                 u64 mask = 1ULL << shift;
5699
5700                 if (port_inactive_err(shift)) {
5701                         count_port_inactive(dd);
5702                         handled |= mask;
5703                 } else if (disallowed_pkt_err(shift)) {
5704                         int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5705
5706                         handle_send_egress_err_info(dd, vl);
5707                         handled |= mask;
5708                 }
5709                 reg_copy &= ~mask;
5710         }
5711
5712         reg &= ~handled;
5713
5714         if (reg)
5715                 dd_dev_info(dd, "Egress Error: %s\n",
5716                             egress_err_status_string(buf, sizeof(buf), reg));
5717
5718         for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5719                 if (reg & (1ull << i))
5720                         incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5721         }
5722 }
5723
5724 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5725 {
5726         char buf[96];
5727         int i = 0;
5728
5729         dd_dev_info(dd, "Send Error: %s\n",
5730                     send_err_status_string(buf, sizeof(buf), reg));
5731
5732         for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5733                 if (reg & (1ull << i))
5734                         incr_cntr64(&dd->send_err_status_cnt[i]);
5735         }
5736 }
5737
5738 /*
5739  * The maximum number of times the error clear down will loop before
5740  * blocking a repeating error.  This value is arbitrary.
5741  */
5742 #define MAX_CLEAR_COUNT 20
5743
5744 /*
5745  * Clear and handle an error register.  All error interrupts are funneled
5746  * through here to have a central location to correctly handle single-
5747  * or multi-shot errors.
5748  *
5749  * For non-per-context registers, call this routine with a context value
5750  * of 0 so the per-context offset is zero.
5751  *
5752  * If the handler loops too many times, assume that something is wrong
5753  * and can't be fixed, so mask the error bits.
5754  */
5755 static void interrupt_clear_down(struct hfi1_devdata *dd,
5756                                  u32 context,
5757                                  const struct err_reg_info *eri)
5758 {
5759         u64 reg;
5760         u32 count;
5761
5762         /* read in a loop until no more errors are seen */
5763         count = 0;
5764         while (1) {
5765                 reg = read_kctxt_csr(dd, context, eri->status);
5766                 if (reg == 0)
5767                         break;
5768                 write_kctxt_csr(dd, context, eri->clear, reg);
5769                 if (likely(eri->handler))
5770                         eri->handler(dd, context, reg);
5771                 count++;
5772                 if (count > MAX_CLEAR_COUNT) {
5773                         u64 mask;
5774
5775                         dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5776                                    eri->desc, reg);
5777                         /*
5778                          * Read-modify-write so any other masked bits
5779                          * remain masked.
5780                          */
5781                         mask = read_kctxt_csr(dd, context, eri->mask);
5782                         mask &= ~reg;
5783                         write_kctxt_csr(dd, context, eri->mask, mask);
5784                         break;
5785                 }
5786         }
5787 }
5788
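/*
 * Editorial sketch (not part of the original source): roughly how an
 * err_reg_info entry feeds interrupt_clear_down().  The field names below
 * (status, clear, mask, desc, handler) are those referenced by the routine
 * above; the CSR offsets and the table placement are hypothetical -- the
 * real entries live in tables such as misc_errs elsewhere in this file.
 */
#if 0
static const struct err_reg_info example_eri = {
	.status  = EXAMPLE_ERR_STATUS,	/* hypothetical status CSR */
	.clear   = EXAMPLE_ERR_CLEAR,	/* hypothetical clear CSR */
	.mask    = EXAMPLE_ERR_MASK,	/* hypothetical mask CSR */
	.desc    = "ExampleErr",
	.handler = handle_cce_err,	/* any handler with this signature */
};

static void example_clear_down(struct hfi1_devdata *dd)
{
	/* context 0: this is not a per-context register */
	interrupt_clear_down(dd, 0, &example_eri);
}
#endif
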
5789 /*
5790  * CCE block "misc" interrupt.  Source is < 16.
5791  */
5792 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5793 {
5794         const struct err_reg_info *eri = &misc_errs[source];
5795
5796         if (eri->handler) {
5797                 interrupt_clear_down(dd, 0, eri);
5798         } else {
5799                 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5800                            source);
5801         }
5802 }
5803
5804 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5805 {
5806         return flag_string(buf, buf_len, flags,
5807                            sc_err_status_flags,
5808                            ARRAY_SIZE(sc_err_status_flags));
5809 }
5810
5811 /*
5812  * Send context error interrupt.  Source (hw_context) is < 160.
5813  *
5814  * All send context errors cause the send context to halt.  The normal
5815  * clear-down mechanism cannot be used because we cannot clear the
5816  * error bits until several other long-running items are done first.
5817  * This is OK because with the context halted, nothing else is going
5818  * to happen on it anyway.
5819  */
5820 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5821                                 unsigned int hw_context)
5822 {
5823         struct send_context_info *sci;
5824         struct send_context *sc;
5825         char flags[96];
5826         u64 status;
5827         u32 sw_index;
5828         int i = 0;
5829
5830         sw_index = dd->hw_to_sw[hw_context];
5831         if (sw_index >= dd->num_send_contexts) {
5832                 dd_dev_err(dd,
5833                            "out of range sw index %u for send context %u\n",
5834                            sw_index, hw_context);
5835                 return;
5836         }
5837         sci = &dd->send_contexts[sw_index];
5838         sc = sci->sc;
5839         if (!sc) {
5840                 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5841                            sw_index, hw_context);
5842                 return;
5843         }
5844
5845         /* tell the software that a halt has begun */
5846         sc_stop(sc, SCF_HALTED);
5847
5848         status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5849
5850         dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5851                     send_context_err_status_string(flags, sizeof(flags),
5852                                                    status));
5853
5854         if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5855                 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5856
5857         /*
5858          * Automatically restart halted kernel contexts out of interrupt
5859          * context.  User contexts must ask the driver to restart the context.
5860          */
5861         if (sc->type != SC_USER)
5862                 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5863
5864         /*
5865          * Update the counters for the corresponding status bits.
5866          * Note that these particular counters are aggregated over all
5867          * 160 contexts.
5868          */
5869         for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5870                 if (status & (1ull << i))
5871                         incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5872         }
5873 }
5874
5875 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5876                                 unsigned int source, u64 status)
5877 {
5878         struct sdma_engine *sde;
5879         int i = 0;
5880
5881         sde = &dd->per_sdma[source];
5882 #ifdef CONFIG_SDMA_VERBOSITY
5883         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5884                    slashstrip(__FILE__), __LINE__, __func__);
5885         dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5886                    sde->this_idx, source, (unsigned long long)status);
5887 #endif
5888         sde->err_cnt++;
5889         sdma_engine_error(sde, status);
5890
5891         /*
5892          * Update the counters for the corresponding status bits.
5893          * Note that these particular counters are aggregated over
5894          * all 16 DMA engines.
5895          */
5896         for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5897                 if (status & (1ull << i))
5898                         incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5899         }
5900 }
5901
5902 /*
5903  * CCE block SDMA error interrupt.  Source is < 16.
5904  */
5905 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5906 {
5907 #ifdef CONFIG_SDMA_VERBOSITY
5908         struct sdma_engine *sde = &dd->per_sdma[source];
5909
5910         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5911                    slashstrip(__FILE__), __LINE__, __func__);
5912         dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5913                    source);
5914         sdma_dumpstate(sde);
5915 #endif
5916         interrupt_clear_down(dd, source, &sdma_eng_err);
5917 }
5918
5919 /*
5920  * CCE block "various" interrupt.  Source is < 8.
5921  */
5922 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5923 {
5924         const struct err_reg_info *eri = &various_err[source];
5925
5926         /*
5927          * TCritInt cannot go through interrupt_clear_down()
5928          * because it is not a second tier interrupt. The handler
5929          * should be called directly.
5930          */
5931         if (source == TCRIT_INT_SOURCE)
5932                 handle_temp_err(dd);
5933         else if (eri->handler)
5934                 interrupt_clear_down(dd, 0, eri);
5935         else
5936                 dd_dev_info(dd,
5937                             "%s: Unimplemented/reserved interrupt %d\n",
5938                             __func__, source);
5939 }
5940
5941 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5942 {
5943         /* src_ctx is always zero */
5944         struct hfi1_pportdata *ppd = dd->pport;
5945         unsigned long flags;
5946         u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5947
5948         if (reg & QSFP_HFI0_MODPRST_N) {
5949                 if (!qsfp_mod_present(ppd)) {
5950                         dd_dev_info(dd, "%s: QSFP module removed\n",
5951                                     __func__);
5952
5953                         ppd->driver_link_ready = 0;
5954                         /*
5955                          * Cable removed, reset all our information about the
5956                          * cache and cable capabilities
5957                          */
5958
5959                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5960                         /*
5961                          * We don't set cache_refresh_required here as we expect
5962                          * an interrupt when a cable is inserted
5963                          */
5964                         ppd->qsfp_info.cache_valid = 0;
5965                         ppd->qsfp_info.reset_needed = 0;
5966                         ppd->qsfp_info.limiting_active = 0;
5967                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5968                                                flags);
5969                         /* Invert the ModPresent pin now to detect plug-in */
5970                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5971                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5972
5973                         if ((ppd->offline_disabled_reason >
5974                           HFI1_ODR_MASK(
5975                           OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
5976                           (ppd->offline_disabled_reason ==
5977                           HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5978                                 ppd->offline_disabled_reason =
5979                                 HFI1_ODR_MASK(
5980                                 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
5981
5982                         if (ppd->host_link_state == HLS_DN_POLL) {
5983                                 /*
5984                                  * The link is still in POLL. This means
5985                                  * that the normal link down processing
5986                                  * will not happen. We have to do it here
5987                                  * before turning the DC off.
5988                                  */
5989                                 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5990                         }
5991                 } else {
5992                         dd_dev_info(dd, "%s: QSFP module inserted\n",
5993                                     __func__);
5994
5995                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5996                         ppd->qsfp_info.cache_valid = 0;
5997                         ppd->qsfp_info.cache_refresh_required = 1;
5998                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5999                                                flags);
6000
6001                         /*
6002                          * Stop inversion of ModPresent pin to detect
6003                          * removal of the cable
6004                          */
6005                         qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6006                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6007                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6008
6009                         ppd->offline_disabled_reason =
6010                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6011                 }
6012         }
6013
6014         if (reg & QSFP_HFI0_INT_N) {
6015                 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6016                             __func__);
6017                 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6018                 ppd->qsfp_info.check_interrupt_flags = 1;
6019                 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6020         }
6021
6022         /* Schedule the QSFP work only if there is a cable attached. */
6023         if (qsfp_mod_present(ppd))
6024                 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6025 }
6026
6027 static int request_host_lcb_access(struct hfi1_devdata *dd)
6028 {
6029         int ret;
6030
6031         ret = do_8051_command(dd, HCMD_MISC,
6032                               (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6033                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6034         if (ret != HCMD_SUCCESS) {
6035                 dd_dev_err(dd, "%s: command failed with error %d\n",
6036                            __func__, ret);
6037         }
6038         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6039 }
6040
6041 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6042 {
6043         int ret;
6044
6045         ret = do_8051_command(dd, HCMD_MISC,
6046                               (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6047                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6048         if (ret != HCMD_SUCCESS) {
6049                 dd_dev_err(dd, "%s: command failed with error %d\n",
6050                            __func__, ret);
6051         }
6052         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6053 }
6054
6055 /*
6056  * Set the LCB selector - allow host access.  The DCC selector always
6057  * points to the host.
6058  */
6059 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6060 {
6061         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6062                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6063                   DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6064 }
6065
6066 /*
6067  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6068  * points to the host.
6069  */
6070 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6071 {
6072         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6073                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6074 }
6075
6076 /*
6077  * Acquire LCB access from the 8051.  If the host already has access,
6078  * just increment a counter.  Otherwise, inform the 8051 that the
6079  * host is taking access.
6080  *
6081  * Returns:
6082  *      0 on success
6083  *      -EBUSY if the 8051 has control and cannot be disturbed
6084  *      -errno if unable to acquire access from the 8051
6085  */
6086 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6087 {
6088         struct hfi1_pportdata *ppd = dd->pport;
6089         int ret = 0;
6090
6091         /*
6092          * Use the host link state lock so the operation of this routine
6093          * { link state check, selector change, count increment } can occur
6094          * as a unit against a link state change.  Otherwise there is a
6095          * race between the state change and the count increment.
6096          */
6097         if (sleep_ok) {
6098                 mutex_lock(&ppd->hls_lock);
6099         } else {
6100                 while (!mutex_trylock(&ppd->hls_lock))
6101                         udelay(1);
6102         }
6103
6104         /* this access is valid only when the link is up */
6105         if ((ppd->host_link_state & HLS_UP) == 0) {
6106                 dd_dev_info(dd, "%s: link state %s not up\n",
6107                             __func__, link_state_name(ppd->host_link_state));
6108                 ret = -EBUSY;
6109                 goto done;
6110         }
6111
6112         if (dd->lcb_access_count == 0) {
6113                 ret = request_host_lcb_access(dd);
6114                 if (ret) {
6115                         dd_dev_err(dd,
6116                                    "%s: unable to acquire LCB access, err %d\n",
6117                                    __func__, ret);
6118                         goto done;
6119                 }
6120                 set_host_lcb_access(dd);
6121         }
6122         dd->lcb_access_count++;
6123 done:
6124         mutex_unlock(&ppd->hls_lock);
6125         return ret;
6126 }
6127
6128 /*
6129  * Release LCB access by decrementing the use count.  If the count is moving
6130  * from 1 to 0, inform the 8051 that it has control back.
6131  *
6132  * Returns:
6133  *      0 on success
6134  *      -errno if unable to release access to the 8051
6135  */
6136 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6137 {
6138         int ret = 0;
6139
6140         /*
6141          * Use the host link state lock because the acquire needed it.
6142          * Here, we only need to keep { selector change, count decrement }
6143          * as a unit.
6144          */
6145         if (sleep_ok) {
6146                 mutex_lock(&dd->pport->hls_lock);
6147         } else {
6148                 while (!mutex_trylock(&dd->pport->hls_lock))
6149                         udelay(1);
6150         }
6151
6152         if (dd->lcb_access_count == 0) {
6153                 dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6154                            __func__);
6155                 goto done;
6156         }
6157
6158         if (dd->lcb_access_count == 1) {
6159                 set_8051_lcb_access(dd);
6160                 ret = request_8051_lcb_access(dd);
6161                 if (ret) {
6162                         dd_dev_err(dd,
6163                                    "%s: unable to release LCB access, err %d\n",
6164                                    __func__, ret);
6165                         /* restore host access if the grant didn't work */
6166                         set_host_lcb_access(dd);
6167                         goto done;
6168                 }
6169         }
6170         dd->lcb_access_count--;
6171 done:
6172         mutex_unlock(&dd->pport->hls_lock);
6173         return ret;
6174 }
6175
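/*
 * Editorial sketch (not part of the original source): the intended
 * acquire/use/release pattern for host LCB CSR access.  The csr argument is
 * a hypothetical DC_LCB_* register offset; sleep_ok is 1 because this caller
 * is allowed to block on the hls_lock mutex.
 */
#if 0
static u64 example_read_lcb_csr(struct hfi1_devdata *dd, u32 csr)
{
	u64 val = 0;

	if (acquire_lcb_access(dd, 1) == 0) {
		val = read_csr(dd, csr);
		release_lcb_access(dd, 1);
	}
	return val;
}
#endif
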
6176 /*
6177  * Initialize LCB access variables and state.  Called during driver load,
6178  * after most of the initialization is finished.
6179  *
6180  * The DC default is LCB access on for the host.  The driver defaults to
6181  * leaving access to the 8051.  Assign access now - this constrains the call
6182  * to this routine to be after all LCB set-up is done.  In particular, after
6183  * hf1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6184  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6185 static void init_lcb_access(struct hfi1_devdata *dd)
6186 {
6187         dd->lcb_access_count = 0;
6188 }
6189
6190 /*
6191  * Write a response back to an 8051 request.
6192  */
6193 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6194 {
6195         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6196                   DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6197                   (u64)return_code <<
6198                   DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6199                   (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6200 }
6201
6202 /*
6203  * Handle host requests from the 8051.
6204  *
6205  * This is a work-queue function outside of the interrupt.
6206  */
6207 void handle_8051_request(struct work_struct *work)
6208 {
6209         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6210                                                         dc_host_req_work);
6211         struct hfi1_devdata *dd = ppd->dd;
6212         u64 reg;
6213         u16 data = 0;
6214         u8 type, i, lanes, *cache = ppd->qsfp_info.cache;
6215         u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
6216
6217         reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6218         if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6219                 return; /* no request */
6220
6221         /* zero out COMPLETED so the response is seen */
6222         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6223
6224         /* extract request details */
6225         type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6226                         & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6227         data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6228                         & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6229
6230         switch (type) {
6231         case HREQ_LOAD_CONFIG:
6232         case HREQ_SAVE_CONFIG:
6233         case HREQ_READ_CONFIG:
6234         case HREQ_SET_TX_EQ_ABS:
6235         case HREQ_SET_TX_EQ_REL:
6236                 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6237                             type);
6238                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6239                 break;
6240
6241         case HREQ_ENABLE:
6242                 lanes = data & 0xF;
6243                 for (i = 0; lanes; lanes >>= 1, i++) {
6244                         if (!(lanes & 1))
6245                                 continue;
6246                         if (data & 0x200) {
6247                                 /* enable TX CDR */
6248                                 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6249                                     cache[QSFP_CDR_INFO_OFFS] & 0x80)
6250                                         cdr_ctrl_byte |= (1 << (i + 4));
6251                         } else {
6252                                 /* disable TX CDR */
6253                                 if (cache[QSFP_MOD_PWR_OFFS] & 0x8 &&
6254                                     cache[QSFP_CDR_INFO_OFFS] & 0x80)
6255                                         cdr_ctrl_byte &= ~(1 << (i + 4));
6256                         }
6257
6258                         if (data & 0x800) {
6259                                 /* enable RX CDR */
6260                                 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6261                                     cache[QSFP_CDR_INFO_OFFS] & 0x40)
6262                                         cdr_ctrl_byte |= (1 << i);
6263                         } else {
6264                                 /* disable RX CDR */
6265                                 if (cache[QSFP_MOD_PWR_OFFS] & 0x4 &&
6266                                     cache[QSFP_CDR_INFO_OFFS] & 0x40)
6267                                         cdr_ctrl_byte &= ~(1 << i);
6268                         }
6269                 }
6270                 qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
6271                            &cdr_ctrl_byte, 1);
6272                 hreq_response(dd, HREQ_SUCCESS, data);
6273                 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
6274                 break;
6275
6276         case HREQ_CONFIG_DONE:
6277                 hreq_response(dd, HREQ_SUCCESS, 0);
6278                 break;
6279
6280         case HREQ_INTERFACE_TEST:
6281                 hreq_response(dd, HREQ_SUCCESS, data);
6282                 break;
6283
6284         default:
6285                 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6286                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6287                 break;
6288         }
6289 }
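
/*
 * Summary of the HREQ_ENABLE data layout as decoded by the handler above
 * (illustrative only, not an authoritative 8051 interface description):
 *      bits [3:0]  - lane mask
 *      bit  9      - enable (1) / disable (0) TX CDR on the selected lanes
 *      bit  11     - enable (1) / disable (0) RX CDR on the selected lanes
 * Example: data = 0xa0f selects all four lanes and asks for both TX and RX
 * CDR, which the loop maps to CDR control byte bits [7:4] (TX) and
 * [3:0] (RX).
 */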
6290
6291 static void write_global_credit(struct hfi1_devdata *dd,
6292                                 u8 vau, u16 total, u16 shared)
6293 {
6294         write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6295                   ((u64)total <<
6296                    SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6297                   ((u64)shared <<
6298                    SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6299                   ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6300 }
6301
6302 /*
6303  * Set up initial VL15 credits of the remote.  Assumes the rest of
6304  * the CM credit registers are zero from a previous global or credit reset.
6305  */
6306 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6307 {
6308         /* leave shared count at zero for both global and VL15 */
6309         write_global_credit(dd, vau, vl15buf, 0);
6310
6311         /* We may need some credits for another VL when sending packets
6312          * with the snoop interface. Dividing it down the middle for VL15
6313          * and VL0 should suffice.
6314          */
6315         if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6316                 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6317                     << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6318                 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6319                     << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6320         } else {
6321                 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6322                         << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6323         }
6324 }
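
/*
 * Illustration of the credit split above (values assumed): with
 * vl15buf = 0x40 in snoop mode, VL15 and VL0 each receive 0x20 dedicated
 * credits; otherwise the full 0x40 is dedicated to VL15.  The shared
 * limit is left at zero in both cases.
 */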
6325
6326 /*
6327  * Zero all credit details from the previous connection and
6328  * reset the CM manager's internal counters.
6329  */
6330 void reset_link_credits(struct hfi1_devdata *dd)
6331 {
6332         int i;
6333
6334         /* remove all previous VL credit limits */
6335         for (i = 0; i < TXE_NUM_DATA_VL; i++)
6336                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6337         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6338         write_global_credit(dd, 0, 0, 0);
6339         /* reset the CM block */
6340         pio_send_control(dd, PSC_CM_RESET);
6341 }
6342
6343 /* convert a vCU to a CU */
6344 static u32 vcu_to_cu(u8 vcu)
6345 {
6346         return 1 << vcu;
6347 }
6348
6349 /* convert a CU to a vCU */
6350 static u8 cu_to_vcu(u32 cu)
6351 {
6352         return ilog2(cu);
6353 }
6354
6355 /* convert a vAU to an AU */
6356 static u32 vau_to_au(u8 vau)
6357 {
6358         return 8 * (1 << vau);
6359 }
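
/*
 * Worked examples for the conversions above (illustrative only):
 *      vCU = 2  ->  CU  = 1 << 2       = 4
 *      CU  = 4  ->  vCU = ilog2(4)     = 2
 *      vAU = 3  ->  AU  = 8 * (1 << 3) = 64 bytes
 */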
6360
6361 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6362 {
6363         ppd->sm_trap_qp = 0x0;
6364         ppd->sa_qp = 0x1;
6365 }
6366
6367 /*
6368  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6369  */
6370 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6371 {
6372         u64 reg;
6373
6374         /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6375         write_csr(dd, DC_LCB_CFG_RUN, 0);
6376         /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6377         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6378                   1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6379         /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6380         dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6381         reg = read_csr(dd, DCC_CFG_RESET);
6382         write_csr(dd, DCC_CFG_RESET, reg |
6383                   (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6384                   (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6385         (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6386         if (!abort) {
6387                 udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6388                 write_csr(dd, DCC_CFG_RESET, reg);
6389                 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6390         }
6391 }
6392
6393 /*
6394  * This routine should be called after the link has been transitioned to
6395  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6396  * reset).
6397  *
6398  * The expectation is that the caller of this routine would have taken
6399  * care of properly transitioning the link into the correct state.
6400  */
6401 static void dc_shutdown(struct hfi1_devdata *dd)
6402 {
6403         unsigned long flags;
6404
6405         spin_lock_irqsave(&dd->dc8051_lock, flags);
6406         if (dd->dc_shutdown) {
6407                 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6408                 return;
6409         }
6410         dd->dc_shutdown = 1;
6411         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6412         /* Shutdown the LCB */
6413         lcb_shutdown(dd, 1);
6414         /*
6415          * Going to OFFLINE would have caused the 8051 to put the
6416          * SerDes into reset already. Just need to shut down the 8051
6417          * itself.
6418          */
6419         write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6420 }
6421
6422 /*
6423  * Calling this after the DC has been brought out of reset should not
6424  * do any damage.
6425  */
6426 static void dc_start(struct hfi1_devdata *dd)
6427 {
6428         unsigned long flags;
6429         int ret;
6430
6431         spin_lock_irqsave(&dd->dc8051_lock, flags);
6432         if (!dd->dc_shutdown)
6433                 goto done;
6434         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6435         /* Take the 8051 out of reset */
6436         write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6437         /* Wait until 8051 is ready */
6438         ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6439         if (ret) {
6440                 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6441                            __func__);
6442         }
6443         /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6444         write_csr(dd, DCC_CFG_RESET, 0x10);
6445         /* lcb_shutdown() with abort=1 does not restore these */
6446         write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6447         spin_lock_irqsave(&dd->dc8051_lock, flags);
6448         dd->dc_shutdown = 0;
6449 done:
6450         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6451 }
6452
6453 /*
6454  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6455  */
6456 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6457 {
6458         u64 rx_radr, tx_radr;
6459         u32 version;
6460
6461         if (dd->icode != ICODE_FPGA_EMULATION)
6462                 return;
6463
6464         /*
6465          * These LCB defaults on emulator _s are good, nothing to do here:
6466          *      LCB_CFG_TX_FIFOS_RADR
6467          *      LCB_CFG_RX_FIFOS_RADR
6468          *      LCB_CFG_LN_DCLK
6469          *      LCB_CFG_IGNORE_LOST_RCLK
6470          */
6471         if (is_emulator_s(dd))
6472                 return;
6473         /* else this is _p */
6474
6475         version = emulator_rev(dd);
6476         if (!is_ax(dd))
6477                 version = 0x2d; /* all B0 use 0x2d or higher settings */
6478
6479         if (version <= 0x12) {
6480                 /* release 0x12 and below */
6481
6482                 /*
6483                  * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6484                  * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6485                  * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6486                  */
6487                 rx_radr =
6488                       0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6489                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6490                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6491                 /*
6492                  * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6493                  * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6494                  */
6495                 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6496         } else if (version <= 0x18) {
6497                 /* release 0x13 up to 0x18 */
6498                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6499                 rx_radr =
6500                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6501                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6502                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6503                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6504         } else if (version == 0x19) {
6505                 /* release 0x19 */
6506                 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6507                 rx_radr =
6508                       0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6509                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6510                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6511                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6512         } else if (version == 0x1a) {
6513                 /* release 0x1a */
6514                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6515                 rx_radr =
6516                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6517                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6518                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6519                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6520                 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6521         } else {
6522                 /* release 0x1b and higher */
6523                 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6524                 rx_radr =
6525                       0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6526                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6527                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6528                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6529         }
6530
6531         write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6532         /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6533         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6534                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6535         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6536 }
6537
6538 /*
6539  * Handle an SMA idle message
6540  *
6541  * This is a work-queue function outside of the interrupt.
6542  */
6543 void handle_sma_message(struct work_struct *work)
6544 {
6545         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6546                                                         sma_message_work);
6547         struct hfi1_devdata *dd = ppd->dd;
6548         u64 msg;
6549         int ret;
6550
6551         /*
6552          * msg is bytes 1-4 of the 40-bit idle message - the command code
6553          * is stripped off
6554          */
6555         ret = read_idle_sma(dd, &msg);
6556         if (ret)
6557                 return;
6558         dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6559         /*
6560          * React to the SMA message.  Byte[1] (0 for us) is the command.
6561          */
6562         switch (msg & 0xff) {
6563         case SMA_IDLE_ARM:
6564                 /*
6565                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6566                  * State Transitions
6567                  *
6568                  * Only expected in INIT or ARMED, discard otherwise.
6569                  */
6570                 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6571                         ppd->neighbor_normal = 1;
6572                 break;
6573         case SMA_IDLE_ACTIVE:
6574                 /*
6575                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6576                  * State Transitions
6577                  *
6578                  * Can activate the node.  Discard otherwise.
6579                  */
6580                 if (ppd->host_link_state == HLS_UP_ARMED &&
6581                     ppd->is_active_optimize_enabled) {
6582                         ppd->neighbor_normal = 1;
6583                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
6584                         if (ret)
6585                                 dd_dev_err(
6586                                         dd,
6587                                         "%s: received Active SMA idle message, couldn't set link to Active\n",
6588                                         __func__);
6589                 }
6590                 break;
6591         default:
6592                 dd_dev_err(dd,
6593                            "%s: received unexpected SMA idle message 0x%llx\n",
6594                            __func__, msg);
6595                 break;
6596         }
6597 }
6598
6599 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6600 {
6601         u64 rcvctrl;
6602         unsigned long flags;
6603
6604         spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6605         rcvctrl = read_csr(dd, RCV_CTRL);
6606         rcvctrl |= add;
6607         rcvctrl &= ~clear;
6608         write_csr(dd, RCV_CTRL, rcvctrl);
6609         spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6610 }
6611
6612 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6613 {
6614         adjust_rcvctrl(dd, add, 0);
6615 }
6616
6617 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6618 {
6619         adjust_rcvctrl(dd, 0, clear);
6620 }
6621
6622 /*
6623  * Called from all interrupt handlers to start handling an SPC freeze.
6624  */
6625 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6626 {
6627         struct hfi1_devdata *dd = ppd->dd;
6628         struct send_context *sc;
6629         int i;
6630
6631         if (flags & FREEZE_SELF)
6632                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6633
6634         /* enter frozen mode */
6635         dd->flags |= HFI1_FROZEN;
6636
6637         /* notify all SDMA engines that they are going into a freeze */
6638         sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6639
6640         /* do halt pre-handling on all enabled send contexts */
6641         for (i = 0; i < dd->num_send_contexts; i++) {
6642                 sc = dd->send_contexts[i].sc;
6643                 if (sc && (sc->flags & SCF_ENABLED))
6644                         sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6645         }
6646
6647         /* Send contexts are frozen. Notify user space */
6648         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6649
6650         if (flags & FREEZE_ABORT) {
6651                 dd_dev_err(dd,
6652                            "Aborted freeze recovery. Please REBOOT system\n");
6653                 return;
6654         }
6655         /* queue non-interrupt handler */
6656         queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6657 }
6658
6659 /*
6660  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6661  * depending on the "freeze" parameter.
6662  *
6663  * No need to return an error if it times out; our only option
6664  * is to proceed anyway.
6665  */
6666 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6667 {
6668         unsigned long timeout;
6669         u64 reg;
6670
6671         timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6672         while (1) {
6673                 reg = read_csr(dd, CCE_STATUS);
6674                 if (freeze) {
6675                         /* waiting until all indicators are set */
6676                         if ((reg & ALL_FROZE) == ALL_FROZE)
6677                                 return; /* all done */
6678                 } else {
6679                         /* waiting until all indicators are clear */
6680                         if ((reg & ALL_FROZE) == 0)
6681                                 return; /* all done */
6682                 }
6683
6684                 if (time_after(jiffies, timeout)) {
6685                         dd_dev_err(dd,
6686                                    "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6687                                    freeze ? "" : "un", reg & ALL_FROZE,
6688                                    freeze ? ALL_FROZE : 0ull);
6689                         return;
6690                 }
6691                 usleep_range(80, 120);
6692         }
6693 }
6694
6695 /*
6696  * Do all freeze handling for the RXE block.
6697  */
6698 static void rxe_freeze(struct hfi1_devdata *dd)
6699 {
6700         int i;
6701
6702         /* disable port */
6703         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6704
6705         /* disable all receive contexts */
6706         for (i = 0; i < dd->num_rcv_contexts; i++)
6707                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6708 }
6709
6710 /*
6711  * Unfreeze handling for the RXE block - kernel contexts only.
6712  * This will also enable the port.  User contexts will do unfreeze
6713  * handling on a per-context basis as they call into the driver.
6714  *
6715  */
6716 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6717 {
6718         u32 rcvmask;
6719         int i;
6720
6721         /* enable all kernel contexts */
6722         for (i = 0; i < dd->n_krcv_queues; i++) {
6723                 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6724                 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6725                 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6726                         HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6727                 hfi1_rcvctrl(dd, rcvmask, i);
6728         }
6729
6730         /* enable port */
6731         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6732 }
6733
6734 /*
6735  * Non-interrupt SPC freeze handling.
6736  *
6737  * This is a work-queue function outside of the triggering interrupt.
6738  */
6739 void handle_freeze(struct work_struct *work)
6740 {
6741         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6742                                                                 freeze_work);
6743         struct hfi1_devdata *dd = ppd->dd;
6744
6745         /* wait for freeze indicators on all affected blocks */
6746         wait_for_freeze_status(dd, 1);
6747
6748         /* SPC is now frozen */
6749
6750         /* do send PIO freeze steps */
6751         pio_freeze(dd);
6752
6753         /* do send DMA freeze steps */
6754         sdma_freeze(dd);
6755
6756         /* do send egress freeze steps - nothing to do */
6757
6758         /* do receive freeze steps */
6759         rxe_freeze(dd);
6760
6761         /*
6762          * Unfreeze the hardware - clear the freeze, wait for each
6763          * block's frozen bit to clear, then clear the frozen flag.
6764          */
6765         write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6766         wait_for_freeze_status(dd, 0);
6767
6768         if (is_ax(dd)) {
6769                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6770                 wait_for_freeze_status(dd, 1);
6771                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6772                 wait_for_freeze_status(dd, 0);
6773         }
6774
6775         /* do send PIO unfreeze steps for kernel contexts */
6776         pio_kernel_unfreeze(dd);
6777
6778         /* do send DMA unfreeze steps */
6779         sdma_unfreeze(dd);
6780
6781         /* do send egress unfreeze steps - nothing to do */
6782
6783         /* do receive unfreeze steps for kernel contexts */
6784         rxe_kernel_unfreeze(dd);
6785
6786         /*
6787          * The unfreeze procedure touches global device registers when
6788          * it disables and re-enables RXE. Mark the device unfrozen
6789          * after all that is done so other parts of the driver waiting
6790          * for the device to unfreeze don't do things out of order.
6791          *
6792          * The above implies that the meaning of HFI1_FROZEN flag is
6793          * "Device has gone into freeze mode and freeze mode handling
6794          * is still in progress."
6795          *
6796          * The flag will be removed when freeze mode processing has
6797          * completed.
6798          */
6799         dd->flags &= ~HFI1_FROZEN;
6800         wake_up(&dd->event_queue);
6801
6802         /* no longer frozen */
6803 }
6804
6805 /*
6806  * Handle a link up interrupt from the 8051.
6807  *
6808  * This is a work-queue function outside of the interrupt.
6809  */
6810 void handle_link_up(struct work_struct *work)
6811 {
6812         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6813                                                   link_up_work);
6814         set_link_state(ppd, HLS_UP_INIT);
6815
6816         /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6817         read_ltp_rtt(ppd->dd);
6818         /*
6819          * OPA specifies that certain counters are cleared on a transition
6820          * to link up, so do that.
6821          */
6822         clear_linkup_counters(ppd->dd);
6823         /*
6824          * And (re)set link up default values.
6825          */
6826         set_linkup_defaults(ppd);
6827
6828         /* enforce link speed enabled */
6829         if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6830                 /* oops - current speed is not enabled, bounce */
6831                 dd_dev_err(ppd->dd,
6832                            "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6833                            ppd->link_speed_active, ppd->link_speed_enabled);
6834                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6835                                      OPA_LINKDOWN_REASON_SPEED_POLICY);
6836                 set_link_state(ppd, HLS_DN_OFFLINE);
6837                 tune_serdes(ppd);
6838                 start_link(ppd);
6839         }
6840 }
6841
6842 /*
6843  * Several pieces of LNI information were cached for SMA in ppd.
6844  * Reset these on link down
6845  */
6846 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6847 {
6848         ppd->neighbor_guid = 0;
6849         ppd->neighbor_port_number = 0;
6850         ppd->neighbor_type = 0;
6851         ppd->neighbor_fm_security = 0;
6852 }
6853
6854 /*
6855  * Handle a link down interrupt from the 8051.
6856  *
6857  * This is a work-queue function outside of the interrupt.
6858  */
6859 void handle_link_down(struct work_struct *work)
6860 {
6861         u8 lcl_reason, neigh_reason = 0;
6862         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6863                                                                 link_down_work);
6864
6865         if ((ppd->host_link_state &
6866              (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6867              ppd->port_type == PORT_TYPE_FIXED)
6868                 ppd->offline_disabled_reason =
6869                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6870
6871         /* Go offline first, then deal with reading/writing through 8051 */
6872         set_link_state(ppd, HLS_DN_OFFLINE);
6873
6874         lcl_reason = 0;
6875         read_planned_down_reason_code(ppd->dd, &neigh_reason);
6876
6877         /*
6878          * If no reason, assume peer-initiated but missed
6879          * LinkGoingDown idle flits.
6880          */
6881         if (neigh_reason == 0)
6882                 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6883
6884         set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6885
6886         reset_neighbor_info(ppd);
6887
6888         /* disable the port */
6889         clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6890
6891         /*
6892          * If there is no cable attached, turn the DC off. Otherwise,
6893          * start the link bring up.
6894          */
6895         if (!qsfp_mod_present(ppd)) {
6896                 dc_shutdown(ppd->dd);
6897         } else {
6898                 tune_serdes(ppd);
6899                 start_link(ppd);
6900         }
6901 }
6902
6903 void handle_link_bounce(struct work_struct *work)
6904 {
6905         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6906                                                         link_bounce_work);
6907
6908         /*
6909          * Only do something if the link is currently up.
6910          */
6911         if (ppd->host_link_state & HLS_UP) {
6912                 set_link_state(ppd, HLS_DN_OFFLINE);
6913                 tune_serdes(ppd);
6914                 start_link(ppd);
6915         } else {
6916                 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6917                             __func__, link_state_name(ppd->host_link_state));
6918         }
6919 }
6920
6921 /*
6922  * Mask conversion: Capability exchange to Port LTP.  The capability
6923  * exchange has an implicit 16b CRC that is mandatory.
6924  */
6925 static int cap_to_port_ltp(int cap)
6926 {
6927         int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
6928
6929         if (cap & CAP_CRC_14B)
6930                 port_ltp |= PORT_LTP_CRC_MODE_14;
6931         if (cap & CAP_CRC_48B)
6932                 port_ltp |= PORT_LTP_CRC_MODE_48;
6933         if (cap & CAP_CRC_12B_16B_PER_LANE)
6934                 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
6935
6936         return port_ltp;
6937 }
6938
6939 /*
6940  * Convert an OPA Port LTP mask to a capability mask
6941  */
6942 int port_ltp_to_cap(int port_ltp)
6943 {
6944         int cap_mask = 0;
6945
6946         if (port_ltp & PORT_LTP_CRC_MODE_14)
6947                 cap_mask |= CAP_CRC_14B;
6948         if (port_ltp & PORT_LTP_CRC_MODE_48)
6949                 cap_mask |= CAP_CRC_48B;
6950         if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
6951                 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
6952
6953         return cap_mask;
6954 }
6955
6956 /*
6957  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
6958  */
6959 static int lcb_to_port_ltp(int lcb_crc)
6960 {
6961         int port_ltp = 0;
6962
6963         if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
6964                 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
6965         else if (lcb_crc == LCB_CRC_48B)
6966                 port_ltp = PORT_LTP_CRC_MODE_48;
6967         else if (lcb_crc == LCB_CRC_14B)
6968                 port_ltp = PORT_LTP_CRC_MODE_14;
6969         else
6970                 port_ltp = PORT_LTP_CRC_MODE_16;
6971
6972         return port_ltp;
6973 }
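
/*
 * Illustration of the mask conversions above (values assumed): a capability
 * exchange mask of CAP_CRC_14B | CAP_CRC_48B becomes the Port LTP mask
 * PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48,
 * while a single active LCB mode of LCB_CRC_14B maps back to just
 * PORT_LTP_CRC_MODE_14.
 */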
6974
6975 /*
6976  * Our neighbor has indicated that we are allowed to act as a fabric
6977  * manager, so place the full management partition key in the second
6978  * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
6979  * that we should already have the limited management partition key in
6980  * array element 1, and also that the port is not yet up when
6981  * add_full_mgmt_pkey() is invoked.
6982  */
6983 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
6984 {
6985         struct hfi1_devdata *dd = ppd->dd;
6986
6987         /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
6988         if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
6989                 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
6990                             __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
6991         ppd->pkeys[2] = FULL_MGMT_P_KEY;
6992         (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
6993 }
6994
6995 /*
6996  * Convert the given link width to the OPA link width bitmask.
6997  */
6998 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
6999 {
7000         switch (width) {
7001         case 0:
7002                 /*
7003                  * Simulator and quick linkup do not set the width.
7004                  * Just set it to 4x without complaint.
7005                  */
7006                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7007                         return OPA_LINK_WIDTH_4X;
7008                 return 0; /* no lanes up */
7009         case 1: return OPA_LINK_WIDTH_1X;
7010         case 2: return OPA_LINK_WIDTH_2X;
7011         case 3: return OPA_LINK_WIDTH_3X;
7012         default:
7013                 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7014                             __func__, width);
7015                 /* fall through */
7016         case 4: return OPA_LINK_WIDTH_4X;
7017         }
7018 }
7019
7020 /*
7021  * Do a population count on the bottom nibble.
7022  */
7023 static const u8 bit_counts[16] = {
7024         0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7025 };
7026
7027 static inline u8 nibble_to_count(u8 nibble)
7028 {
7029         return bit_counts[nibble & 0xf];
7030 }
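
/*
 * Example (illustrative): enable_lane_tx = 0xb (binary 1011) gives
 * nibble_to_count() = 3, i.e. three active lanes.
 */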
7031
7032 /*
7033  * Read the active lane information from the 8051 registers and return
7034  * their widths.
7035  *
7036  * Active lane information is found in these 8051 registers:
7037  *      enable_lane_tx
7038  *      enable_lane_rx
7039  */
7040 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7041                             u16 *rx_width)
7042 {
7043         u16 tx, rx;
7044         u8 enable_lane_rx;
7045         u8 enable_lane_tx;
7046         u8 tx_polarity_inversion;
7047         u8 rx_polarity_inversion;
7048         u8 max_rate;
7049
7050         /* read the active lanes */
7051         read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7052                          &rx_polarity_inversion, &max_rate);
7053         read_local_lni(dd, &enable_lane_rx);
7054
7055         /* convert to counts */
7056         tx = nibble_to_count(enable_lane_tx);
7057         rx = nibble_to_count(enable_lane_rx);
7058
7059         /*
7060          * Set link_speed_active here, overriding what was set in
7061          * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7062          * set the max_rate field in handle_verify_cap until v0.19.
7063          */
7064         if ((dd->icode == ICODE_RTL_SILICON) &&
7065             (dd->dc8051_ver < dc8051_ver(0, 19))) {
7066                 /* max_rate: 0 = 12.5G, 1 = 25G */
7067                 switch (max_rate) {
7068                 case 0:
7069                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7070                         break;
7071                 default:
7072                         dd_dev_err(dd,
7073                                    "%s: unexpected max rate %d, using 25Gb\n",
7074                                    __func__, (int)max_rate);
7075                         /* fall through */
7076                 case 1:
7077                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7078                         break;
7079                 }
7080         }
7081
7082         dd_dev_info(dd,
7083                     "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7084                     enable_lane_tx, tx, enable_lane_rx, rx);
7085         *tx_width = link_width_to_bits(dd, tx);
7086         *rx_width = link_width_to_bits(dd, rx);
7087 }
7088
7089 /*
7090  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7091  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7092  * after link up.  I.e. look elsewhere for downgrade information.
7093  *
7094  * Bits are:
7095  *      + bits [7:4] contain the number of active transmitters
7096  *      + bits [3:0] contain the number of active receivers
7097  * These are numbers 1 through 4 and can be different values if the
7098  * link is asymmetric.
7099  *
7100  * verify_cap_local_fm_link_width[0] retains its original value.
7101  */
7102 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7103                               u16 *rx_width)
7104 {
7105         u16 widths, tx, rx;
7106         u8 misc_bits, local_flags;
7107         u16 active_tx, active_rx;
7108
7109         read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
7110         tx = widths >> 12;
7111         rx = (widths >> 8) & 0xf;
7112
7113         *tx_width = link_width_to_bits(dd, tx);
7114         *rx_width = link_width_to_bits(dd, rx);
7115
7116         /* print the active widths */
7117         get_link_widths(dd, &active_tx, &active_rx);
7118 }
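
/*
 * Illustrative decode of verify_cap_local_fm_link_width[1] (value assumed):
 * widths = 0x4300 gives tx = 0x4 and rx = 0x3, i.e. an asymmetric link with
 * four active transmitters and three active receivers at the end of LNI.
 */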
7119
7120 /*
7121  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7122  * hardware information when the link first comes up.
7123  *
7124  * The link width is not available until after VerifyCap.AllFramesReceived
7125  * (the trigger for handle_verify_cap), so this is outside that routine
7126  * and should be called when the 8051 signals linkup.
7127  */
7128 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7129 {
7130         u16 tx_width, rx_width;
7131
7132         /* get end-of-LNI link widths */
7133         get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7134
7135         /* use tx_width as the link is supposed to be symmetric on link up */
7136         ppd->link_width_active = tx_width;
7137         /* link width downgrade active (LWD.A) starts out matching LW.A */
7138         ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7139         ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7140         /* per OPA spec, on link up LWD.E resets to LWD.S */
7141         ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7142         /* cache the active egress rate (units: 10^6 bits/sec) */
7143         ppd->current_egress_rate = active_egress_rate(ppd);
7144 }
7145
7146 /*
7147  * Handle a verify capabilities interrupt from the 8051.
7148  *
7149  * This is a work-queue function outside of the interrupt.
7150  */
7151 void handle_verify_cap(struct work_struct *work)
7152 {
7153         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7154                                                                 link_vc_work);
7155         struct hfi1_devdata *dd = ppd->dd;
7156         u64 reg;
7157         u8 power_management;
7158         u8 continuous;
7159         u8 vcu;
7160         u8 vau;
7161         u8 z;
7162         u16 vl15buf;
7163         u16 link_widths;
7164         u16 crc_mask;
7165         u16 crc_val;
7166         u16 device_id;
7167         u16 active_tx, active_rx;
7168         u8 partner_supported_crc;
7169         u8 remote_tx_rate;
7170         u8 device_rev;
7171
7172         set_link_state(ppd, HLS_VERIFY_CAP);
7173
7174         lcb_shutdown(dd, 0);
7175         adjust_lcb_for_fpga_serdes(dd);
7176
7177         /*
7178          * These are now valid:
7179          *      remote VerifyCap fields in the general LNI config
7180          *      CSR DC8051_STS_REMOTE_GUID
7181          *      CSR DC8051_STS_REMOTE_NODE_TYPE
7182          *      CSR DC8051_STS_REMOTE_FM_SECURITY
7183          *      CSR DC8051_STS_REMOTE_PORT_NO
7184          */
7185
7186         read_vc_remote_phy(dd, &power_management, &continuous);
7187         read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7188                               &partner_supported_crc);
7189         read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7190         read_remote_device_id(dd, &device_id, &device_rev);
7191         /*
7192          * And the 'MgmtAllowed' information, which is exchanged during
7193          * LNI, is also available at this point.
7194          */
7195         read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7196         /* print the active widths */
7197         get_link_widths(dd, &active_tx, &active_rx);
7198         dd_dev_info(dd,
7199                     "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7200                     (int)power_management, (int)continuous);
7201         dd_dev_info(dd,
7202                     "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7203                     (int)vau, (int)z, (int)vcu, (int)vl15buf,
7204                     (int)partner_supported_crc);
7205         dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7206                     (u32)remote_tx_rate, (u32)link_widths);
7207         dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7208                     (u32)device_id, (u32)device_rev);
7209         /*
7210          * The peer vAU value just read is the peer receiver value.  HFI does
7211          * not support a transmit vAU of 0 (AU == 8).  We advertised that
7212          * with Z=1 in the fabric capabilities sent to the peer.  The peer
7213          * will see our Z=1, and, if it advertised a vAU of 0, will move its
7214          * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7215          * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7216          * subject to the Z value exception.
7217          */
7218         if (vau == 0)
7219                 vau = 1;
7220         set_up_vl15(dd, vau, vl15buf);
7221
7222         /* set up the LCB CRC mode */
7223         crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7224
7225         /* order is important: use the lowest bit in common */
7226         if (crc_mask & CAP_CRC_14B)
7227                 crc_val = LCB_CRC_14B;
7228         else if (crc_mask & CAP_CRC_48B)
7229                 crc_val = LCB_CRC_48B;
7230         else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7231                 crc_val = LCB_CRC_12B_16B_PER_LANE;
7232         else
7233                 crc_val = LCB_CRC_16B;
7234
7235         dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7236         write_csr(dd, DC_LCB_CFG_CRC_MODE,
7237                   (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7238
7239         /* set (14b only) or clear sideband credit */
7240         reg = read_csr(dd, SEND_CM_CTRL);
7241         if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7242                 write_csr(dd, SEND_CM_CTRL,
7243                           reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7244         } else {
7245                 write_csr(dd, SEND_CM_CTRL,
7246                           reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7247         }
7248
7249         ppd->link_speed_active = 0;     /* invalid value */
7250         if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7251                 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7252                 switch (remote_tx_rate) {
7253                 case 0:
7254                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7255                         break;
7256                 case 1:
7257                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7258                         break;
7259                 }
7260         } else {
7261                 /* actual rate is highest bit of the ANDed rates */
7262                 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7263
7264                 if (rate & 2)
7265                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7266                 else if (rate & 1)
7267                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7268         }
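        /*
         * Example of the rate selection above (values assumed):
         * local_tx_rate = 0x3 and remote_tx_rate = 0x2 AND to 0x2, so
         * bit 1 wins and the active speed is set to 25G.
         */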
7269         if (ppd->link_speed_active == 0) {
7270                 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7271                            __func__, (int)remote_tx_rate);
7272                 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7273         }
7274
7275         /*
7276          * Cache the values of the supported, enabled, and active
7277          * LTP CRC modes to return in 'portinfo' queries. But the bit
7278          * flags that are returned in the portinfo query differ from
7279          * what's in the link_crc_mask, crc_sizes, and crc_val
7280          * variables. Convert these here.
7281          */
7282         ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7283                 /* supported crc modes */
7284         ppd->port_ltp_crc_mode |=
7285                 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7286                 /* enabled crc modes */
7287         ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7288                 /* active crc mode */
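        /*
         * Resulting port_ltp_crc_mode layout, per the shifts above:
         * bits [11:8] hold the supported modes, bits [7:4] the enabled
         * modes, and bits [3:0] the active mode.
         */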
7289
7290         /* set up the remote credit return table */
7291         assign_remote_cm_au_table(dd, vcu);
7292
7293         /*
7294          * The LCB is reset on entry to handle_verify_cap(), so this must
7295          * be applied on every link up.
7296          *
7297          * Adjust LCB error kill enable to kill the link if
7298          * these RBUF errors are seen:
7299          *      REPLAY_BUF_MBE_SMASK
7300          *      FLIT_INPUT_BUF_MBE_SMASK
7301          */
7302         if (is_ax(dd)) {                        /* fixed in B0 */
7303                 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7304                 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7305                         | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7306                 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7307         }
7308
7309         /* pull LCB fifos out of reset - all fifo clocks must be stable */
7310         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7311
7312         /* give 8051 access to the LCB CSRs */
7313         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7314         set_8051_lcb_access(dd);
7315
7316         ppd->neighbor_guid =
7317                 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7318         ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7319                                         DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7320         ppd->neighbor_type =
7321                 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7322                 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7323         ppd->neighbor_fm_security =
7324                 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7325                 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7326         dd_dev_info(dd,
7327                     "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7328                     ppd->neighbor_guid, ppd->neighbor_type,
7329                     ppd->mgmt_allowed, ppd->neighbor_fm_security);
7330         if (ppd->mgmt_allowed)
7331                 add_full_mgmt_pkey(ppd);
7332
7333         /* tell the 8051 to go to LinkUp */
7334         set_link_state(ppd, HLS_GOING_UP);
7335 }
7336
7337 /*
7338  * Apply the link width downgrade enabled policy against the current active
7339  * link widths.
7340  *
7341  * Called when the enabled policy changes or the active link widths change.
7342  */
7343 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7344 {
7345         int do_bounce = 0;
7346         int tries;
7347         u16 lwde;
7348         u16 tx, rx;
7349
7350         /* use the hls lock to avoid a race with actual link up */
7351         tries = 0;
7352 retry:
7353         mutex_lock(&ppd->hls_lock);
7354         /* only apply if the link is up */
7355         if (!(ppd->host_link_state & HLS_UP)) {
7356                 /* still going up - wait and retry */
7357                 if (ppd->host_link_state & HLS_GOING_UP) {
7358                         if (++tries < 1000) {
7359                                 mutex_unlock(&ppd->hls_lock);
7360                                 usleep_range(100, 120); /* arbitrary */
7361                                 goto retry;
7362                         }
7363                         dd_dev_err(ppd->dd,
7364                                    "%s: giving up waiting for link state change\n",
7365                                    __func__);
7366                 }
7367                 goto done;
7368         }
7369
7370         lwde = ppd->link_width_downgrade_enabled;
7371
7372         if (refresh_widths) {
7373                 get_link_widths(ppd->dd, &tx, &rx);
7374                 ppd->link_width_downgrade_tx_active = tx;
7375                 ppd->link_width_downgrade_rx_active = rx;
7376         }
7377
7378         if (lwde == 0) {
7379                 /* downgrade is disabled */
7380
7381                 /* bounce if not at starting active width */
7382                 if ((ppd->link_width_active !=
7383                      ppd->link_width_downgrade_tx_active) ||
7384                     (ppd->link_width_active !=
7385                      ppd->link_width_downgrade_rx_active)) {
7386                         dd_dev_err(ppd->dd,
7387                                    "Link downgrade is disabled and link has downgraded, downing link\n");
7388                         dd_dev_err(ppd->dd,
7389                                    "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7390                                    ppd->link_width_active,
7391                                    ppd->link_width_downgrade_tx_active,
7392                                    ppd->link_width_downgrade_rx_active);
7393                         do_bounce = 1;
7394                 }
7395         } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7396                    (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7397                 /* Tx or Rx is outside the enabled policy */
7398                 dd_dev_err(ppd->dd,
7399                            "Link is outside of downgrade allowed, downing link\n");
7400                 dd_dev_err(ppd->dd,
7401                            "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7402                            lwde, ppd->link_width_downgrade_tx_active,
7403                            ppd->link_width_downgrade_rx_active);
7404                 do_bounce = 1;
7405         }
7406
7407 done:
7408         mutex_unlock(&ppd->hls_lock);
7409
7410         if (do_bounce) {
7411                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7412                                      OPA_LINKDOWN_REASON_WIDTH_POLICY);
7413                 set_link_state(ppd, HLS_DN_OFFLINE);
7414                 tune_serdes(ppd);
7415                 start_link(ppd);
7416         }
7417 }
7418
7419 /*
7420  * Handle a link downgrade interrupt from the 8051.
7421  *
7422  * This is a work-queue function outside of the interrupt.
7423  */
7424 void handle_link_downgrade(struct work_struct *work)
7425 {
7426         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7427                                                         link_downgrade_work);
7428
7429         dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7430         apply_link_downgrade_policy(ppd, 1);
7431 }
7432
7433 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7434 {
7435         return flag_string(buf, buf_len, flags, dcc_err_flags,
7436                 ARRAY_SIZE(dcc_err_flags));
7437 }
7438
7439 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7440 {
7441         return flag_string(buf, buf_len, flags, lcb_err_flags,
7442                 ARRAY_SIZE(lcb_err_flags));
7443 }
7444
7445 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7446 {
7447         return flag_string(buf, buf_len, flags, dc8051_err_flags,
7448                 ARRAY_SIZE(dc8051_err_flags));
7449 }
7450
7451 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7452 {
7453         return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7454                 ARRAY_SIZE(dc8051_info_err_flags));
7455 }
7456
7457 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7458 {
7459         return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7460                 ARRAY_SIZE(dc8051_info_host_msg_flags));
7461 }
7462
7463 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7464 {
7465         struct hfi1_pportdata *ppd = dd->pport;
7466         u64 info, err, host_msg;
7467         int queue_link_down = 0;
7468         char buf[96];
7469
7470         /* look at the flags */
7471         if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7472                 /* 8051 information set by firmware */
7473                 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7474                 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7475                 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7476                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7477                 host_msg = (info >>
7478                         DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7479                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7480
7481                 /*
7482                  * Handle error flags.
7483                  */
7484                 if (err & FAILED_LNI) {
7485                         /*
7486                          * LNI error indications are cleared by the 8051
7487                          * only when starting polling.  Only pay attention
7488                          * to them when in the states that occur during
7489                          * LNI.
7490                          */
7491                         if (ppd->host_link_state
7492                             & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7493                                 queue_link_down = 1;
7494                                 dd_dev_info(dd, "Link error: %s\n",
7495                                             dc8051_info_err_string(buf,
7496                                                                    sizeof(buf),
7497                                                                    err &
7498                                                                    FAILED_LNI));
7499                         }
7500                         err &= ~(u64)FAILED_LNI;
7501                 }
7502                 /* unknown frames can happen during LNI, just count */
7503                 if (err & UNKNOWN_FRAME) {
7504                         ppd->unknown_frame_count++;
7505                         err &= ~(u64)UNKNOWN_FRAME;
7506                 }
7507                 if (err) {
7508                         /* report remaining errors, but do not do anything */
7509                         dd_dev_err(dd, "8051 info error: %s\n",
7510                                    dc8051_info_err_string(buf, sizeof(buf),
7511                                                           err));
7512                 }
7513
7514                 /*
7515                  * Handle host message flags.
7516                  */
7517                 if (host_msg & HOST_REQ_DONE) {
7518                         /*
7519                          * Presently, the driver does a busy wait for
7520                          * host requests to complete.  This is only an
7521                          * informational message.
7522                          * NOTE: The 8051 clears the host message
7523                          * information *on the next 8051 command*.
7524                          * Therefore, when linkup is achieved,
7525                          * this flag will still be set.
7526                          */
7527                         host_msg &= ~(u64)HOST_REQ_DONE;
7528                 }
7529                 if (host_msg & BC_SMA_MSG) {
7530                         queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7531                         host_msg &= ~(u64)BC_SMA_MSG;
7532                 }
7533                 if (host_msg & LINKUP_ACHIEVED) {
7534                         dd_dev_info(dd, "8051: Link up\n");
7535                         queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7536                         host_msg &= ~(u64)LINKUP_ACHIEVED;
7537                 }
7538                 if (host_msg & EXT_DEVICE_CFG_REQ) {
7539                         queue_work(ppd->hfi1_wq, &ppd->dc_host_req_work);
7540                         host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7541                 }
7542                 if (host_msg & VERIFY_CAP_FRAME) {
7543                         queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7544                         host_msg &= ~(u64)VERIFY_CAP_FRAME;
7545                 }
7546                 if (host_msg & LINK_GOING_DOWN) {
7547                         const char *extra = "";
7548                         /* no downgrade action needed if going down */
7549                         if (host_msg & LINK_WIDTH_DOWNGRADED) {
7550                                 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7551                                 extra = " (ignoring downgrade)";
7552                         }
7553                         dd_dev_info(dd, "8051: Link down%s\n", extra);
7554                         queue_link_down = 1;
7555                         host_msg &= ~(u64)LINK_GOING_DOWN;
7556                 }
7557                 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7558                         queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7559                         host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7560                 }
7561                 if (host_msg) {
7562                         /* report remaining messages, but do not do anything */
7563                         dd_dev_info(dd, "8051 info host message: %s\n",
7564                                     dc8051_info_host_msg_string(buf,
7565                                                                 sizeof(buf),
7566                                                                 host_msg));
7567                 }
7568
7569                 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7570         }
7571         if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7572                 /*
7573                  * Lost the 8051 heartbeat.  If this happens, we
7574                  * receive constant interrupts about it.  Disable
7575                  * the interrupt after the first.
7576                  */
7577                 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7578                 write_csr(dd, DC_DC8051_ERR_EN,
7579                           read_csr(dd, DC_DC8051_ERR_EN) &
7580                           ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7581
7582                 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7583         }
7584         if (reg) {
7585                 /* report the error, but do not do anything */
7586                 dd_dev_err(dd, "8051 error: %s\n",
7587                            dc8051_err_string(buf, sizeof(buf), reg));
7588         }
7589
7590         if (queue_link_down) {
7591                 /*
7592                  * if the link is already going down or disabled, do not
7593                  * queue another
7594                  */
7595                 if ((ppd->host_link_state &
7596                     (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7597                     ppd->link_enabled == 0) {
7598                         dd_dev_info(dd, "%s: not queuing link down\n",
7599                                     __func__);
7600                 } else {
7601                         queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7602                 }
7603         }
7604 }
7605
7606 static const char * const fm_config_txt[] = {
7607 [0] =
7608         "BadHeadDist: Distance violation between two head flits",
7609 [1] =
7610         "BadTailDist: Distance violation between two tail flits",
7611 [2] =
7612         "BadCtrlDist: Distance violation between two credit control flits",
7613 [3] =
7614         "BadCrdAck: Credits return for unsupported VL",
7615 [4] =
7616         "UnsupportedVLMarker: Received VL Marker",
7617 [5] =
7618         "BadPreempt: Exceeded the preemption nesting level",
7619 [6] =
7620         "BadControlFlit: Received unsupported control flit",
7621 /* no 7 */
7622 [8] =
7623         "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7624 };
7625
7626 static const char * const port_rcv_txt[] = {
7627 [1] =
7628         "BadPktLen: Illegal PktLen",
7629 [2] =
7630         "PktLenTooLong: Packet longer than PktLen",
7631 [3] =
7632         "PktLenTooShort: Packet shorter than PktLen",
7633 [4] =
7634         "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7635 [5] =
7636         "BadDLID: Illegal DLID (0, doesn't match HFI)",
7637 [6] =
7638         "BadL2: Illegal L2 opcode",
7639 [7] =
7640         "BadSC: Unsupported SC",
7641 [9] =
7642         "BadRC: Illegal RC",
7643 [11] =
7644         "PreemptError: Preempting with same VL",
7645 [12] =
7646         "PreemptVL15: Preempting a VL15 packet",
7647 };
7648
7649 #define OPA_LDR_FMCONFIG_OFFSET 16
7650 #define OPA_LDR_PORTRCV_OFFSET 0
7651 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7652 {
7653         u64 info, hdr0, hdr1;
7654         const char *extra;
7655         char buf[96];
7656         struct hfi1_pportdata *ppd = dd->pport;
7657         u8 lcl_reason = 0;
7658         int do_bounce = 0;
7659
7660         if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7661                 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7662                         info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7663                         dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7664                         /* set status bit */
7665                         dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7666                 }
7667                 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7668         }
7669
7670         if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7671                 struct hfi1_pportdata *ppd = dd->pport;
7672                 /* this counter saturates at (2^32) - 1 */
7673                 if (ppd->link_downed < (u32)UINT_MAX)
7674                         ppd->link_downed++;
7675                 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7676         }
7677
7678         if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7679                 u8 reason_valid = 1;
7680
7681                 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7682                 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7683                         dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7684                         /* set status bit */
7685                         dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7686                 }
7687                 switch (info) {
7688                 case 0:
7689                 case 1:
7690                 case 2:
7691                 case 3:
7692                 case 4:
7693                 case 5:
7694                 case 6:
7695                         extra = fm_config_txt[info];
7696                         break;
7697                 case 8:
7698                         extra = fm_config_txt[info];
7699                         if (ppd->port_error_action &
7700                             OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7701                                 do_bounce = 1;
7702                                 /*
7703                                  * lcl_reason cannot be derived from info
7704                                  * for this error
7705                                  */
7706                                 lcl_reason =
7707                                   OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7708                         }
7709                         break;
7710                 default:
7711                         reason_valid = 0;
7712                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7713                         extra = buf;
7714                         break;
7715                 }
7716
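                     /*
                      * Bounce the link only if the PortErrorAction bit
                      * for this error code is set.
                      */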
7717                 if (reason_valid && !do_bounce) {
7718                         do_bounce = ppd->port_error_action &
7719                                         (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7720                         lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7721                 }
7722
7723                 /* just report this */
7724                 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7725                 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7726         }
7727
7728         if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7729                 u8 reason_valid = 1;
7730
7731                 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7732                 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7733                 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7734                 if (!(dd->err_info_rcvport.status_and_code &
7735                       OPA_EI_STATUS_SMASK)) {
7736                         dd->err_info_rcvport.status_and_code =
7737                                 info & OPA_EI_CODE_SMASK;
7738                         /* set status bit */
7739                         dd->err_info_rcvport.status_and_code |=
7740                                 OPA_EI_STATUS_SMASK;
7741                         /*
7742                          * save first 2 flits in the packet that caused
7743                          * the error
7744                          */
7745                         dd->err_info_rcvport.packet_flit1 = hdr0;
7746                         dd->err_info_rcvport.packet_flit2 = hdr1;
7747                 }
7748                 switch (info) {
7749                 case 1:
7750                 case 2:
7751                 case 3:
7752                 case 4:
7753                 case 5:
7754                 case 6:
7755                 case 7:
7756                 case 9:
7757                 case 11:
7758                 case 12:
7759                         extra = port_rcv_txt[info];
7760                         break;
7761                 default:
7762                         reason_valid = 0;
7763                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7764                         extra = buf;
7765                         break;
7766                 }
7767
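                     /* as above: bounce only if the PortErrorAction bit is set */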
7768                 if (reason_valid && !do_bounce) {
7769                         do_bounce = ppd->port_error_action &
7770                                         (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7771                         lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7772                 }
7773
7774                 /* just report this */
7775                 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7776                 dd_dev_info(dd, "           hdr0 0x%llx, hdr1 0x%llx\n",
7777                             hdr0, hdr1);
7778
7779                 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7780         }
7781
7782         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7783                 /* informative only */
7784                 dd_dev_info(dd, "8051 access to LCB blocked\n");
7785                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7786         }
7787         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7788                 /* informative only */
7789                 dd_dev_info(dd, "host access to LCB blocked\n");
7790                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7791         }
7792
7793         /* report any remaining errors */
7794         if (reg)
7795                 dd_dev_info(dd, "DCC Error: %s\n",
7796                             dcc_err_string(buf, sizeof(buf), reg));
7797
7798         if (lcl_reason == 0)
7799                 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7800
7801         if (do_bounce) {
7802                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7803                 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7804                 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7805         }
7806 }
7807
7808 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7809 {
7810         char buf[96];
7811
7812         dd_dev_info(dd, "LCB Error: %s\n",
7813                     lcb_err_string(buf, sizeof(buf), reg));
7814 }
7815
7816 /*
7817  * CCE block DC interrupt.  Source is < 8.
7818  */
7819 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7820 {
7821         const struct err_reg_info *eri = &dc_errs[source];
7822
7823         if (eri->handler) {
7824                 interrupt_clear_down(dd, 0, eri);
7825         } else if (source == 3 /* dc_lbm_int */) {
7826                 /*
7827                  * This indicates that a parity error has occurred on the
7828                  * address/control lines presented to the LBM.  The error
7829                  * is a single pulse, there is no associated error flag,
7830                  * and it is non-maskable.  This is because if a parity
7831                  * error occurs on the request the request is dropped.
7832                  * error occurs on the request, the request is dropped.
7833                  * ever does.
7834                  */
7835                 dd_dev_err(dd, "Parity error in DC LBM block\n");
7836         } else {
7837                 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7838         }
7839 }
7840
7841 /*
7842  * TX block send credit interrupt.  Source is < 160.
7843  */
7844 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7845 {
7846         sc_group_release_update(dd, source);
7847 }
7848
7849 /*
7850  * TX block SDMA interrupt.  Source is < 48.
7851  *
7852  * SDMA interrupts are grouped by type:
7853  *
7854  *       0 -  N-1 = SDma
7855  *       N - 2N-1 = SDmaProgress
7856  *      2N - 3N-1 = SDmaIdle
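      *
      * where N = TXE_NUM_SDMA_ENGINES; source = (type * N) + engine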
7857  */
7858 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7859 {
7860         /* what interrupt */
7861         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
7862         /* which engine */
7863         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7864
7865 #ifdef CONFIG_SDMA_VERBOSITY
7866         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7867                    slashstrip(__FILE__), __LINE__, __func__);
7868         sdma_dumpstate(&dd->per_sdma[which]);
7869 #endif
7870
7871         if (likely(what < 3 && which < dd->num_sdma)) {
7872                 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7873         } else {
7874                 /* should not happen */
7875                 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7876         }
7877 }
7878
7879 /*
7880  * RX block receive available interrupt.  Source is < 160.
7881  */
7882 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7883 {
7884         struct hfi1_ctxtdata *rcd;
7885         char *err_detail;
7886
7887         if (likely(source < dd->num_rcv_contexts)) {
7888                 rcd = dd->rcd[source];
7889                 if (rcd) {
7890                         if (source < dd->first_user_ctxt)
7891                                 rcd->do_interrupt(rcd, 0);
7892                         else
7893                                 handle_user_interrupt(rcd);
7894                         return; /* OK */
7895                 }
7896                 /* received an interrupt, but no rcd */
7897                 err_detail = "dataless";
7898         } else {
7899                 /* received an interrupt, but are not using that context */
7900                 err_detail = "out of range";
7901         }
7902         dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7903                    err_detail, source);
7904 }
7905
7906 /*
7907  * RX block receive urgent interrupt.  Source is < 160.
7908  */
7909 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
7910 {
7911         struct hfi1_ctxtdata *rcd;
7912         char *err_detail;
7913
7914         if (likely(source < dd->num_rcv_contexts)) {
7915                 rcd = dd->rcd[source];
7916                 if (rcd) {
7917                         /* only pay attention to user urgent interrupts */
7918                         if (source >= dd->first_user_ctxt)
7919                                 handle_user_interrupt(rcd);
7920                         return; /* OK */
7921                 }
7922                 /* received an interrupt, but no rcd */
7923                 err_detail = "dataless";
7924         } else {
7925                 /* received an interrupt, but are not using that context */
7926                 err_detail = "out of range";
7927         }
7928         dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
7929                    err_detail, source);
7930 }
7931
7932 /*
7933  * Reserved range interrupt.  Should not be called in normal operation.
7934  */
7935 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
7936 {
7937         char name[64];
7938
7939         dd_dev_err(dd, "unexpected %s interrupt\n",
7940                    is_reserved_name(name, sizeof(name), source));
7941 }
7942
7943 static const struct is_table is_table[] = {
7944 /*
7945  * start                 end
7946  *                              name func               interrupt func
7947  */
7948 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
7949                                 is_misc_err_name,       is_misc_err_int },
7950 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
7951                                 is_sdma_eng_err_name,   is_sdma_eng_err_int },
7952 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
7953                                 is_sendctxt_err_name,   is_sendctxt_err_int },
7954 { IS_SDMA_START,             IS_SDMA_END,
7955                                 is_sdma_eng_name,       is_sdma_eng_int },
7956 { IS_VARIOUS_START,          IS_VARIOUS_END,
7957                                 is_various_name,        is_various_int },
7958 { IS_DC_START,       IS_DC_END,
7959                                 is_dc_name,             is_dc_int },
7960 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
7961                                 is_rcv_avail_name,      is_rcv_avail_int },
7962 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
7963                                 is_rcv_urgent_name,     is_rcv_urgent_int },
7964 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
7965                                 is_send_credit_name,    is_send_credit_int},
7966 { IS_RESERVED_START,     IS_RESERVED_END,
7967                                 is_reserved_name,       is_reserved_int},
7968 };
7969
7970 /*
7971  * Interrupt source interrupt - called when the given source has an interrupt.
7972  * Source is a bit index into an array of 64-bit integers.
7973  */
7974 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
7975 {
7976         const struct is_table *entry;
7977
7978         /* avoids a double compare by walking the table in-order */
7979         for (entry = &is_table[0]; entry->is_name; entry++) {
7980                 if (source < entry->end) {
7981                         trace_hfi1_interrupt(dd, entry, source);
7982                         entry->is_int(dd, source - entry->start);
7983                         return;
7984                 }
7985         }
7986         /* fell off the end */
7987         dd_dev_err(dd, "invalid interrupt source %u\n", source);
7988 }
7989
7990 /*
7991  * General interrupt handler.  This is able to correctly handle
7992  * all interrupts in case INTx is used.
7993  */
7994 static irqreturn_t general_interrupt(int irq, void *data)
7995 {
7996         struct hfi1_devdata *dd = data;
7997         u64 regs[CCE_NUM_INT_CSRS];
7998         u32 bit;
7999         int i;
8000
8001         this_cpu_inc(*dd->int_counter);
8002
8003         /* phase 1: scan and clear all handled interrupts */
8004         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8005                 if (dd->gi_mask[i] == 0) {
8006                         regs[i] = 0;    /* used later */
8007                         continue;
8008                 }
8009                 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8010                                 dd->gi_mask[i];
8011                 /* only clear if anything is set */
8012                 if (regs[i])
8013                         write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8014         }
8015
8016         /* phase 2: call the appropriate handler */
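             /* bit numbers across the status words are the interrupt source numbers */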
8017         for_each_set_bit(bit, (unsigned long *)&regs[0],
8018                          CCE_NUM_INT_CSRS * 64) {
8019                 is_interrupt(dd, bit);
8020         }
8021
8022         return IRQ_HANDLED;
8023 }
8024
8025 static irqreturn_t sdma_interrupt(int irq, void *data)
8026 {
8027         struct sdma_engine *sde = data;
8028         struct hfi1_devdata *dd = sde->dd;
8029         u64 status;
8030
8031 #ifdef CONFIG_SDMA_VERBOSITY
8032         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8033                    slashstrip(__FILE__), __LINE__, __func__);
8034         sdma_dumpstate(sde);
8035 #endif
8036
8037         this_cpu_inc(*dd->int_counter);
8038
8039         /* This read_csr is really bad in the hot path */
8040         status = read_csr(dd,
8041                           CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8042                           & sde->imask;
8043         if (likely(status)) {
8044                 /* clear the interrupt(s) */
8045                 write_csr(dd,
8046                           CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8047                           status);
8048
8049                 /* handle the interrupt(s) */
8050                 sdma_engine_interrupt(sde, status);
8051         } else
8052                 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8053                            sde->this_idx);
8054
8055         return IRQ_HANDLED;
8056 }
8057
8058 /*
8059  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8060  * to insure that the write completed.  This does NOT guarantee that
8061  * to ensure that the write completed.  This does NOT guarantee that
8062  */
8063 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8064 {
8065         struct hfi1_devdata *dd = rcd->dd;
8066         u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8067
8068         mmiowb();       /* make sure everything before is written */
8069         write_csr(dd, addr, rcd->imask);
8070         /* force the above write on the chip and get a value back */
8071         (void)read_csr(dd, addr);
8072 }
8073
8074 /* force the receive interrupt */
8075 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8076 {
8077         write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8078 }
8079
8080 /*
8081  * Return non-zero if a packet is present.
8082  *
8083  * This routine is called when rechecking for packets after the RcvAvail
8084  * interrupt has been cleared down.  First, do a quick check of memory for
8085  * a packet present.  If not found, use an expensive CSR read of the context
8086  * tail to determine the actual tail.  The CSR read is necessary because there
8087  * is no method to push pending DMAs to memory other than an interrupt and we
8088  * are trying to determine if we need to force an interrupt.
8089  */
8090 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8091 {
8092         u32 tail;
8093         int present;
8094
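             /*
              * Without DMA_RTAIL, a packet is present when the RHF sequence
              * number at the head matches the expected sequence count.
              */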
8095         if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8096                 present = (rcd->seq_cnt ==
8097                                 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8098         else /* is RDMA rtail */
8099                 present = (rcd->head != get_rcvhdrtail(rcd));
8100
8101         if (present)
8102                 return 1;
8103
8104         /* fall back to a CSR read, correct independent of DMA_RTAIL */
8105         tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8106         return rcd->head != tail;
8107 }
8108
8109 /*
8110  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8111  * This routine will try to handle packets immediately (latency), but if
8112  * it finds too many, it will invoke the thread handler (bandwidth).  The
8113  * chip receive interrupt is *not* cleared down until this or the thread (if
8114  * invoked) is finished.  The intent is to avoid extra interrupts while we
8115  * are processing packets anyway.
8116  */
8117 static irqreturn_t receive_context_interrupt(int irq, void *data)
8118 {
8119         struct hfi1_ctxtdata *rcd = data;
8120         struct hfi1_devdata *dd = rcd->dd;
8121         int disposition;
8122         int present;
8123
8124         trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8125         this_cpu_inc(*dd->int_counter);
8126         aspm_ctx_disable(rcd);
8127
8128         /* receive interrupt remains blocked while processing packets */
8129         disposition = rcd->do_interrupt(rcd, 0);
8130
8131         /*
8132          * Too many packets were seen while processing packets in this
8133          * IRQ handler.  Invoke the handler thread.  The receive interrupt
8134          * remains blocked.
8135          */
8136         if (disposition == RCV_PKT_LIMIT)
8137                 return IRQ_WAKE_THREAD;
8138
8139         /*
8140          * The packet processor detected no more packets.  Clear the receive
8141          * interrupt and recheck for a packet that may have arrived
8142          * after the previous check and interrupt clear.  If a packet arrived,
8143          * force another interrupt.
8144          */
8145         clear_recv_intr(rcd);
8146         present = check_packet_present(rcd);
8147         if (present)
8148                 force_recv_intr(rcd);
8149
8150         return IRQ_HANDLED;
8151 }
8152
8153 /*
8154  * Receive packet thread handler.  This expects to be invoked with the
8155  * receive interrupt still blocked.
8156  */
8157 static irqreturn_t receive_context_thread(int irq, void *data)
8158 {
8159         struct hfi1_ctxtdata *rcd = data;
8160         int present;
8161
8162         /* receive interrupt is still blocked from the IRQ handler */
8163         (void)rcd->do_interrupt(rcd, 1);
8164
8165         /*
8166          * The packet processor will only return if it detected no more
8167          * packets.  Hold IRQs here so we can safely clear the interrupt and
8168          * recheck for a packet that may have arrived after the previous
8169          * check and the interrupt clear.  If a packet arrived, force another
8170          * interrupt.
8171          */
8172         local_irq_disable();
8173         clear_recv_intr(rcd);
8174         present = check_packet_present(rcd);
8175         if (present)
8176                 force_recv_intr(rcd);
8177         local_irq_enable();
8178
8179         return IRQ_HANDLED;
8180 }
8181
8182 /* ========================================================================= */
8183
8184 u32 read_physical_state(struct hfi1_devdata *dd)
8185 {
8186         u64 reg;
8187
8188         reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8189         return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8190                                 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8191 }
8192
8193 u32 read_logical_state(struct hfi1_devdata *dd)
8194 {
8195         u64 reg;
8196
8197         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8198         return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8199                                 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8200 }
8201
8202 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8203 {
8204         u64 reg;
8205
8206         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8207         /* clear current state, set new state */
8208         reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8209         reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8210         write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8211 }
8212
8213 /*
8214  * Use the 8051 to read a LCB CSR.
8215  */
8216 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8217 {
8218         u32 regno;
8219         int ret;
8220
8221         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8222                 if (acquire_lcb_access(dd, 0) == 0) {
8223                         *data = read_csr(dd, addr);
8224                         release_lcb_access(dd, 0);
8225                         return 0;
8226                 }
8227                 return -EBUSY;
8228         }
8229
8230         /* register is an index of LCB registers: (offset - base) / 8 */
8231         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8232         ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8233         if (ret != HCMD_SUCCESS)
8234                 return -EBUSY;
8235         return 0;
8236 }
8237
8238 /*
8239  * Read an LCB CSR.  Access may not be in host control, so check.
8240  * Return 0 on success, -EBUSY on failure.
8241  */
8242 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8243 {
8244         struct hfi1_pportdata *ppd = dd->pport;
8245
8246         /* if up, go through the 8051 for the value */
8247         if (ppd->host_link_state & HLS_UP)
8248                 return read_lcb_via_8051(dd, addr, data);
8249         /* if going up or down, no access */
8250         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8251                 return -EBUSY;
8252         /* otherwise, host has access */
8253         *data = read_csr(dd, addr);
8254         return 0;
8255 }
8256
8257 /*
8258  * Use the 8051 to write a LCB CSR.
8259  */
8260 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8261 {
8262         u32 regno;
8263         int ret;
8264
8265         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8266             (dd->dc8051_ver < dc8051_ver(0, 20))) {
8267                 if (acquire_lcb_access(dd, 0) == 0) {
8268                         write_csr(dd, addr, data);
8269                         release_lcb_access(dd, 0);
8270                         return 0;
8271                 }
8272                 return -EBUSY;
8273         }
8274
8275         /* register is an index of LCB registers: (offset - base) / 8 */
8276         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8277         ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8278         if (ret != HCMD_SUCCESS)
8279                 return -EBUSY;
8280         return 0;
8281 }
8282
8283 /*
8284  * Write an LCB CSR.  Access may not be in host control, so check.
8285  * Return 0 on success, -EBUSY on failure.
8286  */
8287 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8288 {
8289         struct hfi1_pportdata *ppd = dd->pport;
8290
8291         /* if up, go through the 8051 for the value */
8292         if (ppd->host_link_state & HLS_UP)
8293                 return write_lcb_via_8051(dd, addr, data);
8294         /* if going up or down, no access */
8295         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8296                 return -EBUSY;
8297         /* otherwise, host has access */
8298         write_csr(dd, addr, data);
8299         return 0;
8300 }
8301
8302 /*
8303  * Returns:
8304  *      < 0 = Linux error, not able to get access
8305  *      > 0 = 8051 command RETURN_CODE
8306  */
8307 static int do_8051_command(
8308         struct hfi1_devdata *dd,
8309         u32 type,
8310         u64 in_data,
8311         u64 *out_data)
8312 {
8313         u64 reg, completed;
8314         int return_code;
8315         unsigned long flags;
8316         unsigned long timeout;
8317
8318         hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8319
8320         /*
8321          * Alternative to holding the lock for a long time:
8322          * - keep busy wait - have other users bounce off
8323          */
8324         spin_lock_irqsave(&dd->dc8051_lock, flags);
8325
8326         /* We can't send any commands to the 8051 if it's in reset */
8327         if (dd->dc_shutdown) {
8328                 return_code = -ENODEV;
8329                 goto fail;
8330         }
8331
8332         /*
8333          * If an 8051 host command timed out previously, then the 8051 is
8334          * stuck.
8335          *
8336          * On first timeout, attempt to reset and restart the entire DC
8337          * block (including 8051). (Is this too big of a hammer?)
8338          *
8339          * If the 8051 times out a second time, the reset did not bring it
8340          * back to healthy life. In that case, fail any subsequent commands.
8341          */
8342         if (dd->dc8051_timed_out) {
8343                 if (dd->dc8051_timed_out > 1) {
8344                         dd_dev_err(dd,
8345                                    "Previous 8051 host command timed out, skipping command %u\n",
8346                                    type);
8347                         return_code = -ENXIO;
8348                         goto fail;
8349                 }
8350                 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8351                 dc_shutdown(dd);
8352                 dc_start(dd);
8353                 spin_lock_irqsave(&dd->dc8051_lock, flags);
8354         }
8355
8356         /*
8357          * If there is no timeout, then the 8051 command interface is
8358          * waiting for a command.
8359          */
8360
8361         /*
8362          * When writing an LCB CSR, out_data contains the full value
8363          * to be written, while in_data contains the relative LCB
8364          * address in 7:0.  Do the work of distributing the write
8365          * data to where it needs to go here, rather than in the caller:
8366          *
8367          * Write data
8368          *   39:00 -> in_data[47:8]
8369          *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8370          *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8371          */
8372         if (type == HCMD_WRITE_LCB_CSR) {
8373                 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8374                 reg = ((((*out_data) >> 40) & 0xff) <<
8375                                 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8376                       | ((((*out_data) >> 48) & 0xffff) <<
8377                                 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8378                 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8379         }
8380
8381         /*
8382          * Do two writes: the first to stabilize the type and req_data, the
8383          * second to activate.
8384          */
8385         reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8386                         << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8387                 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8388                         << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8389         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8390         reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8391         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8392
8393         /* wait for completion, alternate: interrupt */
8394         timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8395         while (1) {
8396                 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8397                 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8398                 if (completed)
8399                         break;
8400                 if (time_after(jiffies, timeout)) {
8401                         dd->dc8051_timed_out++;
8402                         dd_dev_err(dd, "8051 host command %u timeout\n", type);
8403                         if (out_data)
8404                                 *out_data = 0;
8405                         return_code = -ETIMEDOUT;
8406                         goto fail;
8407                 }
8408                 udelay(2);
8409         }
8410
8411         if (out_data) {
8412                 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8413                                 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8414                 if (type == HCMD_READ_LCB_CSR) {
8415                         /* top 16 bits are in a different register */
8416                         *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8417                                 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8418                                 << (48
8419                                     - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8420                 }
8421         }
8422         return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8423                                 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8424         dd->dc8051_timed_out = 0;
8425         /*
8426          * Clear command for next user.
8427          */
8428         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8429
8430 fail:
8431         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8432
8433         return return_code;
8434 }
8435
8436 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8437 {
8438         return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8439 }
8440
8441 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8442                      u8 lane_id, u32 config_data)
8443 {
8444         u64 data;
8445         int ret;
8446
8447         data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8448                 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8449                 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8450         ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8451         if (ret != HCMD_SUCCESS) {
8452                 dd_dev_err(dd,
8453                            "load 8051 config: field id %d, lane %d, err %d\n",
8454                            (int)field_id, (int)lane_id, ret);
8455         }
8456         return ret;
8457 }
8458
8459 /*
8460  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8461  * set the result, even on error.
8462  * Return 0 on success, -errno on failure
8463  */
8464 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8465                      u32 *result)
8466 {
8467         u64 big_data;
8468         u32 addr;
8469         int ret;
8470
8471         /* address start depends on the lane_id */
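             /*
              * lane_id values of 4 and above (e.g. GENERAL_CONFIG) select the
              * general fields at the start of the RAM; 0-3 select the per-lane
              * fields that follow.
              */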
8472         if (lane_id < 4)
8473                 addr = (4 * NUM_GENERAL_FIELDS)
8474                         + (lane_id * 4 * NUM_LANE_FIELDS);
8475         else
8476                 addr = 0;
8477         addr += field_id * 4;
8478
8479         /* read is in 8-byte chunks, hardware will truncate the address down */
8480         ret = read_8051_data(dd, addr, 8, &big_data);
8481
8482         if (ret == 0) {
8483                 /* extract the 4 bytes we want */
8484                 if (addr & 0x4)
8485                         *result = (u32)(big_data >> 32);
8486                 else
8487                         *result = (u32)big_data;
8488         } else {
8489                 *result = 0;
8490                 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8491                            __func__, lane_id, field_id);
8492         }
8493
8494         return ret;
8495 }
8496
8497 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8498                               u8 continuous)
8499 {
8500         u32 frame;
8501
8502         frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8503                 | power_management << POWER_MANAGEMENT_SHIFT;
8504         return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8505                                 GENERAL_CONFIG, frame);
8506 }
8507
8508 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8509                                  u16 vl15buf, u8 crc_sizes)
8510 {
8511         u32 frame;
8512
8513         frame = (u32)vau << VAU_SHIFT
8514                 | (u32)z << Z_SHIFT
8515                 | (u32)vcu << VCU_SHIFT
8516                 | (u32)vl15buf << VL15BUF_SHIFT
8517                 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8518         return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8519                                 GENERAL_CONFIG, frame);
8520 }
8521
8522 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8523                                      u8 *flag_bits, u16 *link_widths)
8524 {
8525         u32 frame;
8526
8527         read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8528                          &frame);
8529         *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8530         *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8531         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8532 }
8533
8534 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8535                                      u8 misc_bits,
8536                                      u8 flag_bits,
8537                                      u16 link_widths)
8538 {
8539         u32 frame;
8540
8541         frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8542                 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8543                 | (u32)link_widths << LINK_WIDTH_SHIFT;
8544         return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8545                      frame);
8546 }
8547
8548 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8549                                  u8 device_rev)
8550 {
8551         u32 frame;
8552
8553         frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8554                 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8555         return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8556 }
8557
8558 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8559                                   u8 *device_rev)
8560 {
8561         u32 frame;
8562
8563         read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8564         *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8565         *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8566                         & REMOTE_DEVICE_REV_MASK;
8567 }
8568
8569 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8570 {
8571         u32 frame;
8572
8573         read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8574         *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8575         *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8576 }
8577
8578 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8579                                u8 *continuous)
8580 {
8581         u32 frame;
8582
8583         read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8584         *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8585                                         & POWER_MANAGEMENT_MASK;
8586         *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8587                                         & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8588 }
8589
8590 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8591                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8592 {
8593         u32 frame;
8594
8595         read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8596         *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8597         *z = (frame >> Z_SHIFT) & Z_MASK;
8598         *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8599         *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8600         *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8601 }
8602
8603 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8604                                       u8 *remote_tx_rate,
8605                                       u16 *link_widths)
8606 {
8607         u32 frame;
8608
8609         read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8610                          &frame);
8611         *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8612                                 & REMOTE_TX_RATE_MASK;
8613         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8614 }
8615
8616 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8617 {
8618         u32 frame;
8619
8620         read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8621         *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8622 }
8623
8624 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8625 {
8626         u32 frame;
8627
8628         read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8629         *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8630 }
8631
8632 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8633 {
8634         read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8635 }
8636
8637 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8638 {
8639         read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8640 }
8641
8642 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8643 {
8644         u32 frame;
8645         int ret;
8646
8647         *link_quality = 0;
8648         if (dd->pport->host_link_state & HLS_UP) {
8649                 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8650                                        &frame);
8651                 if (ret == 0)
8652                         *link_quality = (frame >> LINK_QUALITY_SHIFT)
8653                                                 & LINK_QUALITY_MASK;
8654         }
8655 }
8656
8657 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8658 {
8659         u32 frame;
8660
8661         read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8662         *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8663 }
8664
8665 static int read_tx_settings(struct hfi1_devdata *dd,
8666                             u8 *enable_lane_tx,
8667                             u8 *tx_polarity_inversion,
8668                             u8 *rx_polarity_inversion,
8669                             u8 *max_rate)
8670 {
8671         u32 frame;
8672         int ret;
8673
8674         ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8675         *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8676                                 & ENABLE_LANE_TX_MASK;
8677         *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8678                                 & TX_POLARITY_INVERSION_MASK;
8679         *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8680                                 & RX_POLARITY_INVERSION_MASK;
8681         *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8682         return ret;
8683 }
8684
8685 static int write_tx_settings(struct hfi1_devdata *dd,
8686                              u8 enable_lane_tx,
8687                              u8 tx_polarity_inversion,
8688                              u8 rx_polarity_inversion,
8689                              u8 max_rate)
8690 {
8691         u32 frame;
8692
8693         /* no need to mask, all variable sizes match field widths */
8694         frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8695                 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8696                 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8697                 | max_rate << MAX_RATE_SHIFT;
8698         return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8699 }
8700
8701 static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8702 {
8703         u32 frame, version, prod_id;
8704         int ret, lane;
8705
8706         /* 4 lanes */
8707         for (lane = 0; lane < 4; lane++) {
8708                 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8709                 if (ret) {
8710                         dd_dev_err(dd,
8711                                    "Unable to read lane %d firmware details\n",
8712                                    lane);
8713                         continue;
8714                 }
8715                 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8716                                         & SPICO_ROM_VERSION_MASK;
8717                 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8718                                         & SPICO_ROM_PROD_ID_MASK;
8719                 dd_dev_info(dd,
8720                             "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8721                             lane, version, prod_id);
8722         }
8723 }
8724
8725 /*
8726  * Read an idle LCB message.
8727  *
8728  * Returns 0 on success, -EINVAL on error
8729  */
8730 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8731 {
8732         int ret;
8733
8734         ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8735         if (ret != HCMD_SUCCESS) {
8736                 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8737                            (u32)type, ret);
8738                 return -EINVAL;
8739         }
8740         dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8741         /* return only the payload as we already know the type */
8742         *data_out >>= IDLE_PAYLOAD_SHIFT;
8743         return 0;
8744 }
8745
8746 /*
8747  * Read an idle SMA message.  To be done in response to a notification from
8748  * the 8051.
8749  *
8750  * Returns 0 on success, -EINVAL on error
8751  */
8752 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8753 {
8754         return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8755                                  data);
8756 }
8757
8758 /*
8759  * Send an idle LCB message.
8760  *
8761  * Returns 0 on success, -EINVAL on error
8762  */
8763 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8764 {
8765         int ret;
8766
8767         dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8768         ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8769         if (ret != HCMD_SUCCESS) {
8770                 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8771                            data, ret);
8772                 return -EINVAL;
8773         }
8774         return 0;
8775 }
8776
8777 /*
8778  * Send an idle SMA message.
8779  *
8780  * Returns 0 on success, -EINVAL on error
8781  */
8782 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8783 {
8784         u64 data;
8785
8786         data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8787                 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8788         return send_idle_message(dd, data);
8789 }
8790
8791 /*
8792  * Initialize the LCB then do a quick link up.  This may or may not be
8793  * in loopback.
8794  *
8795  * return 0 on success, -errno on error
8796  */
8797 static int do_quick_linkup(struct hfi1_devdata *dd)
8798 {
8799         u64 reg;
8800         unsigned long timeout;
8801         int ret;
8802
8803         lcb_shutdown(dd, 0);
8804
8805         if (loopback) {
8806                 /* LCB_CFG_LOOPBACK.VAL = 2 */
8807                 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8808                 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8809                           IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8810                 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8811         }
8812
8813         /* start the LCBs */
8814         /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8815         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8816
8817         /* simulator only loopback steps */
8818         if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8819                 /* LCB_CFG_RUN.EN = 1 */
8820                 write_csr(dd, DC_LCB_CFG_RUN,
8821                           1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8822
8823                 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8824                 timeout = jiffies + msecs_to_jiffies(10);
8825                 while (1) {
8826                         reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8827                         if (reg)
8828                                 break;
8829                         if (time_after(jiffies, timeout)) {
8830                                 dd_dev_err(dd,
8831                                            "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8832                                 return -ETIMEDOUT;
8833                         }
8834                         udelay(2);
8835                 }
8836
8837                 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8838                           1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8839         }
8840
8841         if (!loopback) {
8842                 /*
8843                  * When doing quick linkup and not in loopback, both
8844                  * sides must be done with LCB set-up before either
8845                  * starts the quick linkup.  Put a delay here so that
8846                  * both sides can be started and have a chance to be
8847                  * done with LCB set up before resuming.
8848                  */
8849                 dd_dev_err(dd,
8850                            "Pausing for peer to be finished with LCB set up\n");
8851                 msleep(5000);
8852                 dd_dev_err(dd, "Continuing with quick linkup\n");
8853         }
8854
8855         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8856         set_8051_lcb_access(dd);
8857
8858         /*
8859          * State "quick" LinkUp request sets the physical link state to
8860          * LinkUp without a verify capability sequence.
8861          * This state is in simulator v37 and later.
8862          */
8863         ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8864         if (ret != HCMD_SUCCESS) {
8865                 dd_dev_err(dd,
8866                            "%s: set physical link state to quick LinkUp failed with return %d\n",
8867                            __func__, ret);
8868
8869                 set_host_lcb_access(dd);
8870                 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8871
8872                 if (ret >= 0)
8873                         ret = -EINVAL;
8874                 return ret;
8875         }
8876
8877         return 0; /* success */
8878 }
8879
8880 /*
8881  * Set the SerDes to internal loopback mode.
8882  * Returns 0 on success, -errno on error.
8883  */
8884 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8885 {
8886         int ret;
8887
8888         ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8889         if (ret == HCMD_SUCCESS)
8890                 return 0;
8891         dd_dev_err(dd,
8892                    "Set physical link state to SerDes Loopback failed with return %d\n",
8893                    ret);
8894         if (ret >= 0)
8895                 ret = -EINVAL;
8896         return ret;
8897 }
8898
8899 /*
8900  * Do all special steps to set up loopback.
8901  */
8902 static int init_loopback(struct hfi1_devdata *dd)
8903 {
8904         dd_dev_info(dd, "Entering loopback mode\n");
8905
8906         /* all loopbacks should disable self GUID check */
8907         write_csr(dd, DC_DC8051_CFG_MODE,
8908                   (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
8909
8910         /*
8911          * The simulator has only one loopback option - LCB.  Switch
8912          * to that option, which includes quick link up.
8913          *
8914          * Accept all valid loopback values.
8915          */
8916         if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
8917             (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
8918              loopback == LOOPBACK_CABLE)) {
8919                 loopback = LOOPBACK_LCB;
8920                 quick_linkup = 1;
8921                 return 0;
8922         }
8923
8924         /* handle serdes loopback */
8925         if (loopback == LOOPBACK_SERDES) {
8926                 /* internal serdes loopback needs quick linkup on RTL */
8927                 if (dd->icode == ICODE_RTL_SILICON)
8928                         quick_linkup = 1;
8929                 return set_serdes_loopback_mode(dd);
8930         }
8931
8932         /* LCB loopback - handled at poll time */
8933         if (loopback == LOOPBACK_LCB) {
8934                 quick_linkup = 1; /* LCB is always quick linkup */
8935
8936                 /* not supported in emulation due to emulation RTL changes */
8937                 if (dd->icode == ICODE_FPGA_EMULATION) {
8938                         dd_dev_err(dd,
8939                                    "LCB loopback not supported in emulation\n");
8940                         return -EINVAL;
8941                 }
8942                 return 0;
8943         }
8944
8945         /* external cable loopback requires no extra steps */
8946         if (loopback == LOOPBACK_CABLE)
8947                 return 0;
8948
8949         dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
8950         return -EINVAL;
8951 }
8952
8953 /*
8954  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
8955  * used in the Verify Capability link width attribute.
8956  */
8957 static u16 opa_to_vc_link_widths(u16 opa_widths)
8958 {
8959         int i;
8960         u16 result = 0;
8961
8962         static const struct link_bits {
8963                 u16 from;
8964                 u16 to;
8965         } opa_link_xlate[] = {
8966                 { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
8967                 { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
8968                 { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
8969                 { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
8970         };
8971
8972         for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
8973                 if (opa_widths & opa_link_xlate[i].from)
8974                         result |= opa_link_xlate[i].to;
8975         }
8976         return result;
8977 }
8978
8979 /*
8980  * Set link attributes before moving to polling.
8981  */
8982 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
8983 {
8984         struct hfi1_devdata *dd = ppd->dd;
8985         u8 enable_lane_tx;
8986         u8 tx_polarity_inversion;
8987         u8 rx_polarity_inversion;
8988         int ret;
8989
8990         /* reset our fabric serdes to clear any lingering problems */
8991         fabric_serdes_reset(dd);
8992
8993         /* set the local tx rate - need to read-modify-write */
8994         ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
8995                                &rx_polarity_inversion, &ppd->local_tx_rate);
8996         if (ret)
8997                 goto set_local_link_attributes_fail;
8998
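             /*
              * For 8051 firmware older than 0.20 the tx rate is a single
              * select (1 = 25G, 0 = 12.5G); for newer firmware it is a
              * rate mask (bit 1 = 25G, bit 0 = 12.5G).
              */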
8999         if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9000                 /* set the tx rate to the fastest enabled */
9001                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9002                         ppd->local_tx_rate = 1;
9003                 else
9004                         ppd->local_tx_rate = 0;
9005         } else {
9006                 /* set the tx rate to all enabled */
9007                 ppd->local_tx_rate = 0;
9008                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9009                         ppd->local_tx_rate |= 2;
9010                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9011                         ppd->local_tx_rate |= 1;
9012         }
9013
9014         enable_lane_tx = 0xF; /* enable all four lanes */
9015         ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9016                                 rx_polarity_inversion, ppd->local_tx_rate);
9017         if (ret != HCMD_SUCCESS)
9018                 goto set_local_link_attributes_fail;
9019
9020         /*
9021          * DC supports continuous updates.
9022          */
9023         ret = write_vc_local_phy(dd,
9024                                  0 /* no power management */,
9025                                  1 /* continuous updates */);
9026         if (ret != HCMD_SUCCESS)
9027                 goto set_local_link_attributes_fail;
9028
9029         /* z=1 in the next call: AU of 0 is not supported by the hardware */
9030         ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9031                                     ppd->port_crc_mode_enabled);
9032         if (ret != HCMD_SUCCESS)
9033                 goto set_local_link_attributes_fail;
9034
9035         ret = write_vc_local_link_width(dd, 0, 0,
9036                                         opa_to_vc_link_widths(
9037                                                 ppd->link_width_enabled));
9038         if (ret != HCMD_SUCCESS)
9039                 goto set_local_link_attributes_fail;
9040
9041         /* let peer know who we are */
9042         ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9043         if (ret == HCMD_SUCCESS)
9044                 return 0;
9045
9046 set_local_link_attributes_fail:
9047         dd_dev_err(dd,
9048                    "Failed to set local link attributes, return 0x%x\n",
9049                    ret);
9050         return ret;
9051 }
9052
9053 /*
9054  * Call this to start the link.  Schedule a retry if the cable is not
9055  * present or if unable to start polling.  Do not do anything if the
9056  * link is disabled.  Returns 0 if the link is disabled or moved to polling.
9057  */
9058 int start_link(struct hfi1_pportdata *ppd)
9059 {
9060         if (!ppd->link_enabled) {
9061                 dd_dev_info(ppd->dd,
9062                             "%s: stopping link start because link is disabled\n",
9063                             __func__);
9064                 return 0;
9065         }
9066         if (!ppd->driver_link_ready) {
9067                 dd_dev_info(ppd->dd,
9068                             "%s: stopping link start because driver is not ready\n",
9069                             __func__);
9070                 return 0;
9071         }
9072
9073         if (qsfp_mod_present(ppd) || loopback == LOOPBACK_SERDES ||
9074             loopback == LOOPBACK_LCB ||
9075             ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9076                 return set_link_state(ppd, HLS_DN_POLL);
9077
9078         dd_dev_info(ppd->dd,
9079                     "%s: stopping link start because no cable is present\n",
9080                     __func__);
9081         return -EAGAIN;
9082 }
9083
9084 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9085 {
9086         struct hfi1_devdata *dd = ppd->dd;
9087         u64 mask;
9088         unsigned long timeout;
9089
9090         /*
9091          * Check for QSFP interrupt for t_init (SFF 8679)
9092          */
9093         timeout = jiffies + msecs_to_jiffies(2000);
9094         while (1) {
9095                 mask = read_csr(dd, dd->hfi1_id ?
9096                                 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
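                     /* INT_N is active low: a zero reading means it has asserted */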
9097                 if (!(mask & QSFP_HFI0_INT_N)) {
9098                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9099                                   ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9100                         break;
9101                 }
9102                 if (time_after(jiffies, timeout)) {
9103                         dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9104                                     __func__);
9105                         break;
9106                 }
9107                 udelay(2);
9108         }
9109 }
9110
9111 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9112 {
9113         struct hfi1_devdata *dd = ppd->dd;
9114         u64 mask;
9115
9116         mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9117         if (enable)
9118                 mask |= (u64)QSFP_HFI0_INT_N;
9119         else
9120                 mask &= ~(u64)QSFP_HFI0_INT_N;
9121         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9122 }
9123
9124 void reset_qsfp(struct hfi1_pportdata *ppd)
9125 {
9126         struct hfi1_devdata *dd = ppd->dd;
9127         u64 mask, qsfp_mask;
9128
9129         /* Disable INT_N from triggering QSFP interrupts */
9130         set_qsfp_int_n(ppd, 0);
9131
9132         /* Reset the QSFP */
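             /* RESET_N is active low: enable the output, drive it low, then release it */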
9133         mask = (u64)QSFP_HFI0_RESET_N;
9134         qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
9135         qsfp_mask |= mask;
9136         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
9137
9138         qsfp_mask = read_csr(dd,
9139                              dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9140         qsfp_mask &= ~mask;
9141         write_csr(dd,
9142                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9143
9144         udelay(10);
9145
9146         qsfp_mask |= mask;
9147         write_csr(dd,
9148                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9149
9150         wait_for_qsfp_init(ppd);
9151
9152         /*
9153          * Allow INT_N to trigger the QSFP interrupt to watch
9154          * for alarms and warnings
9155          */
9156         set_qsfp_int_n(ppd, 1);
9157 }
9158
9159 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9160                                         u8 *qsfp_interrupt_status)
9161 {
9162         struct hfi1_devdata *dd = ppd->dd;
9163
9164         if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9165             (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9166                 dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9167                             __func__);
9168
9169         if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9170             (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9171                 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9172                             __func__);
9173
9174         if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9175             (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9176                 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9177                             __func__);
9178
9179         if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9180             (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9181                 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9182                             __func__);
9183
9184         /* Byte 2 is vendor specific */
9185
9186         if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9187             (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9188                 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9189                             __func__);
9190
9191         if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9192             (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9193                 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9194                             __func__);
9195
9196         if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9197             (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9198                 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9199                             __func__);
9200
9201         if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9202             (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9203                 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9204                             __func__);
9205
9206         if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9207             (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9208                 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9209                             __func__);
9210
9211         if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9212             (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9213                 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9214                             __func__);
9215
9216         if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9217             (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9218                 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9219                             __func__);
9220
9221         if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9222             (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9223                 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9224                             __func__);
9225
9226         if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9227             (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9228                 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9229                             __func__);
9230
9231         if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9232             (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9233                 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9234                             __func__);
9235
9236         if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9237             (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9238                 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9239                             __func__);
9240
9241         if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9242             (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9243                 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9244                             __func__);
9245
9246         /* Bytes 9-10 and 11-12 are reserved */
9247         /* Bytes 13-15 are vendor specific */
9248
9249         return 0;
9250 }
9251
9252 /* This routine will only be scheduled if the QSFP module is present */
9253 void qsfp_event(struct work_struct *work)
9254 {
9255         struct qsfp_data *qd;
9256         struct hfi1_pportdata *ppd;
9257         struct hfi1_devdata *dd;
9258
9259         qd = container_of(work, struct qsfp_data, qsfp_work);
9260         ppd = qd->ppd;
9261         dd = ppd->dd;
9262
9263         /* Sanity check */
9264         if (!qsfp_mod_present(ppd))
9265                 return;
9266
9267         /*
9268          * Turn the DC back on after the cable has been
9269          * re-inserted. Up until now, the DC has been in
9270          * reset to save power.
9271          */
9272         dc_start(dd);
9273
9274         if (qd->cache_refresh_required) {
9275                 set_qsfp_int_n(ppd, 0);
9276
9277                 wait_for_qsfp_init(ppd);
9278
9279                 /*
9280                  * Allow INT_N to trigger the QSFP interrupt to watch
9281                  * for alarms and warnings
9282                  */
9283                 set_qsfp_int_n(ppd, 1);
9284
9285                 tune_serdes(ppd);
9286
9287                 start_link(ppd);
9288         }
9289
9290         if (qd->check_interrupt_flags) {
9291                 u8 qsfp_interrupt_status[16] = {0,};
9292
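                     /* the alarm/warning flag bytes start at QSFP byte 6 */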
9293                 if (qsfp_read(ppd, dd->hfi1_id, 6,
9294                               &qsfp_interrupt_status[0], 16) != 16) {
9295                         dd_dev_info(dd,
9296                                     "%s: Failed to read status of QSFP module\n",
9297                                     __func__);
9298                 } else {
9299                         unsigned long flags;
9300
9301                         handle_qsfp_error_conditions(
9302                                         ppd, qsfp_interrupt_status);
9303                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9304                         ppd->qsfp_info.check_interrupt_flags = 0;
9305                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9306                                                flags);
9307                 }
9308         }
9309 }
9310
9311 static void init_qsfp_int(struct hfi1_devdata *dd)
9312 {
9313         struct hfi1_pportdata *ppd = dd->pport;
9314         u64 qsfp_mask, cce_int_mask;
9315         const int qsfp1_int_smask = QSFP1_INT % 64;
9316         const int qsfp2_int_smask = QSFP2_INT % 64;
9317
9318         /*
9319          * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
9320          * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
9321          * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
9322          * the index of the appropriate CSR in the CCEIntMask CSR array
9323          */
9324         cce_int_mask = read_csr(dd, CCE_INT_MASK +
9325                                 (8 * (QSFP1_INT / 64)));
9326         if (dd->hfi1_id) {
9327                 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9328                 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9329                           cce_int_mask);
9330         } else {
9331                 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9332                 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9333                           cce_int_mask);
9334         }
9335
9336         qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9337         /* Clear current status to avoid spurious interrupts */
9338         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9339                   qsfp_mask);
9340         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9341                   qsfp_mask);
9342
9343         set_qsfp_int_n(ppd, 0);
9344
9345         /* Handle active low nature of INT_N and MODPRST_N pins */
9346         if (qsfp_mod_present(ppd))
9347                 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9348         write_csr(dd,
9349                   dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9350                   qsfp_mask);
9351 }
9352
9353 /*
9354  * Do a one-time initialize of the LCB block.
9355  */
9356 static void init_lcb(struct hfi1_devdata *dd)
9357 {
9358         /* simulator does not correctly handle LCB cclk loopback, skip */
9359         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9360                 return;
9361
9362         /* the DC has been reset earlier in the driver load */
9363
9364         /* set LCB for cclk loopback on the port */
9365         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9366         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9367         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9368         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9369         write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9370         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9371         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9372 }
9373
9374 int bringup_serdes(struct hfi1_pportdata *ppd)
9375 {
9376         struct hfi1_devdata *dd = ppd->dd;
9377         u64 guid;
9378         int ret;
9379
9380         if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9381                 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9382
9383         guid = ppd->guid;
9384         if (!guid) {
9385                 if (dd->base_guid)
9386                         guid = dd->base_guid + ppd->port - 1;
9387                 ppd->guid = guid;
9388         }
9389
9390         /* Set linkinit_reason on power up per OPA spec */
9391         ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9392
9393         /* one-time init of the LCB */
9394         init_lcb(dd);
9395
9396         if (loopback) {
9397                 ret = init_loopback(dd);
9398                 if (ret < 0)
9399                         return ret;
9400         }
9401
9402         /*
9403          * Tune the SERDES to a ballpark setting for optimal signal and bit
9404          * error rate.  Needs to be done before starting the link.
9405          */
9406         tune_serdes(ppd);
9407
9408         return start_link(ppd);
9409 }
9410
9411 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9412 {
9413         struct hfi1_devdata *dd = ppd->dd;
9414
9415         /*
9416          * Shut down the link and keep it down.  First clear
9417          * driver_link_ready so the driver no longer allows the link to
9418          * come up.  Then make sure the link is not automatically
9419          * restarted (link_enabled).  Cancel any pending restart.
9420          * Finally, go offline.
9421          */
9422         ppd->driver_link_ready = 0;
9423         ppd->link_enabled = 0;
9424
9425         ppd->offline_disabled_reason =
9426                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9427         set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9428                              OPA_LINKDOWN_REASON_SMA_DISABLED);
9429         set_link_state(ppd, HLS_DN_OFFLINE);
9430
9431         /* disable the port */
9432         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9433 }
9434
9435 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9436 {
9437         struct hfi1_pportdata *ppd;
9438         int i;
9439
9440         ppd = (struct hfi1_pportdata *)(dd + 1);
9441         for (i = 0; i < dd->num_pports; i++, ppd++) {
9442                 ppd->ibport_data.rvp.rc_acks = NULL;
9443                 ppd->ibport_data.rvp.rc_qacks = NULL;
9444                 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9445                 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9446                 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9447                 if (!ppd->ibport_data.rvp.rc_acks ||
9448                     !ppd->ibport_data.rvp.rc_delayed_comp ||
9449                     !ppd->ibport_data.rvp.rc_qacks)
9450                         return -ENOMEM;
9451         }
9452
9453         return 0;
9454 }
9455
9456 static const char * const pt_names[] = {
9457         "expected",
9458         "eager",
9459         "invalid"
9460 };
9461
9462 static const char *pt_name(u32 type)
9463 {
9464         return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9465 }
9466
9467 /*
9468  * index is the index into the receive array
9469  */
9470 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9471                   u32 type, unsigned long pa, u16 order)
9472 {
9473         u64 reg;
9474         void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9475                               (dd->kregbase + RCV_ARRAY));
9476
9477         if (!(dd->flags & HFI1_PRESENT))
9478                 goto done;
9479
9480         if (type == PT_INVALID) {
9481                 pa = 0;
9482         } else if (type > PT_INVALID) {
9483                 dd_dev_err(dd,
9484                            "unexpected receive array type %u for index %u, not handled\n",
9485                            type, index);
9486                 goto done;
9487         }
9488
9489         hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9490                   pt_name(type), index, pa, (unsigned long)order);
9491
9492 #define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
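             /* build the entry: write enable, buffer size (order), and 4KB-aligned address */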
9493         reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9494                 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9495                 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9496                                         << RCV_ARRAY_RT_ADDR_SHIFT;
9497         writeq(reg, base + (index * 8));
9498
9499         if (type == PT_EAGER)
9500                 /*
9501                  * Eager entries are written one-by-one so we have to push them
9502                  * after we write the entry.
9503                  */
9504                 flush_wc();
9505 done:
9506         return;
9507 }
9508
9509 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9510 {
9511         struct hfi1_devdata *dd = rcd->dd;
9512         u32 i;
9513
9514         /* this could be optimized */
9515         for (i = rcd->eager_base; i < rcd->eager_base +
9516                      rcd->egrbufs.alloced; i++)
9517                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9518
9519         for (i = rcd->expected_base;
9520                         i < rcd->expected_base + rcd->expected_count; i++)
9521                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9522 }
9523
9524 int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9525                         struct hfi1_ctxt_info *kinfo)
9526 {
9527         kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9528                 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9529         return 0;
9530 }
9531
9532 struct hfi1_message_header *hfi1_get_msgheader(
9533                                 struct hfi1_devdata *dd, __le32 *rhf_addr)
9534 {
9535         u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9536
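             /* step back from the RHF to the entry start, then forward to the header */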
9537         return (struct hfi1_message_header *)
9538                 (rhf_addr - dd->rhf_offset + offset);
9539 }
9540
9541 static const char * const ib_cfg_name_strings[] = {
9542         "HFI1_IB_CFG_LIDLMC",
9543         "HFI1_IB_CFG_LWID_DG_ENB",
9544         "HFI1_IB_CFG_LWID_ENB",
9545         "HFI1_IB_CFG_LWID",
9546         "HFI1_IB_CFG_SPD_ENB",
9547         "HFI1_IB_CFG_SPD",
9548         "HFI1_IB_CFG_RXPOL_ENB",
9549         "HFI1_IB_CFG_LREV_ENB",
9550         "HFI1_IB_CFG_LINKLATENCY",
9551         "HFI1_IB_CFG_HRTBT",
9552         "HFI1_IB_CFG_OP_VLS",
9553         "HFI1_IB_CFG_VL_HIGH_CAP",
9554         "HFI1_IB_CFG_VL_LOW_CAP",
9555         "HFI1_IB_CFG_OVERRUN_THRESH",
9556         "HFI1_IB_CFG_PHYERR_THRESH",
9557         "HFI1_IB_CFG_LINKDEFAULT",
9558         "HFI1_IB_CFG_PKEYS",
9559         "HFI1_IB_CFG_MTU",
9560         "HFI1_IB_CFG_LSTATE",
9561         "HFI1_IB_CFG_VL_HIGH_LIMIT",
9562         "HFI1_IB_CFG_PMA_TICKS",
9563         "HFI1_IB_CFG_PORT"
9564 };
9565
9566 static const char *ib_cfg_name(int which)
9567 {
9568         if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9569                 return "invalid";
9570         return ib_cfg_name_strings[which];
9571 }
9572
9573 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9574 {
9575         struct hfi1_devdata *dd = ppd->dd;
9576         int val = 0;
9577
9578         switch (which) {
9579         case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9580                 val = ppd->link_width_enabled;
9581                 break;
9582         case HFI1_IB_CFG_LWID: /* currently active Link-width */
9583                 val = ppd->link_width_active;
9584                 break;
9585         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9586                 val = ppd->link_speed_enabled;
9587                 break;
9588         case HFI1_IB_CFG_SPD: /* current Link speed */
9589                 val = ppd->link_speed_active;
9590                 break;
9591
9592         case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9593         case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9594         case HFI1_IB_CFG_LINKLATENCY:
9595                 goto unimplemented;
9596
9597         case HFI1_IB_CFG_OP_VLS:
9598                 val = ppd->vls_operational;
9599                 break;
9600         case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9601                 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9602                 break;
9603         case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9604                 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9605                 break;
9606         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9607                 val = ppd->overrun_threshold;
9608                 break;
9609         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9610                 val = ppd->phy_error_threshold;
9611                 break;
9612         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9613                 val = dd->link_default;
9614                 break;
9615
9616         case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9617         case HFI1_IB_CFG_PMA_TICKS:
9618         default:
9619 unimplemented:
9620                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
9621                         dd_dev_info(
9622                                 dd,
9623                                 "%s: which %s: not implemented\n",
9624                                 __func__,
9625                                 ib_cfg_name(which));
9626                 break;
9627         }
9628
9629         return val;
9630 }
9631
9632 /*
9633  * The largest MAD packet size.
9634  */
9635 #define MAX_MAD_PACKET 2048
9636
9637 /*
9638  * Return the maximum header bytes that can go on the _wire_
9639  * for this device. This count includes the ICRC which is
9640  * not part of the packet held in memory but it is appended
9641  * by the HW.
9642  * This is dependent on the device's receive header entry size.
9643  * HFI allows this to be set per-receive context, but the
9644  * driver presently enforces a global value.
9645  */
9646 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9647 {
9648         /*
9649          * The maximum non-payload (MTU) bytes in LRH.PktLen are
9650          * the Receive Header Entry Size minus the PBC (or RHF) size
9651          * plus one DW for the ICRC appended by HW.
9652          *
9653          * dd->rcd[0].rcvhdrqentsize is in DW.
9654          * We use rcd[0] as all contexts will have the same value. Also,
9655          * the first kernel context would have been allocated by now so
9656          * we are guaranteed a valid value.
9657          */
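             /* e.g. a 32 DW entry size yields (32 - 2 + 1) * 4 = 124 bytes */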
9658         return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9659 }
9660
9661 /*
9662  * Set Send Length
9663  * @ppd - per port data
9664  *
9665  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
9666  * registers compare against LRH.PktLen, so use the max bytes included
9667  * in the LRH.
9668  *
9669  * This routine changes all VL values except VL15, which it maintains at
9670  * the same value.
9671  */
9672 static void set_send_length(struct hfi1_pportdata *ppd)
9673 {
9674         struct hfi1_devdata *dd = ppd->dd;
9675         u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9676         u32 maxvlmtu = dd->vld[15].mtu;
9677         u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9678                               & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9679                 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9680         int i;
9681
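             /* VLs 0-3 accumulate in len1 (SendLenCheck0), VLs 4-7 in len2 (SendLenCheck1) */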
9682         for (i = 0; i < ppd->vls_supported; i++) {
9683                 if (dd->vld[i].mtu > maxvlmtu)
9684                         maxvlmtu = dd->vld[i].mtu;
9685                 if (i <= 3)
9686                         len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9687                                  & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9688                                 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9689                 else
9690                         len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9691                                  & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9692                                 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9693         }
9694         write_csr(dd, SEND_LEN_CHECK0, len1);
9695         write_csr(dd, SEND_LEN_CHECK1, len2);
9696         /* adjust kernel credit return thresholds based on new MTUs */
9697         /* all kernel receive contexts have the same hdrqentsize */
9698         for (i = 0; i < ppd->vls_supported; i++) {
9699                 sc_set_cr_threshold(dd->vld[i].sc,
9700                                     sc_mtu_to_threshold(dd->vld[i].sc,
9701                                                         dd->vld[i].mtu,
9702                                                         dd->rcd[0]->
9703                                                         rcvhdrqentsize));
9704         }
9705         sc_set_cr_threshold(dd->vld[15].sc,
9706                             sc_mtu_to_threshold(dd->vld[15].sc,
9707                                                 dd->vld[15].mtu,
9708                                                 dd->rcd[0]->rcvhdrqentsize));
9709
9710         /* Adjust maximum MTU for the port in DC */
9711         dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9712                 (ilog2(maxvlmtu >> 8) + 1);
9713         len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9714         len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9715         len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9716                 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9717         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9718 }
9719
9720 static void set_lidlmc(struct hfi1_pportdata *ppd)
9721 {
9722         int i;
9723         u64 sreg = 0;
9724         struct hfi1_devdata *dd = ppd->dd;
9725         u32 mask = ~((1U << ppd->lmc) - 1);
9726         u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9727
9728         if (dd->hfi1_snoop.mode_flag)
9729                 dd_dev_info(dd, "Set lid/lmc while snooping\n");
9730
9731         c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9732                 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9733         c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9734                         << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9735               ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9736                         << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9737         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9738
9739         /*
9740          * Iterate over all the send contexts and set their SLID check
9741          */
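             /* the low LMC bits of the SLID are wildcarded by the mask */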
9742         sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9743                         SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9744                (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9745                         SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9746
9747         for (i = 0; i < dd->chip_send_contexts; i++) {
9748                 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9749                           i, (u32)sreg);
9750                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9751         }
9752
9753         /* Now we have to do the same thing for the sdma engines */
9754         sdma_update_lmc(dd, mask, ppd->lid);
9755 }
9756
9757 static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9758 {
9759         unsigned long timeout;
9760         u32 curr_state;
9761
9762         timeout = jiffies + msecs_to_jiffies(msecs);
9763         while (1) {
9764                 curr_state = read_physical_state(dd);
9765                 if (curr_state == state)
9766                         break;
9767                 if (time_after(jiffies, timeout)) {
9768                         dd_dev_err(dd,
9769                                    "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9770                                    state, curr_state);
9771                         return -ETIMEDOUT;
9772                 }
9773                 usleep_range(1950, 2050); /* sleep 2ms-ish */
9774         }
9775
9776         return 0;
9777 }
9778
9779 /*
9780  * Helper for set_link_state().  Do not call except from that routine.
9781  * Expects ppd->hls_mutex to be held.
9782  *
9783  * @rem_reason value to be sent to the neighbor
9784  *
9785  * LinkDownReasons only set if transition succeeds.
9786  */
9787 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9788 {
9789         struct hfi1_devdata *dd = ppd->dd;
9790         u32 pstate, previous_state;
9791         u32 last_local_state;
9792         u32 last_remote_state;
9793         int ret;
9794         int do_transition;
9795         int do_wait;
9796
9797         previous_state = ppd->host_link_state;
9798         ppd->host_link_state = HLS_GOING_OFFLINE;
9799         pstate = read_physical_state(dd);
9800         if (pstate == PLS_OFFLINE) {
9801                 do_transition = 0;      /* in right state */
9802                 do_wait = 0;            /* ...no need to wait */
9803         } else if ((pstate & 0xff) == PLS_OFFLINE) {
9804                 do_transition = 0;      /* in an offline transient state */
9805                 do_wait = 1;            /* ...wait for it to settle */
9806         } else {
9807                 do_transition = 1;      /* need to move to offline */
9808                 do_wait = 1;            /* ...will need to wait */
9809         }
9810
9811         if (do_transition) {
9812                 ret = set_physical_link_state(dd,
9813                                               (rem_reason << 8) | PLS_OFFLINE);
9814
9815                 if (ret != HCMD_SUCCESS) {
9816                         dd_dev_err(dd,
9817                                    "Failed to transition to Offline link state, return %d\n",
9818                                    ret);
9819                         return -EINVAL;
9820                 }
9821                 if (ppd->offline_disabled_reason ==
9822                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
9823                         ppd->offline_disabled_reason =
9824                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
9825         }
9826
9827         if (do_wait) {
9828                 /* it can take a while for the link to go down */
9829                 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9830                 if (ret < 0)
9831                         return ret;
9832         }
9833
9834         /* make sure the logical state is also down */
9835         wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9836
9837         /*
9838          * Now in charge of LCB - must be after the physical state is
9839          * offline.quiet and before host_link_state is changed.
9840          */
9841         set_host_lcb_access(dd);
9842         write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9843         ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9844
9845         if (ppd->port_type == PORT_TYPE_QSFP &&
9846             ppd->qsfp_info.limiting_active &&
9847             qsfp_mod_present(ppd)) {
9848                 set_qsfp_tx(ppd, 0);
9849         }
9850
9851         /*
9852          * The LNI has a mandatory wait time after the physical state
9853          * moves to Offline.Quiet.  The wait time may be different
9854          * depending on how the link went down.  The 8051 firmware
9855          * will observe the needed wait time and only move to ready
9856          * when that is completed.  The largest of the quiet timeouts
9857          * is 6s, so wait that long and then at least 0.5s more for
9858          * other transitions, and another 0.5s for a buffer.
9859          */
9860         ret = wait_fm_ready(dd, 7000);
9861         if (ret) {
9862                 dd_dev_err(dd,
9863                            "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9864                 /* state is really offline, so make it so */
9865                 ppd->host_link_state = HLS_DN_OFFLINE;
9866                 return ret;
9867         }
9868
9869         /*
9870          * The state is now offline and the 8051 is ready to accept host
9871          * requests.
9872          *      - change our state
9873          *      - notify others if we were previously in a linkup state
9874          */
9875         ppd->host_link_state = HLS_DN_OFFLINE;
9876         if (previous_state & HLS_UP) {
9877                 /* went down while link was up */
9878                 handle_linkup_change(dd, 0);
9879         } else if (previous_state
9880                         & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
9881                 /* went down while attempting link up */
9882                 /* byte 1 of last_*_state is the failure reason */
9883                 read_last_local_state(dd, &last_local_state);
9884                 read_last_remote_state(dd, &last_remote_state);
9885                 dd_dev_err(dd,
9886                            "LNI failure last states: local 0x%08x, remote 0x%08x\n",
9887                            last_local_state, last_remote_state);
9888         }
9889
9890         /* the active link width (downgrade) is 0 on link down */
9891         ppd->link_width_active = 0;
9892         ppd->link_width_downgrade_tx_active = 0;
9893         ppd->link_width_downgrade_rx_active = 0;
9894         ppd->current_egress_rate = 0;
9895         return 0;
9896 }
9897
9898 /* return the link state name */
9899 static const char *link_state_name(u32 state)
9900 {
9901         const char *name;
9902         int n = ilog2(state);
9903         static const char * const names[] = {
9904                 [__HLS_UP_INIT_BP]       = "INIT",
9905                 [__HLS_UP_ARMED_BP]      = "ARMED",
9906                 [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
9907                 [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
9908                 [__HLS_DN_POLL_BP]       = "POLL",
9909                 [__HLS_DN_DISABLE_BP]    = "DISABLE",
9910                 [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
9911                 [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
9912                 [__HLS_GOING_UP_BP]      = "GOING_UP",
9913                 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
9914                 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
9915         };
9916
9917         name = n < ARRAY_SIZE(names) ? names[n] : NULL;
9918         return name ? name : "unknown";
9919 }
9920
9921 /* return the link state reason name */
9922 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
9923 {
9924         if (state == HLS_UP_INIT) {
9925                 switch (ppd->linkinit_reason) {
9926                 case OPA_LINKINIT_REASON_LINKUP:
9927                         return "(LINKUP)";
9928                 case OPA_LINKINIT_REASON_FLAPPING:
9929                         return "(FLAPPING)";
9930                 case OPA_LINKINIT_OUTSIDE_POLICY:
9931                         return "(OUTSIDE_POLICY)";
9932                 case OPA_LINKINIT_QUARANTINED:
9933                         return "(QUARANTINED)";
9934                 case OPA_LINKINIT_INSUFIC_CAPABILITY:
9935                         return "(INSUFIC_CAPABILITY)";
9936                 default:
9937                         break;
9938                 }
9939         }
9940         return "";
9941 }
9942
9943 /*
9944  * driver_physical_state - convert the driver's notion of a port's
9945  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
9946  * Return -1 (converted to a u32) to indicate error.
9947  */
9948 u32 driver_physical_state(struct hfi1_pportdata *ppd)
9949 {
9950         switch (ppd->host_link_state) {
9951         case HLS_UP_INIT:
9952         case HLS_UP_ARMED:
9953         case HLS_UP_ACTIVE:
9954                 return IB_PORTPHYSSTATE_LINKUP;
9955         case HLS_DN_POLL:
9956                 return IB_PORTPHYSSTATE_POLLING;
9957         case HLS_DN_DISABLE:
9958                 return IB_PORTPHYSSTATE_DISABLED;
9959         case HLS_DN_OFFLINE:
9960                 return OPA_PORTPHYSSTATE_OFFLINE;
9961         case HLS_VERIFY_CAP:
9962                 return IB_PORTPHYSSTATE_POLLING;
9963         case HLS_GOING_UP:
9964                 return IB_PORTPHYSSTATE_POLLING;
9965         case HLS_GOING_OFFLINE:
9966                 return OPA_PORTPHYSSTATE_OFFLINE;
9967         case HLS_LINK_COOLDOWN:
9968                 return OPA_PORTPHYSSTATE_OFFLINE;
9969         case HLS_DN_DOWNDEF:
9970         default:
9971                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9972                            ppd->host_link_state);
9973                 return -1;
9974         }
9975 }
9976
9977 /*
9978  * driver_logical_state - convert the driver's notion of a port's
9979  * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
9980  * (converted to a u32) to indicate error.
9981  */
9982 u32 driver_logical_state(struct hfi1_pportdata *ppd)
9983 {
9984         if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
9985                 return IB_PORT_DOWN;
9986
9987         switch (ppd->host_link_state & HLS_UP) {
9988         case HLS_UP_INIT:
9989                 return IB_PORT_INIT;
9990         case HLS_UP_ARMED:
9991                 return IB_PORT_ARMED;
9992         case HLS_UP_ACTIVE:
9993                 return IB_PORT_ACTIVE;
9994         default:
9995                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
9996                            ppd->host_link_state);
9997                 return -1;
9998         }
9999 }
10000
10001 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10002                           u8 neigh_reason, u8 rem_reason)
10003 {
10004         if (ppd->local_link_down_reason.latest == 0 &&
10005             ppd->neigh_link_down_reason.latest == 0) {
10006                 ppd->local_link_down_reason.latest = lcl_reason;
10007                 ppd->neigh_link_down_reason.latest = neigh_reason;
10008                 ppd->remote_link_down_reason = rem_reason;
10009         }
10010 }
10011
10012 /*
10013  * Change the physical and/or logical link state.
10014  *
10015  * Do not call this routine while inside an interrupt.  It contains
10016  * calls to routines that can take multiple seconds to finish.
10017  *
10018  * Returns 0 on success, -errno on failure.
10019  */
10020 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10021 {
10022         struct hfi1_devdata *dd = ppd->dd;
10023         struct ib_event event = {.device = NULL};
10024         int ret1, ret = 0;
10025         int was_up, is_down;
10026         int orig_new_state, poll_bounce;
10027
10028         mutex_lock(&ppd->hls_lock);
10029
10030         orig_new_state = state;
10031         if (state == HLS_DN_DOWNDEF)
10032                 state = dd->link_default;
10033
10034         /* interpret poll -> poll as a link bounce */
10035         poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10036                       state == HLS_DN_POLL;
10037
10038         dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10039                     link_state_name(ppd->host_link_state),
10040                     link_state_name(orig_new_state),
10041                     poll_bounce ? "(bounce) " : "",
10042                     link_state_reason_name(ppd, state));
10043
10044         was_up = !!(ppd->host_link_state & HLS_UP);
10045
10046         /*
10047          * If we're going to a (HLS_*) link state that implies the logical
10048          * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10049          * reset is_sm_config_started to 0.
10050          */
10051         if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10052                 ppd->is_sm_config_started = 0;
10053
10054         /*
10055          * Do nothing if the states match.  Let a poll-to-poll link bounce
10056          * go through.
10057          */
10058         if (ppd->host_link_state == state && !poll_bounce)
10059                 goto done;
10060
10061         switch (state) {
10062         case HLS_UP_INIT:
10063                 if (ppd->host_link_state == HLS_DN_POLL &&
10064                     (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10065                         /*
10066                          * Quick link up jumps from polling to here.
10067                          *
10068                          * Whether in normal or loopback mode, the
10069                          * simulator jumps from polling to link up.
10070                          * Accept that here.
10071                          */
10072                         /* OK */
10073                 } else if (ppd->host_link_state != HLS_GOING_UP) {
10074                         goto unexpected;
10075                 }
10076
10077                 ppd->host_link_state = HLS_UP_INIT;
10078                 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10079                 if (ret) {
10080                         /* logical state didn't change, stay at going_up */
10081                         ppd->host_link_state = HLS_GOING_UP;
10082                         dd_dev_err(dd,
10083                                    "%s: logical state did not change to INIT\n",
10084                                    __func__);
10085                 } else {
10086                         /* clear old transient LINKINIT_REASON code */
10087                         if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10088                                 ppd->linkinit_reason =
10089                                         OPA_LINKINIT_REASON_LINKUP;
10090
10091                         /* enable the port */
10092                         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10093
10094                         handle_linkup_change(dd, 1);
10095                 }
10096                 break;
10097         case HLS_UP_ARMED:
10098                 if (ppd->host_link_state != HLS_UP_INIT)
10099                         goto unexpected;
10100
10101                 ppd->host_link_state = HLS_UP_ARMED;
10102                 set_logical_state(dd, LSTATE_ARMED);
10103                 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10104                 if (ret) {
10105                         /* logical state didn't change, stay at init */
10106                         ppd->host_link_state = HLS_UP_INIT;
10107                         dd_dev_err(dd,
10108                                    "%s: logical state did not change to ARMED\n",
10109                                    __func__);
10110                 }
10111                 /*
10112                  * The simulator does not currently implement SMA messages,
10113                  * so neighbor_normal is not set.  Set it here when we first
10114                  * move to Armed.
10115                  */
10116                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10117                         ppd->neighbor_normal = 1;
10118                 break;
10119         case HLS_UP_ACTIVE:
10120                 if (ppd->host_link_state != HLS_UP_ARMED)
10121                         goto unexpected;
10122
10123                 ppd->host_link_state = HLS_UP_ACTIVE;
10124                 set_logical_state(dd, LSTATE_ACTIVE);
10125                 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10126                 if (ret) {
10127                         /* logical state didn't change, stay at armed */
10128                         ppd->host_link_state = HLS_UP_ARMED;
10129                         dd_dev_err(dd,
10130                                    "%s: logical state did not change to ACTIVE\n",
10131                                    __func__);
10132                 } else {
10133                         /* tell all engines to go running */
10134                         sdma_all_running(dd);
10135
10136                         /* Signal the IB layer that the port has gone active */
10137                         event.device = &dd->verbs_dev.rdi.ibdev;
10138                         event.element.port_num = ppd->port;
10139                         event.event = IB_EVENT_PORT_ACTIVE;
10140                 }
10141                 break;
10142         case HLS_DN_POLL:
10143                 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10144                      ppd->host_link_state == HLS_DN_OFFLINE) &&
10145                     dd->dc_shutdown)
10146                         dc_start(dd);
10147                 /* Hand LED control to the DC */
10148                 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10149
10150                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10151                         u8 tmp = ppd->link_enabled;
10152
10153                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10154                         if (ret) {
10155                                 ppd->link_enabled = tmp;
10156                                 break;
10157                         }
10158                         ppd->remote_link_down_reason = 0;
10159
10160                         if (ppd->driver_link_ready)
10161                                 ppd->link_enabled = 1;
10162                 }
10163
10164                 set_all_slowpath(ppd->dd);
10165                 ret = set_local_link_attributes(ppd);
10166                 if (ret)
10167                         break;
10168
10169                 ppd->port_error_action = 0;
10170                 ppd->host_link_state = HLS_DN_POLL;
10171
10172                 if (quick_linkup) {
10173                         /* quick linkup does not go into polling */
10174                         ret = do_quick_linkup(dd);
10175                 } else {
10176                         ret1 = set_physical_link_state(dd, PLS_POLLING);
10177                         if (ret1 != HCMD_SUCCESS) {
10178                                 dd_dev_err(dd,
10179                                            "Failed to transition to Polling link state, return 0x%x\n",
10180                                            ret1);
10181                                 ret = -EINVAL;
10182                         }
10183                 }
10184                 ppd->offline_disabled_reason =
10185                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10186                 /*
10187                  * If an error occurred above, go back to offline.  The
10188                  * caller may reschedule another attempt.
10189                  */
10190                 if (ret)
10191                         goto_offline(ppd, 0);
10192                 break;
10193         case HLS_DN_DISABLE:
10194                 /* link is disabled */
10195                 ppd->link_enabled = 0;
10196
10197                 /* allow any state to transition to disabled */
10198
10199                 /* must transition to offline first */
10200                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10201                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10202                         if (ret)
10203                                 break;
10204                         ppd->remote_link_down_reason = 0;
10205                 }
10206
10207                 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10208                 if (ret1 != HCMD_SUCCESS) {
10209                         dd_dev_err(dd,
10210                                    "Failed to transition to Disabled link state, return 0x%x\n",
10211                                    ret1);
10212                         ret = -EINVAL;
10213                         break;
10214                 }
10215                 ppd->host_link_state = HLS_DN_DISABLE;
10216                 dc_shutdown(dd);
10217                 break;
10218         case HLS_DN_OFFLINE:
10219                 if (ppd->host_link_state == HLS_DN_DISABLE)
10220                         dc_start(dd);
10221
10222                 /* allow any state to transition to offline */
10223                 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10224                 if (!ret)
10225                         ppd->remote_link_down_reason = 0;
10226                 break;
10227         case HLS_VERIFY_CAP:
10228                 if (ppd->host_link_state != HLS_DN_POLL)
10229                         goto unexpected;
10230                 ppd->host_link_state = HLS_VERIFY_CAP;
10231                 break;
10232         case HLS_GOING_UP:
10233                 if (ppd->host_link_state != HLS_VERIFY_CAP)
10234                         goto unexpected;
10235
10236                 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10237                 if (ret1 != HCMD_SUCCESS) {
10238                         dd_dev_err(dd,
10239                                    "Failed to transition to link up state, return 0x%x\n",
10240                                    ret1);
10241                         ret = -EINVAL;
10242                         break;
10243                 }
10244                 ppd->host_link_state = HLS_GOING_UP;
10245                 break;
10246
10247         case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10248         case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10249         default:
10250                 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10251                             __func__, state);
10252                 ret = -EINVAL;
10253                 break;
10254         }
10255
10256         is_down = !!(ppd->host_link_state & (HLS_DN_POLL |
10257                         HLS_DN_DISABLE | HLS_DN_OFFLINE));
10258
10259         if (was_up && is_down && ppd->local_link_down_reason.sma == 0 &&
10260             ppd->neigh_link_down_reason.sma == 0) {
10261                 ppd->local_link_down_reason.sma =
10262                   ppd->local_link_down_reason.latest;
10263                 ppd->neigh_link_down_reason.sma =
10264                   ppd->neigh_link_down_reason.latest;
10265         }
10266
10267         goto done;
10268
10269 unexpected:
10270         dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10271                    __func__, link_state_name(ppd->host_link_state),
10272                    link_state_name(state));
10273         ret = -EINVAL;
10274
10275 done:
10276         mutex_unlock(&ppd->hls_lock);
10277
10278         if (event.device)
10279                 ib_dispatch_event(&event);
10280
10281         return ret;
10282 }
10283
10284 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10285 {
10286         u64 reg;
10287         int ret = 0;
10288
10289         switch (which) {
10290         case HFI1_IB_CFG_LIDLMC:
10291                 set_lidlmc(ppd);
10292                 break;
10293         case HFI1_IB_CFG_VL_HIGH_LIMIT:
10294                 /*
10295                  * The VL Arbitrator high limit is sent in units of 4k
10296                  * bytes, while HFI stores it in units of 64 bytes.
10297                  */
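                      /* e.g. a value of 1 (4096 bytes) becomes 64 (64-byte units) */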
10298                 val *= 4096 / 64;
10299                 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10300                         << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10301                 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10302                 break;
10303         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10304                 /* HFI only supports POLL as the default link down state */
10305                 if (val != HLS_DN_POLL)
10306                         ret = -EINVAL;
10307                 break;
10308         case HFI1_IB_CFG_OP_VLS:
10309                 if (ppd->vls_operational != val) {
10310                         ppd->vls_operational = val;
10311                         if (!ppd->port)
10312                                 ret = -EINVAL;
10313                 }
10314                 break;
10315         /*
10316          * For link width, link width downgrade, and speed enable, always AND
10317          * the setting with what is actually supported.  This has two benefits.
10318          * First, enabled can't have unsupported values, no matter what the
10319          * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10320          * "fill in with your supported value" have all the bits in the
10321          * field set, so simply ANDing with supported has the desired result.
10322          */
10323         case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10324                 ppd->link_width_enabled = val & ppd->link_width_supported;
10325                 break;
10326         case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10327                 ppd->link_width_downgrade_enabled =
10328                                 val & ppd->link_width_downgrade_supported;
10329                 break;
10330         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10331                 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10332                 break;
10333         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10334                 /*
10335                  * HFI does not follow IB specs, save this value
10336                  * so we can report it, if asked.
10337                  */
10338                 ppd->overrun_threshold = val;
10339                 break;
10340         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10341                 /*
10342                  * HFI does not follow IB specs, save this value
10343                  * so we can report it, if asked.
10344                  */
10345                 ppd->phy_error_threshold = val;
10346                 break;
10347
10348         case HFI1_IB_CFG_MTU:
10349                 set_send_length(ppd);
10350                 break;
10351
10352         case HFI1_IB_CFG_PKEYS:
10353                 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10354                         set_partition_keys(ppd);
10355                 break;
10356
10357         default:
10358                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10359                         dd_dev_info(ppd->dd,
10360                                     "%s: which %s, val 0x%x: not implemented\n",
10361                                     __func__, ib_cfg_name(which), val);
10362                 break;
10363         }
10364         return ret;
10365 }
10366
10367 /* begin functions related to vl arbitration table caching */
10368 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10369 {
10370         int i;
10371
10372         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10373                         VL_ARB_LOW_PRIO_TABLE_SIZE);
10374         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10375                         VL_ARB_HIGH_PRIO_TABLE_SIZE);
10376
10377         /*
10378          * Note that we always return values directly from the
10379          * 'vl_arb_cache' (and do no CSR reads) in response to a
10380          * 'Get(VLArbTable)'. This is obviously correct after a
10381          * 'Set(VLArbTable)', since the cache will then be up to
10382          * date. But it's also correct prior to any 'Set(VLArbTable)'
10383          * since then both the cache, and the relevant h/w registers
10384          * will be zeroed.
10385          */
10386
10387         for (i = 0; i < MAX_PRIO_TABLE; i++)
10388                 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10389 }
10390
10391 /*
10392  * vl_arb_lock_cache
10393  *
10394  * All other vl_arb_* functions should be called only after locking
10395  * the cache.
10396  */
10397 static inline struct vl_arb_cache *
10398 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10399 {
10400         if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10401                 return NULL;
10402         spin_lock(&ppd->vl_arb_cache[idx].lock);
10403         return &ppd->vl_arb_cache[idx];
10404 }
10405
10406 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10407 {
10408         spin_unlock(&ppd->vl_arb_cache[idx].lock);
10409 }
10410
10411 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10412                              struct ib_vl_weight_elem *vl)
10413 {
10414         memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10415 }
10416
10417 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10418                              struct ib_vl_weight_elem *vl)
10419 {
10420         memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10421 }
10422
10423 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10424                               struct ib_vl_weight_elem *vl)
10425 {
10426         return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10427 }
10428
10429 /* end functions related to vl arbitration table caching */
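
/*
 * A minimal usage sketch for the cache helpers above (this mirrors how
 * fm_get_table()/fm_set_table() further below use them; the local variable
 * names are illustrative only):
 *
 *	struct vl_arb_cache *vlc;
 *	struct ib_vl_weight_elem elems[VL_ARB_TABLE_SIZE];
 *
 *	vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, elems);
 *	vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
 *
 * The same lock/operate/unlock pattern applies to vl_arb_set_cache() and
 * vl_arb_match_cache().
 */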
10430
10431 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10432                           u32 size, struct ib_vl_weight_elem *vl)
10433 {
10434         struct hfi1_devdata *dd = ppd->dd;
10435         u64 reg;
10436         unsigned int i, is_up = 0;
10437         int drain, ret = 0;
10438
10439         mutex_lock(&ppd->hls_lock);
10440
10441         if (ppd->host_link_state & HLS_UP)
10442                 is_up = 1;
10443
10444         drain = !is_ax(dd) && is_up;
10445
10446         if (drain)
10447                 /*
10448                  * Before adjusting VL arbitration weights, empty per-VL
10449                  * FIFOs, otherwise a packet whose VL weight is being
10450                  * set to 0 could get stuck in a FIFO with no chance to
10451                  * egress.
10452                  */
10453                 ret = stop_drain_data_vls(dd);
10454
10455         if (ret) {
10456                 dd_dev_err(
10457                         dd,
10458                         "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10459                         __func__);
10460                 goto err;
10461         }
10462
10463         for (i = 0; i < size; i++, vl++) {
10464                 /*
10465                  * NOTE: The low priority shift and mask are used here, but
10466                  * they are the same for both the low and high registers.
10467                  */
10468                 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10469                                 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10470                       | (((u64)vl->weight
10471                                 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10472                                 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10473                 write_csr(dd, target + (i * 8), reg);
10474         }
10475         pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10476
10477         if (drain)
10478                 open_fill_data_vls(dd); /* reopen all VLs */
10479
10480 err:
10481         mutex_unlock(&ppd->hls_lock);
10482
10483         return ret;
10484 }
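
/*
 * Illustrative packing example for set_vl_weights() (the element values
 * here are made up): an element with vl = 3 and weight = 16 is written to
 * entry i of the target list, at CSR offset (target + i * 8), as
 *
 *	(((u64)3 & SEND_LOW_PRIORITY_LIST_VL_MASK)
 *		<< SEND_LOW_PRIORITY_LIST_VL_SHIFT) |
 *	(((u64)16 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
 *		<< SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT)
 *
 * exactly as done in the loop above.
 */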
10485
10486 /*
10487  * Read one credit merge VL register.
10488  */
10489 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10490                            struct vl_limit *vll)
10491 {
10492         u64 reg = read_csr(dd, csr);
10493
10494         vll->dedicated = cpu_to_be16(
10495                 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10496                 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10497         vll->shared = cpu_to_be16(
10498                 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10499                 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10500 }
10501
10502 /*
10503  * Read the current credit merge limits.
10504  */
10505 static int get_buffer_control(struct hfi1_devdata *dd,
10506                               struct buffer_control *bc, u16 *overall_limit)
10507 {
10508         u64 reg;
10509         int i;
10510
10511         /* not all entries are filled in */
10512         memset(bc, 0, sizeof(*bc));
10513
10514         /* OPA and HFI have a 1-1 mapping */
10515         for (i = 0; i < TXE_NUM_DATA_VL; i++)
10516                 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10517
10518         /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10519         read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10520
10521         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10522         bc->overall_shared_limit = cpu_to_be16(
10523                 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10524                 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10525         if (overall_limit)
10526                 *overall_limit = (reg
10527                         >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10528                         & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10529         return sizeof(struct buffer_control);
10530 }
10531
10532 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10533 {
10534         u64 reg;
10535         int i;
10536
10537         /* each register contains 16 SC->VLnt mappings, 4 bits each */
10538         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10539         for (i = 0; i < sizeof(u64); i++) {
10540                 u8 byte = *(((u8 *)&reg) + i);
10541
10542                 dp->vlnt[2 * i] = byte & 0xf;
10543                 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10544         }
10545
10546         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10547         for (i = 0; i < sizeof(u64); i++) {
10548                 u8 byte = *(((u8 *)&reg) + i);
10549
10550                 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10551                 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10552         }
10553         return sizeof(struct sc2vlnt);
10554 }
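
/*
 * Nibble unpacking example for get_sc2vlnt() (the register value is
 * hypothetical): if the low byte of DCC_CFG_SC_VL_TABLE_15_0 reads 0xa3,
 * then vlnt[0] = 0x3 (low nibble) and vlnt[1] = 0xa (high nibble), i.e.
 * SC0 maps to VLnt 3 and SC1 maps to VLnt 10.
 */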
10555
10556 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10557                               struct ib_vl_weight_elem *vl)
10558 {
10559         unsigned int i;
10560
10561         for (i = 0; i < nelems; i++, vl++) {
10562                 vl->vl = 0xf;
10563                 vl->weight = 0;
10564         }
10565 }
10566
10567 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10568 {
10569         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10570                   DC_SC_VL_VAL(15_0,
10571                                0, dp->vlnt[0] & 0xf,
10572                                1, dp->vlnt[1] & 0xf,
10573                                2, dp->vlnt[2] & 0xf,
10574                                3, dp->vlnt[3] & 0xf,
10575                                4, dp->vlnt[4] & 0xf,
10576                                5, dp->vlnt[5] & 0xf,
10577                                6, dp->vlnt[6] & 0xf,
10578                                7, dp->vlnt[7] & 0xf,
10579                                8, dp->vlnt[8] & 0xf,
10580                                9, dp->vlnt[9] & 0xf,
10581                                10, dp->vlnt[10] & 0xf,
10582                                11, dp->vlnt[11] & 0xf,
10583                                12, dp->vlnt[12] & 0xf,
10584                                13, dp->vlnt[13] & 0xf,
10585                                14, dp->vlnt[14] & 0xf,
10586                                15, dp->vlnt[15] & 0xf));
10587         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10588                   DC_SC_VL_VAL(31_16,
10589                                16, dp->vlnt[16] & 0xf,
10590                                17, dp->vlnt[17] & 0xf,
10591                                18, dp->vlnt[18] & 0xf,
10592                                19, dp->vlnt[19] & 0xf,
10593                                20, dp->vlnt[20] & 0xf,
10594                                21, dp->vlnt[21] & 0xf,
10595                                22, dp->vlnt[22] & 0xf,
10596                                23, dp->vlnt[23] & 0xf,
10597                                24, dp->vlnt[24] & 0xf,
10598                                25, dp->vlnt[25] & 0xf,
10599                                26, dp->vlnt[26] & 0xf,
10600                                27, dp->vlnt[27] & 0xf,
10601                                28, dp->vlnt[28] & 0xf,
10602                                29, dp->vlnt[29] & 0xf,
10603                                30, dp->vlnt[30] & 0xf,
10604                                31, dp->vlnt[31] & 0xf));
10605 }
10606
10607 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10608                         u16 limit)
10609 {
10610         if (limit != 0)
10611                 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10612                             what, (int)limit, idx);
10613 }
10614
10615 /* change only the shared limit portion of SendCmGlobalCredit */
10616 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10617 {
10618         u64 reg;
10619
10620         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10621         reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10622         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10623         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10624 }
10625
10626 /* change only the total credit limit portion of SendCmGlobalCredit */
10627 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10628 {
10629         u64 reg;
10630
10631         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10632         reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10633         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10634         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10635 }
10636
10637 /* set the given per-VL shared limit */
10638 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10639 {
10640         u64 reg;
10641         u32 addr;
10642
10643         if (vl < TXE_NUM_DATA_VL)
10644                 addr = SEND_CM_CREDIT_VL + (8 * vl);
10645         else
10646                 addr = SEND_CM_CREDIT_VL15;
10647
10648         reg = read_csr(dd, addr);
10649         reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10650         reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10651         write_csr(dd, addr, reg);
10652 }
10653
10654 /* set the given per-VL dedicated limit */
10655 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10656 {
10657         u64 reg;
10658         u32 addr;
10659
10660         if (vl < TXE_NUM_DATA_VL)
10661                 addr = SEND_CM_CREDIT_VL + (8 * vl);
10662         else
10663                 addr = SEND_CM_CREDIT_VL15;
10664
10665         reg = read_csr(dd, addr);
10666         reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10667         reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10668         write_csr(dd, addr, reg);
10669 }
10670
10671 /* spin until the given per-VL status mask bits clear */
10672 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10673                                      const char *which)
10674 {
10675         unsigned long timeout;
10676         u64 reg;
10677
10678         timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10679         while (1) {
10680                 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10681
10682                 if (reg == 0)
10683                         return; /* success */
10684                 if (time_after(jiffies, timeout))
10685                         break;          /* timed out */
10686                 udelay(1);
10687         }
10688
10689         dd_dev_err(dd,
10690                    "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10691                    which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10692         /*
10693          * If this occurs, it is likely there was a credit loss on the link.
10694          * The only recovery from that is a link bounce.
10695          */
10696         dd_dev_err(dd,
10697                    "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
10698 }
10699
10700 /*
10701  * The number of credits on the VLs may be changed while everything
10702  * is "live", but the following algorithm must be followed due to
10703  * how the hardware is actually implemented.  In particular,
10704  * Return_Credit_Status[] is the only correct status check.
10705  *
10706  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10707  *     set Global_Shared_Credit_Limit = 0
10708  *     use_all_vl = 1
10709  * mask0 = all VLs that are changing either dedicated or shared limits
10710  * set Shared_Limit[mask0] = 0
10711  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10712  * if (changing any dedicated limit)
10713  *     mask1 = all VLs that are lowering dedicated limits
10714  *     lower Dedicated_Limit[mask1]
10715  *     spin until Return_Credit_Status[mask1] == 0
10716  *     raise Dedicated_Limits
10717  * raise Shared_Limits
10718  * raise Global_Shared_Credit_Limit
10719  *
10720  * lower = if the new limit is lower, set the limit to the new value
10721  * raise = if the new limit is higher than the current value (may be changed
10722  *      earlier in the algorithm), set the limit to the new value
10723  */
10724 int set_buffer_control(struct hfi1_pportdata *ppd,
10725                        struct buffer_control *new_bc)
10726 {
10727         struct hfi1_devdata *dd = ppd->dd;
10728         u64 changing_mask, ld_mask, stat_mask;
10729         int change_count;
10730         int i, use_all_mask;
10731         int this_shared_changing;
10732         int vl_count = 0, ret;
10733         /*
10734          * A0: the variable any_shared_limit_changing below (and its use in
10735          * the algorithm above) exists only for A0.  If A0 support is
10736          * removed, it can be removed too.
10736          */
10737         int any_shared_limit_changing;
10738         struct buffer_control cur_bc;
10739         u8 changing[OPA_MAX_VLS];
10740         u8 lowering_dedicated[OPA_MAX_VLS];
10741         u16 cur_total;
10742         u32 new_total = 0;
10743         const u64 all_mask =
10744         SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10745          | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10746          | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10747          | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10748          | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10749          | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10750          | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10751          | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10752          | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10753
10754 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10755 #define NUM_USABLE_VLS 16       /* look at VL15 and less */
10756
10757         /* find the new total credits, do sanity check on unused VLs */
10758         for (i = 0; i < OPA_MAX_VLS; i++) {
10759                 if (valid_vl(i)) {
10760                         new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10761                         continue;
10762                 }
10763                 nonzero_msg(dd, i, "dedicated",
10764                             be16_to_cpu(new_bc->vl[i].dedicated));
10765                 nonzero_msg(dd, i, "shared",
10766                             be16_to_cpu(new_bc->vl[i].shared));
10767                 new_bc->vl[i].dedicated = 0;
10768                 new_bc->vl[i].shared = 0;
10769         }
10770         new_total += be16_to_cpu(new_bc->overall_shared_limit);
10771
10772         /* fetch the current values */
10773         get_buffer_control(dd, &cur_bc, &cur_total);
10774
10775         /*
10776          * Create the masks we will use.
10777          */
10778         memset(changing, 0, sizeof(changing));
10779         memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10780         /*
10781          * NOTE: Assumes that the individual VL bits are adjacent and in
10782          * increasing order
10783          */
10784         stat_mask =
10785                 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10786         changing_mask = 0;
10787         ld_mask = 0;
10788         change_count = 0;
10789         any_shared_limit_changing = 0;
10790         for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10791                 if (!valid_vl(i))
10792                         continue;
10793                 this_shared_changing = new_bc->vl[i].shared
10794                                                 != cur_bc.vl[i].shared;
10795                 if (this_shared_changing)
10796                         any_shared_limit_changing = 1;
10797                 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10798                     this_shared_changing) {
10799                         changing[i] = 1;
10800                         changing_mask |= stat_mask;
10801                         change_count++;
10802                 }
10803                 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10804                                         be16_to_cpu(cur_bc.vl[i].dedicated)) {
10805                         lowering_dedicated[i] = 1;
10806                         ld_mask |= stat_mask;
10807                 }
10808         }
10809
10810         /* bracket the credit change with a total adjustment */
10811         if (new_total > cur_total)
10812                 set_global_limit(dd, new_total);
10813
10814         /*
10815          * Start the credit change algorithm.
10816          */
10817         use_all_mask = 0;
10818         if ((be16_to_cpu(new_bc->overall_shared_limit) <
10819              be16_to_cpu(cur_bc.overall_shared_limit)) ||
10820             (is_ax(dd) && any_shared_limit_changing)) {
10821                 set_global_shared(dd, 0);
10822                 cur_bc.overall_shared_limit = 0;
10823                 use_all_mask = 1;
10824         }
10825
10826         for (i = 0; i < NUM_USABLE_VLS; i++) {
10827                 if (!valid_vl(i))
10828                         continue;
10829
10830                 if (changing[i]) {
10831                         set_vl_shared(dd, i, 0);
10832                         cur_bc.vl[i].shared = 0;
10833                 }
10834         }
10835
10836         wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10837                                  "shared");
10838
10839         if (change_count > 0) {
10840                 for (i = 0; i < NUM_USABLE_VLS; i++) {
10841                         if (!valid_vl(i))
10842                                 continue;
10843
10844                         if (lowering_dedicated[i]) {
10845                                 set_vl_dedicated(dd, i,
10846                                                  be16_to_cpu(new_bc->
10847                                                              vl[i].dedicated));
10848                                 cur_bc.vl[i].dedicated =
10849                                                 new_bc->vl[i].dedicated;
10850                         }
10851                 }
10852
10853                 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10854
10855                 /* now raise all dedicated that are going up */
10856                 for (i = 0; i < NUM_USABLE_VLS; i++) {
10857                         if (!valid_vl(i))
10858                                 continue;
10859
10860                         if (be16_to_cpu(new_bc->vl[i].dedicated) >
10861                                         be16_to_cpu(cur_bc.vl[i].dedicated))
10862                                 set_vl_dedicated(dd, i,
10863                                                  be16_to_cpu(new_bc->
10864                                                              vl[i].dedicated));
10865                 }
10866         }
10867
10868         /* next raise all shared that are going up */
10869         for (i = 0; i < NUM_USABLE_VLS; i++) {
10870                 if (!valid_vl(i))
10871                         continue;
10872
10873                 if (be16_to_cpu(new_bc->vl[i].shared) >
10874                                 be16_to_cpu(cur_bc.vl[i].shared))
10875                         set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10876         }
10877
10878         /* finally raise the global shared */
10879         if (be16_to_cpu(new_bc->overall_shared_limit) >
10880             be16_to_cpu(cur_bc.overall_shared_limit))
10881                 set_global_shared(dd,
10882                                   be16_to_cpu(new_bc->overall_shared_limit));
10883
10884         /* bracket the credit change with a total adjustment */
10885         if (new_total < cur_total)
10886                 set_global_limit(dd, new_total);
10887
10888         /*
10889          * Determine the actual number of operational VLS using the number of
10890          * dedicated and shared credits for each VL.
10891          */
10892         if (change_count > 0) {
10893                 for (i = 0; i < TXE_NUM_DATA_VL; i++)
10894                         if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
10895                             be16_to_cpu(new_bc->vl[i].shared) > 0)
10896                                 vl_count++;
10897                 ppd->actual_vls_operational = vl_count;
10898                 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
10899                                     ppd->actual_vls_operational :
10900                                     ppd->vls_operational,
10901                                     NULL);
10902                 if (ret == 0)
10903                         ret = pio_map_init(dd, ppd->port - 1, vl_count ?
10904                                            ppd->actual_vls_operational :
10905                                            ppd->vls_operational, NULL);
10906                 if (ret)
10907                         return ret;
10908         }
10909         return 0;
10910 }
10911
10912 /*
10913  * Read the given fabric manager table. Return the size of the
10914  * table (in bytes) on success, and a negative error code on
10915  * failure.
10916  */
10917 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
10919 {
10920         int size;
10921         struct vl_arb_cache *vlc;
10922
10923         switch (which) {
10924         case FM_TBL_VL_HIGH_ARB:
10925                 size = 256;
10926                 /*
10927                  * OPA specifies 128 elements (of 2 bytes each), though
10928                  * HFI supports only 16 elements in h/w.
10929                  */
10930                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10931                 vl_arb_get_cache(vlc, t);
10932                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10933                 break;
10934         case FM_TBL_VL_LOW_ARB:
10935                 size = 256;
10936                 /*
10937                  * OPA specifies 128 elements (of 2 bytes each), though
10938                  * HFI supports only 16 elements in h/w.
10939                  */
10940                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10941                 vl_arb_get_cache(vlc, t);
10942                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10943                 break;
10944         case FM_TBL_BUFFER_CONTROL:
10945                 size = get_buffer_control(ppd->dd, t, NULL);
10946                 break;
10947         case FM_TBL_SC2VLNT:
10948                 size = get_sc2vlnt(ppd->dd, t);
10949                 break;
10950         case FM_TBL_VL_PREEMPT_ELEMS:
10951                 size = 256;
10952                 /* OPA specifies 128 elements, of 2 bytes each */
10953                 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
10954                 break;
10955         case FM_TBL_VL_PREEMPT_MATRIX:
10956                 size = 256;
10957                 /*
10958                  * OPA specifies that this is the same size as the VL
10959                  * arbitration tables (i.e., 256 bytes).
10960                  */
10961                 break;
10962         default:
10963                 return -EINVAL;
10964         }
10965         return size;
10966 }
10967
10968 /*
10969  * Write the given fabric manager table.
10970  */
10971 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
10972 {
10973         int ret = 0;
10974         struct vl_arb_cache *vlc;
10975
10976         switch (which) {
10977         case FM_TBL_VL_HIGH_ARB:
10978                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
10979                 if (vl_arb_match_cache(vlc, t)) {
10980                         vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10981                         break;
10982                 }
10983                 vl_arb_set_cache(vlc, t);
10984                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
10985                 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
10986                                      VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
10987                 break;
10988         case FM_TBL_VL_LOW_ARB:
10989                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
10990                 if (vl_arb_match_cache(vlc, t)) {
10991                         vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10992                         break;
10993                 }
10994                 vl_arb_set_cache(vlc, t);
10995                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
10996                 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
10997                                      VL_ARB_LOW_PRIO_TABLE_SIZE, t);
10998                 break;
10999         case FM_TBL_BUFFER_CONTROL:
11000                 ret = set_buffer_control(ppd, t);
11001                 break;
11002         case FM_TBL_SC2VLNT:
11003                 set_sc2vlnt(ppd->dd, t);
11004                 break;
11005         default:
11006                 ret = -EINVAL;
11007         }
11008         return ret;
11009 }
11010
11011 /*
11012  * Disable all data VLs.
11013  *
11014  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11015  */
11016 static int disable_data_vls(struct hfi1_devdata *dd)
11017 {
11018         if (is_ax(dd))
11019                 return 1;
11020
11021         pio_send_control(dd, PSC_DATA_VL_DISABLE);
11022
11023         return 0;
11024 }
11025
11026 /*
11027  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11028  * Just re-enables all data VLs (the "fill" part happens
11029  * automatically - the name was chosen for symmetry with
11030  * stop_drain_data_vls()).
11031  *
11032  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11033  */
11034 int open_fill_data_vls(struct hfi1_devdata *dd)
11035 {
11036         if (is_ax(dd))
11037                 return 1;
11038
11039         pio_send_control(dd, PSC_DATA_VL_ENABLE);
11040
11041         return 0;
11042 }
11043
11044 /*
11045  * drain_data_vls() - assumes that disable_data_vls() has been called;
11046  * waits for the occupancy of the per-VL FIFOs for all contexts, and of
11047  * the SDMA engines, to drop to 0.
11048  */
11049 static void drain_data_vls(struct hfi1_devdata *dd)
11050 {
11051         sc_wait(dd);
11052         sdma_wait(dd);
11053         pause_for_credit_return(dd);
11054 }
11055
11056 /*
11057  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11058  *
11059  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11060  * meant to be used like this:
11061  *
11062  * stop_drain_data_vls(dd);
11063  * // do things with per-VL resources
11064  * open_fill_data_vls(dd);
11065  */
11066 int stop_drain_data_vls(struct hfi1_devdata *dd)
11067 {
11068         int ret;
11069
11070         ret = disable_data_vls(dd);
11071         if (ret == 0)
11072                 drain_data_vls(dd);
11073
11074         return ret;
11075 }
11076
11077 /*
11078  * Convert a nanosecond time to a cclock count.  No matter how slow
11079  * the cclock, a non-zero ns will always have a non-zero result.
11080  */
11081 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11082 {
11083         u32 cclocks;
11084
11085         if (dd->icode == ICODE_FPGA_EMULATION)
11086                 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11087         else  /* simulation pretends to be ASIC */
11088                 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11089         if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11090                 cclocks = 1;
11091         return cclocks;
11092 }
11093
11094 /*
11095  * Convert a cclock count to nanoseconds.  No matter how slow
11096  * the cclock, a non-zero cclocks value will always have a non-zero result.
11097  */
11098 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11099 {
11100         u32 ns;
11101
11102         if (dd->icode == ICODE_FPGA_EMULATION)
11103                 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11104         else  /* simulation pretends to be ASIC */
11105                 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11106         if (cclocks && !ns)
11107                 ns = 1;
11108         return ns;
11109 }
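
/*
 * Worked round-trip example for the two conversions above, using a purely
 * illustrative cclock period of 800ps (the real FPGA_CCLOCK_PS and
 * ASIC_CCLOCK_PS values are defined elsewhere):
 *
 *	ns_to_cclock(dd, 1) = (1 * 1000) / 800 = 1 cclock
 *	cclock_to_ns(dd, 1) = (1 * 800) / 1000 = 0, clamped up to 1ns
 *
 * The final clamp is what guarantees a non-zero result for non-zero input.
 */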
11110
11111 /*
11112  * Dynamically adjust the receive interrupt timeout for a context based on
11113  * incoming packet rate.
11114  *
11115  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11116  */
11117 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11118 {
11119         struct hfi1_devdata *dd = rcd->dd;
11120         u32 timeout = rcd->rcvavail_timeout;
11121
11122         /*
11123          * This algorithm doubles or halves the timeout depending on whether
11124          * the number of packets received in this interrupt was less than,
11125          * or greater than or equal to, the interrupt count.
11126          *
11127          * The calculations below do not allow a steady state to be achieved.
11128          * Only at the endpoints is it possible to have an unchanging
11129          * timeout.
11130          */
11131         if (npkts < rcv_intr_count) {
11132                 /*
11133                  * Not enough packets arrived before the timeout, adjust
11134                  * timeout downward.
11135                  */
11136                 if (timeout < 2) /* already at minimum? */
11137                         return;
11138                 timeout >>= 1;
11139         } else {
11140                 /*
11141                  * More than enough packets arrived before the timeout, adjust
11142                  * timeout upward.
11143                  */
11144                 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11145                         return;
11146                 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11147         }
11148
11149         rcd->rcvavail_timeout = timeout;
11150         /*
11151          * timeout cannot be larger than rcv_intr_timeout_csr, which has
11152          * already been verified to be in range
11153          */
11154         write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11155                         (u64)timeout <<
11156                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11157 }
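
/*
 * Example of the doubling/halving behavior above (the timeout values are
 * illustrative): starting from rcvavail_timeout = 8, an interrupt that saw
 * npkts >= rcv_intr_count doubles it to 16 (capped at
 * dd->rcv_intr_timeout_csr), while an interrupt that saw
 * npkts < rcv_intr_count halves it to 4.  Once the timeout reaches 1,
 * further halving is skipped, so the value never drops to 0.
 */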
11158
11159 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11160                     u32 intr_adjust, u32 npkts)
11161 {
11162         struct hfi1_devdata *dd = rcd->dd;
11163         u64 reg;
11164         u32 ctxt = rcd->ctxt;
11165
11166         /*
11167          * Need to write timeout register before updating RcvHdrHead to ensure
11168          * that a new value is used when the HW decides to restart counting.
11169          */
11170         if (intr_adjust)
11171                 adjust_rcv_timeout(rcd, npkts);
11172         if (updegr) {
11173                 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11174                         << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11175                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11176         }
11177         mmiowb();
11178         reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11179                 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11180                         << RCV_HDR_HEAD_HEAD_SHIFT);
11181         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11182         mmiowb();
11183 }
11184
11185 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11186 {
11187         u32 head, tail;
11188
11189         head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11190                 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11191
11192         if (rcd->rcvhdrtail_kvaddr)
11193                 tail = get_rcvhdrtail(rcd);
11194         else
11195                 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11196
11197         return head == tail;
11198 }
11199
11200 /*
11201  * Context Control and Receive Array encoding for buffer size:
11202  *      0x0 invalid
11203  *      0x1   4 KB
11204  *      0x2   8 KB
11205  *      0x3  16 KB
11206  *      0x4  32 KB
11207  *      0x5  64 KB
11208  *      0x6 128 KB
11209  *      0x7 256 KB
11210  *      0x8 512 KB (Receive Array only)
11211  *      0x9   1 MB (Receive Array only)
11212  *      0xa   2 MB (Receive Array only)
11213  *
11214  *      0xb-0xf reserved (Receive Array only)
11215  *
11217  * This routine assumes that the value has already been sanity checked.
11218  */
11219 static u32 encoded_size(u32 size)
11220 {
11221         switch (size) {
11222         case   4 * 1024: return 0x1;
11223         case   8 * 1024: return 0x2;
11224         case  16 * 1024: return 0x3;
11225         case  32 * 1024: return 0x4;
11226         case  64 * 1024: return 0x5;
11227         case 128 * 1024: return 0x6;
11228         case 256 * 1024: return 0x7;
11229         case 512 * 1024: return 0x8;
11230         case   1 * 1024 * 1024: return 0x9;
11231         case   2 * 1024 * 1024: return 0xa;
11232         }
11233         return 0x1;     /* if invalid, go with the minimum size */
11234 }
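
/*
 * For example, encoded_size(64 * 1024) returns 0x5 and
 * encoded_size(512 * 1024) returns 0x8 per the table above, while an
 * unlisted size such as 3000 falls through to the minimum encoding 0x1.
 */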
11235
11236 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11237 {
11238         struct hfi1_ctxtdata *rcd;
11239         u64 rcvctrl, reg;
11240         int did_enable = 0;
11241
11242         rcd = dd->rcd[ctxt];
11243         if (!rcd)
11244                 return;
11245
11246         hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11247
11248         rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11249         /* if the context already enabled, don't do the extra steps */
11250         if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11251             !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11252                 /* reset the tail and hdr addresses, and sequence count */
11253                 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11254                                 rcd->rcvhdrq_phys);
11255                 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11256                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11257                                         rcd->rcvhdrqtailaddr_phys);
11258                 rcd->seq_cnt = 1;
11259
11260                 /* reset the cached receive header queue head value */
11261                 rcd->head = 0;
11262
11263                 /*
11264                  * Zero the receive header queue so we don't get false
11265                  * positives when checking the sequence number.  The
11266                  * sequence numbers could land exactly on the same spot.
11267                  * E.g. an rcd restart before the receive header queue wrapped.
11268                  */
11269                 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11270
11271                 /* starting timeout */
11272                 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11273
11274                 /* enable the context */
11275                 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11276
11277                 /* clean the egr buffer size first */
11278                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11279                 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11280                                 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11281                                         << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11282
11283                 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11284                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11285                 did_enable = 1;
11286
11287                 /* zero RcvEgrIndexHead */
11288                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11289
11290                 /* set eager count and base index */
11291                 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11292                         & RCV_EGR_CTRL_EGR_CNT_MASK)
11293                        << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11294                         (((rcd->eager_base >> RCV_SHIFT)
11295                           & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11296                          << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11297                 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11298
11299                 /*
11300                  * Set TID (expected) count and base index.
11301                  * rcd->expected_count is set to individual RcvArray entries,
11302                  * not pairs, and the CSR takes a pair-count in groups of
11303                  * four, so divide by 8.
11304                  */
11305                 reg = (((rcd->expected_count >> RCV_SHIFT)
11306                                         & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11307                                 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11308                       (((rcd->expected_base >> RCV_SHIFT)
11309                                         & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11310                                 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11311                 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11312                 if (ctxt == HFI1_CTRL_CTXT)
11313                         write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11314         }
11315         if (op & HFI1_RCVCTRL_CTXT_DIS) {
11316                 write_csr(dd, RCV_VL15, 0);
11317                 /*
11318                  * When a receive context is being disabled, turn on tail
11319                  * update with a dummy tail address and then disable the
11320                  * receive context.
11321                  */
11322                 if (dd->rcvhdrtail_dummy_physaddr) {
11323                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11324                                         dd->rcvhdrtail_dummy_physaddr);
11325                         /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11326                         rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11327                 }
11328
11329                 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11330         }
11331         if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11332                 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11333         if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11334                 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11335         if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11336                 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11337         if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11338                 /* See comment on RcvCtxtCtrl.TailUpd above */
11339                 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11340                         rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11341         }
11342         if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11343                 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11344         if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11345                 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11346         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11347                 /*
11348                  * In one-packet-per-eager mode, the size comes from
11349                  * the RcvArray entry.
11350                  */
11351                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11352                 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11353         }
11354         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11355                 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11356         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11357                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11358         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11359                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11360         if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11361                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11362         if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11363                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11364         rcd->rcvctrl = rcvctrl;
11365         hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11366         write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11367
11368         /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11369         if (did_enable &&
11370             (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11371                 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11372                 if (reg != 0) {
11373                         dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11374                                     ctxt, reg);
11375                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11376                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11377                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11378                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11379                         reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11380                         dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11381                                     ctxt, reg, reg == 0 ? "not" : "still");
11382                 }
11383         }
11384
11385         if (did_enable) {
11386                 /*
11387                  * The interrupt timeout and count must be set after
11388                  * the context is enabled to take effect.
11389                  */
11390                 /* set interrupt timeout */
11391                 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11392                                 (u64)rcd->rcvavail_timeout <<
11393                                 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11394
11395                 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11396                 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11397                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11398         }
11399
11400         if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11401                 /*
11402                  * If the context has been disabled and the Tail Update has
11403                  * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
11404                  * address so it doesn't contain an invalid address.
11405                  */
11406                 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11407                                 dd->rcvhdrtail_dummy_physaddr);
11408 }
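
/*
 * A minimal (illustrative) call into hfi1_rcvctrl(): enabling a context
 * together with its packet-available interrupt is done by OR-ing the
 * operation flags, e.g.
 *
 *	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
 *		     rcd->ctxt);
 *
 * The flag combinations actually used are chosen by the callers elsewhere
 * in the driver; this is only a sketch of the interface.
 */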
11409
11410 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11411 {
11412         int ret;
11413         u64 val = 0;
11414
11415         if (namep) {
11416                 ret = dd->cntrnameslen;
11417                 *namep = dd->cntrnames;
11418         } else {
11419                 const struct cntr_entry *entry;
11420                 int i, j;
11421
11422                 ret = (dd->ndevcntrs) * sizeof(u64);
11423
11424                 /* Get the start of the block of counters */
11425                 *cntrp = dd->cntrs;
11426
11427                 /*
11428                  * Now go and fill in each counter in the block.
11429                  */
11430                 for (i = 0; i < DEV_CNTR_LAST; i++) {
11431                         entry = &dev_cntrs[i];
11432                         hfi1_cdbg(CNTR, "reading %s", entry->name);
11433                         if (entry->flags & CNTR_DISABLED) {
11434                                 /* Nothing */
11435                                 hfi1_cdbg(CNTR, "\tDisabled\n");
11436                         } else {
11437                                 if (entry->flags & CNTR_VL) {
11438                                         hfi1_cdbg(CNTR, "\tPer VL\n");
11439                                         for (j = 0; j < C_VL_COUNT; j++) {
11440                                                 val = entry->rw_cntr(entry,
11441                                                                   dd, j,
11442                                                                   CNTR_MODE_R,
11443                                                                   0);
11444                                                 hfi1_cdbg(
11445                                                    CNTR,
11446                                                    "\t\tRead 0x%llx for %d\n",
11447                                                    val, j);
11448                                                 dd->cntrs[entry->offset + j] =
11449                                                                             val;
11450                                         }
11451                                 } else if (entry->flags & CNTR_SDMA) {
11452                                         hfi1_cdbg(CNTR,
11453                                                   "\t Per SDMA Engine\n");
11454                                         for (j = 0; j < dd->chip_sdma_engines;
11455                                              j++) {
11456                                                 val =
11457                                                 entry->rw_cntr(entry, dd, j,
11458                                                                CNTR_MODE_R, 0);
11459                                                 hfi1_cdbg(CNTR,
11460                                                           "\t\tRead 0x%llx for %d\n",
11461                                                           val, j);
11462                                                 dd->cntrs[entry->offset + j] =
11463                                                                         val;
11464                                         }
11465                                 } else {
11466                                         val = entry->rw_cntr(entry, dd,
11467                                                         CNTR_INVALID_VL,
11468                                                         CNTR_MODE_R, 0);
11469                                         dd->cntrs[entry->offset] = val;
11470                                         hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11471                                 }
11472                         }
11473                 }
11474         }
11475         return ret;
11476 }
11477
11478 /*
11479  * Used by sysfs to create files for hfi stats to read
11480  */
11481 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11482 {
11483         int ret;
11484         u64 val = 0;
11485
11486         if (namep) {
11487                 ret = ppd->dd->portcntrnameslen;
11488                 *namep = ppd->dd->portcntrnames;
11489         } else {
11490                 const struct cntr_entry *entry;
11491                 int i, j;
11492
11493                 ret = ppd->dd->nportcntrs * sizeof(u64);
11494                 *cntrp = ppd->cntrs;
11495
11496                 for (i = 0; i < PORT_CNTR_LAST; i++) {
11497                         entry = &port_cntrs[i];
11498                         hfi1_cdbg(CNTR, "reading %s", entry->name);
11499                         if (entry->flags & CNTR_DISABLED) {
11500                                 /* Nothing */
11501                                 hfi1_cdbg(CNTR, "\tDisabled\n");
11502                                 continue;
11503                         }
11504
11505                         if (entry->flags & CNTR_VL) {
11506                                 hfi1_cdbg(CNTR, "\tPer VL");
11507                                 for (j = 0; j < C_VL_COUNT; j++) {
11508                                         val = entry->rw_cntr(entry, ppd, j,
11509                                                                CNTR_MODE_R,
11510                                                                0);
11511                                         hfi1_cdbg(
11512                                            CNTR,
11513                                            "\t\tRead 0x%llx for %d",
11514                                            val, j);
11515                                         ppd->cntrs[entry->offset + j] = val;
11516                                 }
11517                         } else {
11518                                 val = entry->rw_cntr(entry, ppd,
11519                                                        CNTR_INVALID_VL,
11520                                                        CNTR_MODE_R,
11521                                                        0);
11522                                 ppd->cntrs[entry->offset] = val;
11523                                 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11524                         }
11525                 }
11526         }
11527         return ret;
11528 }
11529
11530 static void free_cntrs(struct hfi1_devdata *dd)
11531 {
11532         struct hfi1_pportdata *ppd;
11533         int i;
11534
11535         if (dd->synth_stats_timer.data)
11536                 del_timer_sync(&dd->synth_stats_timer);
11537         dd->synth_stats_timer.data = 0;
11538         ppd = (struct hfi1_pportdata *)(dd + 1);
11539         for (i = 0; i < dd->num_pports; i++, ppd++) {
11540                 kfree(ppd->cntrs);
11541                 kfree(ppd->scntrs);
11542                 free_percpu(ppd->ibport_data.rvp.rc_acks);
11543                 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11544                 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11545                 ppd->cntrs = NULL;
11546                 ppd->scntrs = NULL;
11547                 ppd->ibport_data.rvp.rc_acks = NULL;
11548                 ppd->ibport_data.rvp.rc_qacks = NULL;
11549                 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11550         }
11551         kfree(dd->portcntrnames);
11552         dd->portcntrnames = NULL;
11553         kfree(dd->cntrs);
11554         dd->cntrs = NULL;
11555         kfree(dd->scntrs);
11556         dd->scntrs = NULL;
11557         kfree(dd->cntrnames);
11558         dd->cntrnames = NULL;
11559 }
11560
11561 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11562 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11563
11564 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11565                               u64 *psval, void *context, int vl)
11566 {
11567         u64 val;
11568         u64 sval = *psval;
11569
11570         if (entry->flags & CNTR_DISABLED) {
11571                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11572                 return 0;
11573         }
11574
11575         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11576
11577         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11578
11579         /* If it's a synthetic counter there is more work we need to do */
11580         if (entry->flags & CNTR_SYNTH) {
11581                 if (sval == CNTR_MAX) {
11582                         /* No need to read already saturated */
11583                         return CNTR_MAX;
11584                 }
11585
11586                 if (entry->flags & CNTR_32BIT) {
11587                         /* 32bit counters can wrap multiple times */
11588                         u64 upper = sval >> 32;
11589                         u64 lower = (sval << 32) >> 32;
11590
11591                         if (lower > val) { /* hw wrapped */
11592                                 if (upper == CNTR_32BIT_MAX)
11593                                         val = CNTR_MAX;
11594                                 else
11595                                         upper++;
11596                         }
11597
11598                         if (val != CNTR_MAX)
11599                                 val = (upper << 32) | val;
11600
11601                 } else {
11602                         /* If we rolled, we are saturated */
11603                         if ((val < sval) || (val > CNTR_MAX))
11604                                 val = CNTR_MAX;
11605                 }
11606         }
11607
11608         *psval = val;
11609
11610         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11611
11612         return val;
11613 }
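
/*
 * Worked example of the 32-bit synthetic wrap handling above (the values
 * are hypothetical): with a saved value sval = 0x1FFFFFFF0 (upper = 1,
 * lower = 0xFFFFFFF0) and a fresh hardware read of val = 0x10, lower > val
 * indicates a wrap, so upper is bumped to 2 and the returned value becomes
 * (2 << 32) | 0x10 = 0x200000010.
 */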
11614
11615 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11616                                struct cntr_entry *entry,
11617                                u64 *psval, void *context, int vl, u64 data)
11618 {
11619         u64 val;
11620
11621         if (entry->flags & CNTR_DISABLED) {
11622                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11623                 return 0;
11624         }
11625
11626         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11627
11628         if (entry->flags & CNTR_SYNTH) {
11629                 *psval = data;
11630                 if (entry->flags & CNTR_32BIT) {
11631                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11632                                              (data << 32) >> 32);
11633                         val = data; /* return the full 64bit value */
11634                 } else {
11635                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11636                                              data);
11637                 }
11638         } else {
11639                 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11640         }
11641
11642         *psval = val;
11643
11644         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11645
11646         return val;
11647 }
11648
11649 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11650 {
11651         struct cntr_entry *entry;
11652         u64 *sval;
11653
11654         entry = &dev_cntrs[index];
11655         sval = dd->scntrs + entry->offset;
11656
11657         if (vl != CNTR_INVALID_VL)
11658                 sval += vl;
11659
11660         return read_dev_port_cntr(dd, entry, sval, dd, vl);
11661 }
11662
11663 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11664 {
11665         struct cntr_entry *entry;
11666         u64 *sval;
11667
11668         entry = &dev_cntrs[index];
11669         sval = dd->scntrs + entry->offset;
11670
11671         if (vl != CNTR_INVALID_VL)
11672                 sval += vl;
11673
11674         return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11675 }
11676
11677 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11678 {
11679         struct cntr_entry *entry;
11680         u64 *sval;
11681
11682         entry = &port_cntrs[index];
11683         sval = ppd->scntrs + entry->offset;
11684
11685         if (vl != CNTR_INVALID_VL)
11686                 sval += vl;
11687
11688         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11689             (index <= C_RCV_HDR_OVF_LAST)) {
11690                 /* We do not want to bother for disabled contexts */
11691                 return 0;
11692         }
11693
11694         return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11695 }
11696
11697 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11698 {
11699         struct cntr_entry *entry;
11700         u64 *sval;
11701
11702         entry = &port_cntrs[index];
11703         sval = ppd->scntrs + entry->offset;
11704
11705         if (vl != CNTR_INVALID_VL)
11706                 sval += vl;
11707
11708         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11709             (index <= C_RCV_HDR_OVF_LAST)) {
11710                 /* We do not want to bother for disabled contexts */
11711                 return 0;
11712         }
11713
11714         return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11715 }
11716
11717 static void update_synth_timer(unsigned long opaque)
11718 {
11719         u64 cur_tx;
11720         u64 cur_rx;
11721         u64 total_flits;
11722         u8 update = 0;
11723         int i, j, vl;
11724         struct hfi1_pportdata *ppd;
11725         struct cntr_entry *entry;
11726
11727         struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11728
11729         /*
11730          * Rather than keep beating on the CSRs, pick a minimal set that we can
11731          * check to watch for potential rollover. We can do this by looking at
11732          * the number of flits sent/received. If the total flits exceed 32 bits
11733          * then we have to iterate all the counters and update.
11734          */
11735         entry = &dev_cntrs[C_DC_RCV_FLITS];
11736         cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11737
11738         entry = &dev_cntrs[C_DC_XMIT_FLITS];
11739         cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11740
11741         hfi1_cdbg(
11742             CNTR,
11743             "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11744             dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11745
11746         if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11747                 /*
11748                  * May not be strictly necessary to update but it won't hurt and
11749                  * simplifies the logic here.
11750                  */
11751                 update = 1;
11752                 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11753                           dd->unit);
11754         } else {
11755                 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11756                 hfi1_cdbg(CNTR,
11757                           "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11758                           total_flits, (u64)CNTR_32BIT_MAX);
11759                 if (total_flits >= CNTR_32BIT_MAX) {
11760                         hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11761                                   dd->unit);
11762                         update = 1;
11763                 }
11764         }
11765
11766         if (update) {
11767                 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11768                 for (i = 0; i < DEV_CNTR_LAST; i++) {
11769                         entry = &dev_cntrs[i];
11770                         if (entry->flags & CNTR_VL) {
11771                                 for (vl = 0; vl < C_VL_COUNT; vl++)
11772                                         read_dev_cntr(dd, i, vl);
11773                         } else {
11774                                 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11775                         }
11776                 }
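                      /* the per-port data structures follow the device data in memory */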
11777                 ppd = (struct hfi1_pportdata *)(dd + 1);
11778                 for (i = 0; i < dd->num_pports; i++, ppd++) {
11779                         for (j = 0; j < PORT_CNTR_LAST; j++) {
11780                                 entry = &port_cntrs[j];
11781                                 if (entry->flags & CNTR_VL) {
11782                                         for (vl = 0; vl < C_VL_COUNT; vl++)
11783                                                 read_port_cntr(ppd, j, vl);
11784                                 } else {
11785                                         read_port_cntr(ppd, j, CNTR_INVALID_VL);
11786                                 }
11787                         }
11788                 }
11789
11790                 /*
11791                  * We want the value in the register. The goal is to keep track
11792                  * of the number of "ticks" not the counter value. In other
11793                  * words if the register rolls we want to notice it and go ahead
11794                  * and force an update.
11795                  */
11796                 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11797                 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11798                                                 CNTR_MODE_R, 0);
11799
11800                 entry = &dev_cntrs[C_DC_RCV_FLITS];
11801                 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11802                                                 CNTR_MODE_R, 0);
11803
11804                 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11805                           dd->unit, dd->last_tx, dd->last_rx);
11806
11807         } else {
11808                 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11809         }
11810
11811         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11812 }
11813
11814 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
11815 static int init_cntrs(struct hfi1_devdata *dd)
11816 {
11817         int i, rcv_ctxts, j;
11818         size_t sz;
11819         char *p;
11820         char name[C_MAX_NAME];
11821         struct hfi1_pportdata *ppd;
11822         const char *bit_type_32 = ",32";
11823         const int bit_type_32_sz = strlen(bit_type_32);
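              /*
               * 32-bit counters get a ",32" suffix appended to their exported
               * name, presumably so consumers of the name list can tell the
               * counter width.
               */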
11824
11825         /* set up the stats timer; the add_timer is done at the end */
11826         setup_timer(&dd->synth_stats_timer, update_synth_timer,
11827                     (unsigned long)dd);
11828
11829         /***********************/
11830         /* per device counters */
11831         /***********************/
11832
11833         /* size names and determine how many we have */
11834         dd->ndevcntrs = 0;
11835         sz = 0;
11836
11837         for (i = 0; i < DEV_CNTR_LAST; i++) {
11838                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11839                         hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11840                         continue;
11841                 }
11842
11843                 if (dev_cntrs[i].flags & CNTR_VL) {
11844                         dev_cntrs[i].offset = dd->ndevcntrs;
11845                         for (j = 0; j < C_VL_COUNT; j++) {
11846                                 snprintf(name, C_MAX_NAME, "%s%d",
11847                                          dev_cntrs[i].name, vl_from_idx(j));
11848                                 sz += strlen(name);
11849                                 /* Add ",32" for 32-bit counters */
11850                                 if (dev_cntrs[i].flags & CNTR_32BIT)
11851                                         sz += bit_type_32_sz;
11852                                 sz++;
11853                                 dd->ndevcntrs++;
11854                         }
11855                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11856                         dev_cntrs[i].offset = dd->ndevcntrs;
11857                         for (j = 0; j < dd->chip_sdma_engines; j++) {
11858                                 snprintf(name, C_MAX_NAME, "%s%d",
11859                                          dev_cntrs[i].name, j);
11860                                 sz += strlen(name);
11861                                 /* Add ",32" for 32-bit counters */
11862                                 if (dev_cntrs[i].flags & CNTR_32BIT)
11863                                         sz += bit_type_32_sz;
11864                                 sz++;
11865                                 dd->ndevcntrs++;
11866                         }
11867                 } else {
11868                         /* +1 for newline. */
11869                         sz += strlen(dev_cntrs[i].name) + 1;
11870                         /* Add ",32" for 32-bit counters */
11871                         if (dev_cntrs[i].flags & CNTR_32BIT)
11872                                 sz += bit_type_32_sz;
11873                         dev_cntrs[i].offset = dd->ndevcntrs;
11874                         dd->ndevcntrs++;
11875                 }
11876         }
11877
11878         /* allocate space for the counter values */
11879         dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11880         if (!dd->cntrs)
11881                 goto bail;
11882
11883         dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11884         if (!dd->scntrs)
11885                 goto bail;
11886
11887         /* allocate space for the counter names */
11888         dd->cntrnameslen = sz;
11889         dd->cntrnames = kmalloc(sz, GFP_KERNEL);
11890         if (!dd->cntrnames)
11891                 goto bail;
11892
11893         /* fill in the names */
11894         for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
11895                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11896                         /* Nothing */
11897                 } else if (dev_cntrs[i].flags & CNTR_VL) {
11898                         for (j = 0; j < C_VL_COUNT; j++) {
11899                                 snprintf(name, C_MAX_NAME, "%s%d",
11900                                          dev_cntrs[i].name,
11901                                          vl_from_idx(j));
11902                                 memcpy(p, name, strlen(name));
11903                                 p += strlen(name);
11904
11905                                 /* Counter is 32 bits */
11906                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
11907                                         memcpy(p, bit_type_32, bit_type_32_sz);
11908                                         p += bit_type_32_sz;
11909                                 }
11910
11911                                 *p++ = '\n';
11912                         }
11913                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11914                         for (j = 0; j < dd->chip_sdma_engines; j++) {
11915                                 snprintf(name, C_MAX_NAME, "%s%d",
11916                                          dev_cntrs[i].name, j);
11917                                 memcpy(p, name, strlen(name));
11918                                 p += strlen(name);
11919
11920                                 /* Counter is 32 bits */
11921                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
11922                                         memcpy(p, bit_type_32, bit_type_32_sz);
11923                                         p += bit_type_32_sz;
11924                                 }
11925
11926                                 *p++ = '\n';
11927                         }
11928                 } else {
11929                         memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
11930                         p += strlen(dev_cntrs[i].name);
11931
11932                         /* Counter is 32 bits */
11933                         if (dev_cntrs[i].flags & CNTR_32BIT) {
11934                                 memcpy(p, bit_type_32, bit_type_32_sz);
11935                                 p += bit_type_32_sz;
11936                         }
11937
11938                         *p++ = '\n';
11939                 }
11940         }
11941
11942         /*********************/
11943         /* per port counters */
11944         /*********************/
11945
11946         /*
11947          * Go through the counters for the overflows and disable the ones we
11948          * don't need. This varies based on platform so we need to do it
11949          * dynamically here.
11950          */
11951         rcv_ctxts = dd->num_rcv_contexts;
11952         for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
11953              i <= C_RCV_HDR_OVF_LAST; i++) {
11954                 port_cntrs[i].flags |= CNTR_DISABLED;
11955         }
11956
11957         /* size port counter names and determine how many we have */
11958         sz = 0;
11959         dd->nportcntrs = 0;
11960         for (i = 0; i < PORT_CNTR_LAST; i++) {
11961                 if (port_cntrs[i].flags & CNTR_DISABLED) {
11962                         hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
11963                         continue;
11964                 }
11965
11966                 if (port_cntrs[i].flags & CNTR_VL) {
11967                         port_cntrs[i].offset = dd->nportcntrs;
11968                         for (j = 0; j < C_VL_COUNT; j++) {
11969                                 snprintf(name, C_MAX_NAME, "%s%d",
11970                                          port_cntrs[i].name, vl_from_idx(j));
11971                                 sz += strlen(name);
11972                                 /* Add ",32" for 32-bit counters */
11973                                 if (port_cntrs[i].flags & CNTR_32BIT)
11974                                         sz += bit_type_32_sz;
11975                                 sz++;
11976                                 dd->nportcntrs++;
11977                         }
11978                 } else {
11979                         /* +1 for newline */
11980                         sz += strlen(port_cntrs[i].name) + 1;
11981                         /* Add ",32" for 32-bit counters */
11982                         if (port_cntrs[i].flags & CNTR_32BIT)
11983                                 sz += bit_type_32_sz;
11984                         port_cntrs[i].offset = dd->nportcntrs;
11985                         dd->nportcntrs++;
11986                 }
11987         }
11988
11989         /* allocate space for the counter names */
11990         dd->portcntrnameslen = sz;
11991         dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
11992         if (!dd->portcntrnames)
11993                 goto bail;
11994
11995         /* fill in port cntr names */
11996         for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
11997                 if (port_cntrs[i].flags & CNTR_DISABLED)
11998                         continue;
11999
12000                 if (port_cntrs[i].flags & CNTR_VL) {
12001                         for (j = 0; j < C_VL_COUNT; j++) {
12002                                 snprintf(name, C_MAX_NAME, "%s%d",
12003                                          port_cntrs[i].name, vl_from_idx(j));
12004                                 memcpy(p, name, strlen(name));
12005                                 p += strlen(name);
12006
12007                                 /* Counter is 32 bits */
12008                                 if (port_cntrs[i].flags & CNTR_32BIT) {
12009                                         memcpy(p, bit_type_32, bit_type_32_sz);
12010                                         p += bit_type_32_sz;
12011                                 }
12012
12013                                 *p++ = '\n';
12014                         }
12015                 } else {
12016                         memcpy(p, port_cntrs[i].name,
12017                                strlen(port_cntrs[i].name));
12018                         p += strlen(port_cntrs[i].name);
12019
12020                         /* Counter is 32 bits */
12021                         if (port_cntrs[i].flags & CNTR_32BIT) {
12022                                 memcpy(p, bit_type_32, bit_type_32_sz);
12023                                 p += bit_type_32_sz;
12024                         }
12025
12026                         *p++ = '\n';
12027                 }
12028         }
12029
12030         /* allocate per port storage for counter values */
12031         ppd = (struct hfi1_pportdata *)(dd + 1);
12032         for (i = 0; i < dd->num_pports; i++, ppd++) {
12033                 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12034                 if (!ppd->cntrs)
12035                         goto bail;
12036
12037                 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12038                 if (!ppd->scntrs)
12039                         goto bail;
12040         }
12041
12042         /* CPU counters need to be allocated and zeroed */
12043         if (init_cpu_counters(dd))
12044                 goto bail;
12045
12046         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12047         return 0;
12048 bail:
12049         free_cntrs(dd);
12050         return -ENOMEM;
12051 }
12052
12053 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12054 {
12055         switch (chip_lstate) {
12056         default:
12057                 dd_dev_err(dd,
12058                            "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12059                            chip_lstate);
12060                 /* fall through */
12061         case LSTATE_DOWN:
12062                 return IB_PORT_DOWN;
12063         case LSTATE_INIT:
12064                 return IB_PORT_INIT;
12065         case LSTATE_ARMED:
12066                 return IB_PORT_ARMED;
12067         case LSTATE_ACTIVE:
12068                 return IB_PORT_ACTIVE;
12069         }
12070 }
12071
12072 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12073 {
12074         /* look at the HFI meta-states only */
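              /* the upper nibble is the meta-state; the low nibble is its sub-state */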
12075         switch (chip_pstate & 0xf0) {
12076         default:
12077                 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12078                            chip_pstate);
12079                 /* fall through */
12080         case PLS_DISABLED:
12081                 return IB_PORTPHYSSTATE_DISABLED;
12082         case PLS_OFFLINE:
12083                 return OPA_PORTPHYSSTATE_OFFLINE;
12084         case PLS_POLLING:
12085                 return IB_PORTPHYSSTATE_POLLING;
12086         case PLS_CONFIGPHY:
12087                 return IB_PORTPHYSSTATE_TRAINING;
12088         case PLS_LINKUP:
12089                 return IB_PORTPHYSSTATE_LINKUP;
12090         case PLS_PHYTEST:
12091                 return IB_PORTPHYSSTATE_PHY_TEST;
12092         }
12093 }
12094
12095 /* return the OPA port logical state name */
12096 const char *opa_lstate_name(u32 lstate)
12097 {
12098         static const char * const port_logical_names[] = {
12099                 "PORT_NOP",
12100                 "PORT_DOWN",
12101                 "PORT_INIT",
12102                 "PORT_ARMED",
12103                 "PORT_ACTIVE",
12104                 "PORT_ACTIVE_DEFER",
12105         };
12106         if (lstate < ARRAY_SIZE(port_logical_names))
12107                 return port_logical_names[lstate];
12108         return "unknown";
12109 }
12110
12111 /* return the OPA port physical state name */
12112 const char *opa_pstate_name(u32 pstate)
12113 {
12114         static const char * const port_physical_names[] = {
12115                 "PHYS_NOP",
12116                 "reserved1",
12117                 "PHYS_POLL",
12118                 "PHYS_DISABLED",
12119                 "PHYS_TRAINING",
12120                 "PHYS_LINKUP",
12121                 "PHYS_LINK_ERR_RECOVER",
12122                 "PHYS_PHY_TEST",
12123                 "reserved8",
12124                 "PHYS_OFFLINE",
12125                 "PHYS_GANGED",
12126                 "PHYS_TEST",
12127         };
12128         if (pstate < ARRAY_SIZE(port_physical_names))
12129                 return port_physical_names[pstate];
12130         return "unknown";
12131 }
12132
12133 /*
12134  * Read the hardware link state and set the driver's cached value of it.
12135  * Return the (new) current value.
12136  */
12137 u32 get_logical_state(struct hfi1_pportdata *ppd)
12138 {
12139         u32 new_state;
12140
12141         new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12142         if (new_state != ppd->lstate) {
12143                 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12144                             opa_lstate_name(new_state), new_state);
12145                 ppd->lstate = new_state;
12146         }
12147         /*
12148          * Set port status flags in the page mapped into userspace
12149          * memory. Do it here to ensure a reliable state - this is
12150          * the only function called by all state handling code.
12151          * Always set the flags because the cached value
12152          * might have been changed explicitly outside of this
12153          * function.
12154          */
12155         if (ppd->statusp) {
12156                 switch (ppd->lstate) {
12157                 case IB_PORT_DOWN:
12158                 case IB_PORT_INIT:
12159                         *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12160                                            HFI1_STATUS_IB_READY);
12161                         break;
12162                 case IB_PORT_ARMED:
12163                         *ppd->statusp |= HFI1_STATUS_IB_CONF;
12164                         break;
12165                 case IB_PORT_ACTIVE:
12166                         *ppd->statusp |= HFI1_STATUS_IB_READY;
12167                         break;
12168                 }
12169         }
12170         return ppd->lstate;
12171 }
12172
12173 /**
12174  * wait_logical_linkstate - wait for an IB link state change to occur
12175  * @ppd: port device
12176  * @state: the state to wait for
12177  * @msecs: the number of milliseconds to wait
12178  *
12179  * Wait up to msecs milliseconds for IB link state change to occur.
12180  * For now, take the easy polling route.
12181  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12182  */
12183 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12184                                   int msecs)
12185 {
12186         unsigned long timeout;
12187
12188         timeout = jiffies + msecs_to_jiffies(msecs);
12189         while (1) {
12190                 if (get_logical_state(ppd) == state)
12191                         return 0;
12192                 if (time_after(jiffies, timeout))
12193                         break;
12194                 msleep(20);
12195         }
12196         dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12197
12198         return -ETIMEDOUT;
12199 }
12200
12201 u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12202 {
12203         u32 pstate;
12204         u32 ib_pstate;
12205
12206         pstate = read_physical_state(ppd->dd);
12207         ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12208         if (ppd->last_pstate != ib_pstate) {
12209                 dd_dev_info(ppd->dd,
12210                             "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12211                             __func__, opa_pstate_name(ib_pstate), ib_pstate,
12212                             pstate);
12213                 ppd->last_pstate = ib_pstate;
12214         }
12215         return ib_pstate;
12216 }
12217
12218 /*
12219  * Read/modify/write ASIC_QSFP register bits as selected by mask
12220  * data: 0 or 1 in the positions depending on what needs to be written
12221  * dir: 0 for read, 1 for write
12222  * mask: select by setting
12223  *      I2CCLK  (bit 0)
12224  *      I2CDATA (bit 1)
12225  */
12226 u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12227                   u32 mask)
12228 {
12229         u64 qsfp_oe, target_oe;
12230
12231         target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12232         if (mask) {
12233                 /* We are writing register bits, so lock access */
12234                 dir &= mask;
12235                 data &= mask;
12236
12237                 qsfp_oe = read_csr(dd, target_oe);
12238                 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12239                 write_csr(dd, target_oe, qsfp_oe);
12240         }
12241         /* We are exclusively reading bits here, but it is unlikely
12242          * we'll get valid data when we set the direction of the pin
12243          * in the same call, so the caller should call this function
12244          * again to get valid data.
12245          */
12246         return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12247 }
12248
12249 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12250 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12251
12252 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12253 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12254
12255 int hfi1_init_ctxt(struct send_context *sc)
12256 {
12257         if (sc) {
12258                 struct hfi1_devdata *dd = sc->dd;
12259                 u64 reg;
12260                 u8 set = (sc->type == SC_USER ?
12261                           HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12262                           HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12263                 reg = read_kctxt_csr(dd, sc->hw_context,
12264                                      SEND_CTXT_CHECK_ENABLE);
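                      /*
                       * The CSR bit disallows PBC static rate control, so clear
                       * it when the capability is set and set it otherwise.
                       */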
12265                 if (set)
12266                         CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12267                 else
12268                         SET_STATIC_RATE_CONTROL_SMASK(reg);
12269                 write_kctxt_csr(dd, sc->hw_context,
12270                                 SEND_CTXT_CHECK_ENABLE, reg);
12271         }
12272         return 0;
12273 }
12274
12275 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12276 {
12277         int ret = 0;
12278         u64 reg;
12279
12280         if (dd->icode != ICODE_RTL_SILICON) {
12281                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12282                         dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12283                                     __func__);
12284                 return -EINVAL;
12285         }
12286         reg = read_csr(dd, ASIC_STS_THERM);
12287         temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12288                       ASIC_STS_THERM_CURR_TEMP_MASK);
12289         temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12290                         ASIC_STS_THERM_LO_TEMP_MASK);
12291         temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12292                         ASIC_STS_THERM_HI_TEMP_MASK);
12293         temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12294                           ASIC_STS_THERM_CRIT_TEMP_MASK);
12295         /* triggers is a 3-bit value - 1 bit per trigger. */
12296         temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12297
12298         return ret;
12299 }
12300
12301 /* ========================================================================= */
12302
12303 /*
12304  * Enable/disable chip from delivering interrupts.
12305  */
12306 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12307 {
12308         int i;
12309
12310         /*
12311          * In HFI, the mask needs to be 1 to allow interrupts.
12312          */
12313         if (enable) {
12314                 /* enable all interrupts */
12315                 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12316                         write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12317
12318                 init_qsfp_int(dd);
12319         } else {
12320                 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12321                         write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12322         }
12323 }
12324
12325 /*
12326  * Clear all interrupt sources on the chip.
12327  */
12328 static void clear_all_interrupts(struct hfi1_devdata *dd)
12329 {
12330         int i;
12331
12332         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12333                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12334
12335         write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12336         write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12337         write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12338         write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12339         write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12340         write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12341         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12342         for (i = 0; i < dd->chip_send_contexts; i++)
12343                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12344         for (i = 0; i < dd->chip_sdma_engines; i++)
12345                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12346
12347         write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12348         write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12349         write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12350 }
12351
12352 /* Move to pcie.c? */
12353 static void disable_intx(struct pci_dev *pdev)
12354 {
12355         pci_intx(pdev, 0);
12356 }
12357
12358 static void clean_up_interrupts(struct hfi1_devdata *dd)
12359 {
12360         int i;
12361
12362         /* remove irqs - must happen before disabling/turning off */
12363         if (dd->num_msix_entries) {
12364                 /* MSI-X */
12365                 struct hfi1_msix_entry *me = dd->msix_entries;
12366
12367                 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12368                         if (!me->arg) /* => no irq, no affinity */
12369                                 continue;
12370                         hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
12371                         free_irq(me->msix.vector, me->arg);
12372                 }
12373         } else {
12374                 /* INTx */
12375                 if (dd->requested_intx_irq) {
12376                         free_irq(dd->pcidev->irq, dd);
12377                         dd->requested_intx_irq = 0;
12378                 }
12379         }
12380
12381         /* turn off interrupts */
12382         if (dd->num_msix_entries) {
12383                 /* MSI-X */
12384                 pci_disable_msix(dd->pcidev);
12385         } else {
12386                 /* INTx */
12387                 disable_intx(dd->pcidev);
12388         }
12389
12390         /* clean structures */
12391         kfree(dd->msix_entries);
12392         dd->msix_entries = NULL;
12393         dd->num_msix_entries = 0;
12394 }
12395
12396 /*
12397  * Remap the interrupt source from the general handler to the given MSI-X
12398  * interrupt.
12399  */
12400 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12401 {
12402         u64 reg;
12403         int m, n;
12404
12405         /* clear from the handled mask of the general interrupt */
12406         m = isrc / 64;
12407         n = isrc % 64;
12408         dd->gi_mask[m] &= ~((u64)1 << n);
12409
12410         /* direct the chip source to the given MSI-X interrupt */
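              /* each 64-bit CCE_INT_MAP CSR holds eight 8-bit map entries */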
12411         m = isrc / 8;
12412         n = isrc % 8;
12413         reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12414         reg &= ~((u64)0xff << (8 * n));
12415         reg |= ((u64)msix_intr & 0xff) << (8 * n);
12416         write_csr(dd, CCE_INT_MAP + (8 * m), reg);
12417 }
12418
12419 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12420                                   int engine, int msix_intr)
12421 {
12422         /*
12423          * SDMA engine interrupt sources are grouped by type, rather than
12424          * by engine.  Per-engine interrupts are as follows:
12425          *      SDMA
12426          *      SDMAProgress
12427          *      SDMAIdle
12428          */
12429         remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12430                    msix_intr);
12431         remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12432                    msix_intr);
12433         remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
12434                    msix_intr);
12435 }
12436
12437 static int request_intx_irq(struct hfi1_devdata *dd)
12438 {
12439         int ret;
12440
12441         snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12442                  dd->unit);
12443         ret = request_irq(dd->pcidev->irq, general_interrupt,
12444                           IRQF_SHARED, dd->intx_name, dd);
12445         if (ret)
12446                 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12447                            ret);
12448         else
12449                 dd->requested_intx_irq = 1;
12450         return ret;
12451 }
12452
12453 static int request_msix_irqs(struct hfi1_devdata *dd)
12454 {
12455         int first_general, last_general;
12456         int first_sdma, last_sdma;
12457         int first_rx, last_rx;
12458         int i, ret = 0;
12459
12460         /* calculate the ranges we are going to use */
12461         first_general = 0;
12462         last_general = first_general + 1;
12463         first_sdma = last_general;
12464         last_sdma = first_sdma + dd->num_sdma;
12465         first_rx = last_sdma;
12466         last_rx = first_rx + dd->n_krcv_queues;
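              /*
               * Vector layout: the single general interrupt first, then one
               * per SDMA engine, then one per kernel receive context.
               */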
12467
12468         /*
12469          * Sanity check - the code expects all SDMA chip source
12470          * interrupts to be in the same CSR, starting at bit 0.  Verify
12471          * that this is true by checking the bit location of the start.
12472          */
12473         BUILD_BUG_ON(IS_SDMA_START % 64);
12474
12475         for (i = 0; i < dd->num_msix_entries; i++) {
12476                 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12477                 const char *err_info;
12478                 irq_handler_t handler;
12479                 irq_handler_t thread = NULL;
12480                 void *arg;
12481                 int idx;
12482                 struct hfi1_ctxtdata *rcd = NULL;
12483                 struct sdma_engine *sde = NULL;
12484
12485                 /* obtain the arguments to request_irq */
12486                 if (first_general <= i && i < last_general) {
12487                         idx = i - first_general;
12488                         handler = general_interrupt;
12489                         arg = dd;
12490                         snprintf(me->name, sizeof(me->name),
12491                                  DRIVER_NAME "_%d", dd->unit);
12492                         err_info = "general";
12493                         me->type = IRQ_GENERAL;
12494                 } else if (first_sdma <= i && i < last_sdma) {
12495                         idx = i - first_sdma;
12496                         sde = &dd->per_sdma[idx];
12497                         handler = sdma_interrupt;
12498                         arg = sde;
12499                         snprintf(me->name, sizeof(me->name),
12500                                  DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12501                         err_info = "sdma";
12502                         remap_sdma_interrupts(dd, idx, i);
12503                         me->type = IRQ_SDMA;
12504                 } else if (first_rx <= i && i < last_rx) {
12505                         idx = i - first_rx;
12506                         rcd = dd->rcd[idx];
12507                         /* no interrupt if no rcd */
12508                         if (!rcd)
12509                                 continue;
12510                         /*
12511                          * Set the interrupt register and mask for this
12512                          * context's interrupt.
12513                          */
12514                         rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12515                         rcd->imask = ((u64)1) <<
12516                                         ((IS_RCVAVAIL_START + idx) % 64);
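                              /*
                               * ireg indexes the 64-bit interrupt CSR; imask is
                               * the bit within it.
                               */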
12517                         handler = receive_context_interrupt;
12518                         thread = receive_context_thread;
12519                         arg = rcd;
12520                         snprintf(me->name, sizeof(me->name),
12521                                  DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12522                         err_info = "receive context";
12523                         remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12524                         me->type = IRQ_RCVCTXT;
12525                 } else {
12526                         /* not in our expected range - complain, then
12527                          * ignore it
12528                          */
12529                         dd_dev_err(dd,
12530                                    "Unexpected extra MSI-X interrupt %d\n", i);
12531                         continue;
12532                 }
12533                 /* no argument, no interrupt */
12534                 if (!arg)
12535                         continue;
12536                 /* make sure the name is terminated */
12537                 me->name[sizeof(me->name) - 1] = 0;
12538
12539                 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12540                                            me->name, arg);
12541                 if (ret) {
12542                         dd_dev_err(dd,
12543                                    "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12544                                    err_info, me->msix.vector, idx, ret);
12545                         return ret;
12546                 }
12547                 /*
12548                  * assign arg after request_irq call, so it will be
12549                  * cleaned up
12550                  */
12551                 me->arg = arg;
12552
12553                 ret = hfi1_get_irq_affinity(dd, me);
12554                 if (ret)
12555                         dd_dev_err(dd,
12556                                    "unable to pin IRQ %d\n", ret);
12557         }
12558
12559         return ret;
12560 }
12561
12562 /*
12563  * Set the general handler to accept all interrupts, remap all
12564  * chip interrupts back to MSI-X 0.
12565  */
12566 static void reset_interrupts(struct hfi1_devdata *dd)
12567 {
12568         int i;
12569
12570         /* all interrupts handled by the general handler */
12571         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12572                 dd->gi_mask[i] = ~(u64)0;
12573
12574         /* all chip interrupts map to MSI-X 0 */
12575         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12576                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12577 }
12578
12579 static int set_up_interrupts(struct hfi1_devdata *dd)
12580 {
12581         struct hfi1_msix_entry *entries;
12582         u32 total, request;
12583         int i, ret;
12584         int single_interrupt = 0; /* we expect to have all the interrupts */
12585
12586         /*
12587          * Interrupt count:
12588          *      1 general, "slow path" interrupt (includes the SDMA engines
12589          *              slow source, SDMACleanupDone)
12590          *      N interrupts - one per used SDMA engine
12591          *      M interrupts - one per kernel receive context
12592          */
12593         total = 1 + dd->num_sdma + dd->n_krcv_queues;
12594
12595         entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12596         if (!entries) {
12597                 ret = -ENOMEM;
12598                 goto fail;
12599         }
12600         /* 1-1 MSI-X entry assignment */
12601         for (i = 0; i < total; i++)
12602                 entries[i].msix.entry = i;
12603
12604         /* ask for MSI-X interrupts */
12605         request = total;
12606         request_msix(dd, &request, entries);
12607
12608         if (request == 0) {
12609                 /* using INTx */
12610                 /* dd->num_msix_entries already zero */
12611                 kfree(entries);
12612                 single_interrupt = 1;
12613                 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12614         } else {
12615                 /* using MSI-X */
12616                 dd->num_msix_entries = request;
12617                 dd->msix_entries = entries;
12618
12619                 if (request != total) {
12620                         /* using MSI-X, with reduced interrupts */
12621                         dd_dev_err(
12622                                 dd,
12623                                 "cannot handle reduced interrupt case, want %u, got %u\n",
12624                                 total, request);
12625                         ret = -EINVAL;
12626                         goto fail;
12627                 }
12628                 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12629         }
12630
12631         /* mask all interrupts */
12632         set_intr_state(dd, 0);
12633         /* clear all pending interrupts */
12634         clear_all_interrupts(dd);
12635
12636         /* reset general handler mask, chip MSI-X mappings */
12637         reset_interrupts(dd);
12638
12639         if (single_interrupt)
12640                 ret = request_intx_irq(dd);
12641         else
12642                 ret = request_msix_irqs(dd);
12643         if (ret)
12644                 goto fail;
12645
12646         return 0;
12647
12648 fail:
12649         clean_up_interrupts(dd);
12650         return ret;
12651 }
12652
12653 /*
12654  * Set up context values in dd.  Sets:
12655  *
12656  *      num_rcv_contexts - number of contexts being used
12657  *      n_krcv_queues - number of kernel contexts
12658  *      first_user_ctxt - first non-kernel context in array of contexts
12659  *      freectxts  - number of free user contexts
12660  *      num_send_contexts - number of PIO send contexts being used
12661  */
12662 static int set_up_context_variables(struct hfi1_devdata *dd)
12663 {
12664         int num_kernel_contexts;
12665         int total_contexts;
12666         int ret;
12667         unsigned ngroups;
12668
12669         /*
12670          * Kernel contexts (to be fixed later):
12671          * - minimum of 2, or 1 context per NUMA node
12672          * - Context 0 - control context (VL15/multicast/error)
12673          * - Context 1 - default context
12674          */
12675         if (n_krcvqs)
12676                 /*
12677                  * Don't count context 0 in n_krcvqs since
12678                  * it isn't used for normal verbs traffic.
12679                  *
12680                  * krcvqs will reflect number of kernel
12681                  * receive contexts above 0.
12682                  */
12683                 num_kernel_contexts = n_krcvqs + MIN_KERNEL_KCTXTS - 1;
12684         else
12685                 num_kernel_contexts = num_online_nodes() + 1;
12686         num_kernel_contexts =
12687                 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
12688         /*
12689          * Every kernel receive context needs an ACK send context.
12690          * One send context is allocated for each VL{0-7} and VL15.
12691          */
12692         if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12693                 dd_dev_err(dd,
12694                            "Reducing # kernel rcv contexts to: %d, from %d\n",
12695                            (int)(dd->chip_send_contexts - num_vls - 1),
12696                            (int)num_kernel_contexts);
12697                 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12698         }
12699         /*
12700          * User contexts (to be fixed later):
12701          *      - default to 1 user context per CPU if num_user_contexts is
12702          *        negative
12703          */
12704         if (num_user_contexts < 0)
12705                 num_user_contexts = num_online_cpus();
12706
12707         total_contexts = num_kernel_contexts + num_user_contexts;
12708
12709         /*
12710          * Adjust the counts given a global max.
12711          */
12712         if (total_contexts > dd->chip_rcv_contexts) {
12713                 dd_dev_err(dd,
12714                            "Reducing # user receive contexts to: %d, from %d\n",
12715                            (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12716                            (int)num_user_contexts);
12717                 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12718                 /* recalculate */
12719                 total_contexts = num_kernel_contexts + num_user_contexts;
12720         }
12721
12722         /* the first N are kernel contexts, the rest are user contexts */
12723         dd->num_rcv_contexts = total_contexts;
12724         dd->n_krcv_queues = num_kernel_contexts;
12725         dd->first_user_ctxt = num_kernel_contexts;
12726         dd->num_user_contexts = num_user_contexts;
12727         dd->freectxts = num_user_contexts;
12728         dd_dev_info(dd,
12729                     "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12730                     (int)dd->chip_rcv_contexts,
12731                     (int)dd->num_rcv_contexts,
12732                     (int)dd->n_krcv_queues,
12733                     (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12734
12735         /*
12736          * Receive array allocation:
12737          *   All RcvArray entries are divided into groups of 8. This
12738          *   is required by the hardware and will speed up writes to
12739          *   consecutive entries by using write-combining of the entire
12740          *   cacheline.
12741          *
12742          *   The number of groups is evenly divided among all contexts;
12743          *   any left-over groups are given to the first N user
12744          *   contexts.
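               *   (For example, 8 groups shared by 3 contexts gives 2 groups
               *   per context, with the 2 left over going to the first 2
               *   user contexts.)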
12745          */
12746         dd->rcv_entries.group_size = RCV_INCREMENT;
12747         ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12748         dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12749         dd->rcv_entries.nctxt_extra = ngroups -
12750                 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12751         dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12752                     dd->rcv_entries.ngroups,
12753                     dd->rcv_entries.nctxt_extra);
12754         if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12755             MAX_EAGER_ENTRIES * 2) {
12756                 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12757                         dd->rcv_entries.group_size;
12758                 dd_dev_info(dd,
12759                             "RcvArray group count too high, change to %u\n",
12760                             dd->rcv_entries.ngroups);
12761                 dd->rcv_entries.nctxt_extra = 0;
12762         }
12763         /*
12764          * PIO send contexts
12765          */
12766         ret = init_sc_pools_and_sizes(dd);
12767         if (ret >= 0) { /* success */
12768                 dd->num_send_contexts = ret;
12769                 dd_dev_info(
12770                         dd,
12771                         "send contexts: chip %d, used %d (kernel %d, ack %d, user %d)\n",
12772                         dd->chip_send_contexts,
12773                         dd->num_send_contexts,
12774                         dd->sc_sizes[SC_KERNEL].count,
12775                         dd->sc_sizes[SC_ACK].count,
12776                         dd->sc_sizes[SC_USER].count);
12777                 ret = 0;        /* success */
12778         }
12779
12780         return ret;
12781 }
12782
12783 /*
12784  * Set the device/port partition key table. The MAD code
12785  * will ensure that, at least, the partial management
12786  * partition key is present in the table.
12787  */
12788 static void set_partition_keys(struct hfi1_pportdata *ppd)
12789 {
12790         struct hfi1_devdata *dd = ppd->dd;
12791         u64 reg = 0;
12792         int i;
12793
12794         dd_dev_info(dd, "Setting partition keys\n");
12795         for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12796                 reg |= (ppd->pkeys[i] &
12797                         RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12798                         ((i % 4) *
12799                          RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12800                 /* Each register holds 4 PKey values. */
12801                 if ((i % 4) == 3) {
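                              /*
                               * The register holding keys i-3..i sits at byte
                               * offset (i - 3) * 2 from RCV_PARTITION_KEY.
                               */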
12802                         write_csr(dd, RCV_PARTITION_KEY +
12803                                   ((i - 3) * 2), reg);
12804                         reg = 0;
12805                 }
12806         }
12807
12808         /* Always enable HW pkeys check when pkeys table is set */
12809         add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12810 }
12811
12812 /*
12813  * These CSRs and memories are uninitialized on reset and must be
12814  * written before reading to set the ECC/parity bits.
12815  *
12816  * NOTE: All user context CSRs that are not mmapped write-only
12817  * (e.g. the TID flows) must be initialized even if the driver never
12818  * reads them.
12819  */
12820 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12821 {
12822         int i, j;
12823
12824         /* CceIntMap */
12825         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12826                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12827
12828         /* SendCtxtCreditReturnAddr */
12829         for (i = 0; i < dd->chip_send_contexts; i++)
12830                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12831
12832         /* PIO Send buffers */
12833         /* SDMA Send buffers */
12834         /*
12835          * These are not normally read, and (presently) have no method
12836          * to be read, so are not pre-initialized
12837          */
12838
12839         /* RcvHdrAddr */
12840         /* RcvHdrTailAddr */
12841         /* RcvTidFlowTable */
12842         for (i = 0; i < dd->chip_rcv_contexts; i++) {
12843                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12844                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12845                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12846                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
12847         }
12848
12849         /* RcvArray */
12850         for (i = 0; i < dd->chip_rcv_array_count; i++)
12851                 write_csr(dd, RCV_ARRAY + (8 * i),
12852                           RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12853
12854         /* RcvQPMapTable */
12855         for (i = 0; i < 32; i++)
12856                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12857 }
12858
12859 /*
12860  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12861  */
12862 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12863                              u64 ctrl_bits)
12864 {
12865         unsigned long timeout;
12866         u64 reg;
12867
12868         /* is the condition present? */
12869         reg = read_csr(dd, CCE_STATUS);
12870         if ((reg & status_bits) == 0)
12871                 return;
12872
12873         /* clear the condition */
12874         write_csr(dd, CCE_CTRL, ctrl_bits);
12875
12876         /* wait for the condition to clear */
12877         timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
12878         while (1) {
12879                 reg = read_csr(dd, CCE_STATUS);
12880                 if ((reg & status_bits) == 0)
12881                         return;
12882                 if (time_after(jiffies, timeout)) {
12883                         dd_dev_err(dd,
12884                                    "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
12885                                    status_bits, reg & status_bits);
12886                         return;
12887                 }
12888                 udelay(1);
12889         }
12890 }
12891
12892 /* set CCE CSRs to chip reset defaults */
12893 static void reset_cce_csrs(struct hfi1_devdata *dd)
12894 {
12895         int i;
12896
12897         /* CCE_REVISION read-only */
12898         /* CCE_REVISION2 read-only */
12899         /* CCE_CTRL - bits clear automatically */
12900         /* CCE_STATUS read-only, use CceCtrl to clear */
12901         clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
12902         clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
12903         clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
12904         for (i = 0; i < CCE_NUM_SCRATCH; i++)
12905                 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
12906         /* CCE_ERR_STATUS read-only */
12907         write_csr(dd, CCE_ERR_MASK, 0);
12908         write_csr(dd, CCE_ERR_CLEAR, ~0ull);
12909         /* CCE_ERR_FORCE leave alone */
12910         for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
12911                 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
12912         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
12913         /* CCE_PCIE_CTRL leave alone */
12914         for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
12915                 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
12916                 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
12917                           CCE_MSIX_TABLE_UPPER_RESETCSR);
12918         }
12919         for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
12920                 /* CCE_MSIX_PBA read-only */
12921                 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
12922                 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
12923         }
12924         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12925                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12926         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
12927                 /* CCE_INT_STATUS read-only */
12928                 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
12929                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
12930                 /* CCE_INT_FORCE leave alone */
12931                 /* CCE_INT_BLOCKED read-only */
12932         }
12933         for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
12934                 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
12935 }
12936
12937 /* set ASIC CSRs to chip reset defaults */
12938 static void reset_asic_csrs(struct hfi1_devdata *dd)
12939 {
12940         int i;
12941
12942         /*
12943          * If the HFIs are shared between separate nodes or VMs,
12944          * then more will need to be done here.  One idea is a module
12945          * parameter that returns early, letting the first power-on or
12946          * a known first load do the reset and blocking all others.
12947          */
12948
12949         if (!(dd->flags & HFI1_DO_INIT_ASIC))
12950                 return;
12951
12952         if (dd->icode != ICODE_FPGA_EMULATION) {
12953                 /* emulation does not have an SBus - leave these alone */
12954                 /*
12955                  * All writes to ASIC_CFG_SBUS_REQUEST do something.
12956                  * Notes:
12957                  * o The reset is not zero if aimed at the core.  See the
12958                  *   SBus documentation for details.
12959                  * o If the SBus firmware has been updated (e.g. by the BIOS),
12960                  *   will the reset revert that?
12961                  */
12962                 /* ASIC_CFG_SBUS_REQUEST leave alone */
12963                 write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
12964         }
12965         /* ASIC_SBUS_RESULT read-only */
12966         write_csr(dd, ASIC_STS_SBUS_COUNTERS, 0);
12967         for (i = 0; i < ASIC_NUM_SCRATCH; i++)
12968                 write_csr(dd, ASIC_CFG_SCRATCH + (8 * i), 0);
12969         write_csr(dd, ASIC_CFG_MUTEX, 0);       /* this will clear it */
12970
12971         /* We might want to retain this state across FLR if we ever use it */
12972         write_csr(dd, ASIC_CFG_DRV_STR, 0);
12973
12974         /* ASIC_CFG_THERM_POLL_EN leave alone */
12975         /* ASIC_STS_THERM read-only */
12976         /* ASIC_CFG_RESET leave alone */
12977
12978         write_csr(dd, ASIC_PCIE_SD_HOST_CMD, 0);
12979         /* ASIC_PCIE_SD_HOST_STATUS read-only */
12980         write_csr(dd, ASIC_PCIE_SD_INTRPT_DATA_CODE, 0);
12981         write_csr(dd, ASIC_PCIE_SD_INTRPT_ENABLE, 0);
12982         /* ASIC_PCIE_SD_INTRPT_PROGRESS read-only */
12983         write_csr(dd, ASIC_PCIE_SD_INTRPT_STATUS, ~0ull); /* clear */
12984         /* ASIC_HFI0_PCIE_SD_INTRPT_RSPD_DATA read-only */
12985         /* ASIC_HFI1_PCIE_SD_INTRPT_RSPD_DATA read-only */
12986         for (i = 0; i < 16; i++)
12987                 write_csr(dd, ASIC_PCIE_SD_INTRPT_LIST + (8 * i), 0);
12988
12989         /* ASIC_GPIO_IN read-only */
12990         write_csr(dd, ASIC_GPIO_OE, 0);
12991         write_csr(dd, ASIC_GPIO_INVERT, 0);
12992         write_csr(dd, ASIC_GPIO_OUT, 0);
12993         write_csr(dd, ASIC_GPIO_MASK, 0);
12994         /* ASIC_GPIO_STATUS read-only */
12995         write_csr(dd, ASIC_GPIO_CLEAR, ~0ull);
12996         /* ASIC_GPIO_FORCE leave alone */
12997
12998         /* ASIC_QSFP1_IN read-only */
12999         write_csr(dd, ASIC_QSFP1_OE, 0);
13000         write_csr(dd, ASIC_QSFP1_INVERT, 0);
13001         write_csr(dd, ASIC_QSFP1_OUT, 0);
13002         write_csr(dd, ASIC_QSFP1_MASK, 0);
13003         /* ASIC_QSFP1_STATUS read-only */
13004         write_csr(dd, ASIC_QSFP1_CLEAR, ~0ull);
13005         /* ASIC_QSFP1_FORCE leave alone */
13006
13007         /* ASIC_QSFP2_IN read-only */
13008         write_csr(dd, ASIC_QSFP2_OE, 0);
13009         write_csr(dd, ASIC_QSFP2_INVERT, 0);
13010         write_csr(dd, ASIC_QSFP2_OUT, 0);
13011         write_csr(dd, ASIC_QSFP2_MASK, 0);
13012         /* ASIC_QSFP2_STATUS read-only */
13013         write_csr(dd, ASIC_QSFP2_CLEAR, ~0ull);
13014         /* ASIC_QSFP2_FORCE leave alone */
13015
13016         write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_RESETCSR);
13017         /* this also writes a NOP command, clearing paging mode */
13018         write_csr(dd, ASIC_EEP_ADDR_CMD, 0);
13019         write_csr(dd, ASIC_EEP_DATA, 0);
13020 }
13021
13022 /* set MISC CSRs to chip reset defaults */
13023 static void reset_misc_csrs(struct hfi1_devdata *dd)
13024 {
13025         int i;
13026
13027         for (i = 0; i < 32; i++) {
13028                 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13029                 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13030                 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13031         }
13032         /*
13033          * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13034          * only be written in 128-byte chunks
13035          */
13036         /* init RSA engine to clear lingering errors */
13037         write_csr(dd, MISC_CFG_RSA_CMD, 1);
13038         write_csr(dd, MISC_CFG_RSA_MU, 0);
13039         write_csr(dd, MISC_CFG_FW_CTRL, 0);
13040         /* MISC_STS_8051_DIGEST read-only */
13041         /* MISC_STS_SBM_DIGEST read-only */
13042         /* MISC_STS_PCIE_DIGEST read-only */
13043         /* MISC_STS_FAB_DIGEST read-only */
13044         /* MISC_ERR_STATUS read-only */
13045         write_csr(dd, MISC_ERR_MASK, 0);
13046         write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13047         /* MISC_ERR_FORCE leave alone */
13048 }
13049
13050 /* set TXE CSRs to chip reset defaults */
13051 static void reset_txe_csrs(struct hfi1_devdata *dd)
13052 {
13053         int i;
13054
13055         /*
13056          * TXE Kernel CSRs
13057          */
13058         write_csr(dd, SEND_CTRL, 0);
13059         __cm_reset(dd, 0);      /* reset CM internal state */
13060         /* SEND_CONTEXTS read-only */
13061         /* SEND_DMA_ENGINES read-only */
13062         /* SEND_PIO_MEM_SIZE read-only */
13063         /* SEND_DMA_MEM_SIZE read-only */
13064         write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13065         pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13066         /* SEND_PIO_ERR_STATUS read-only */
13067         write_csr(dd, SEND_PIO_ERR_MASK, 0);
13068         write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13069         /* SEND_PIO_ERR_FORCE leave alone */
13070         /* SEND_DMA_ERR_STATUS read-only */
13071         write_csr(dd, SEND_DMA_ERR_MASK, 0);
13072         write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13073         /* SEND_DMA_ERR_FORCE leave alone */
13074         /* SEND_EGRESS_ERR_STATUS read-only */
13075         write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13076         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13077         /* SEND_EGRESS_ERR_FORCE leave alone */
13078         write_csr(dd, SEND_BTH_QP, 0);
13079         write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13080         write_csr(dd, SEND_SC2VLT0, 0);
13081         write_csr(dd, SEND_SC2VLT1, 0);
13082         write_csr(dd, SEND_SC2VLT2, 0);
13083         write_csr(dd, SEND_SC2VLT3, 0);
13084         write_csr(dd, SEND_LEN_CHECK0, 0);
13085         write_csr(dd, SEND_LEN_CHECK1, 0);
13086         /* SEND_ERR_STATUS read-only */
13087         write_csr(dd, SEND_ERR_MASK, 0);
13088         write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13089         /* SEND_ERR_FORCE read-only */
13090         for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13091                 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13092         for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13093                 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13094         for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13095                 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13096         for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13097                 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13098         for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13099                 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13100         write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13101         write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13102         /* SEND_CM_CREDIT_USED_STATUS read-only */
13103         write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13104         write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13105         write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13106         write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13107         write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13108         for (i = 0; i < TXE_NUM_DATA_VL; i++)
13109                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13110         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13111         /* SEND_CM_CREDIT_USED_VL read-only */
13112         /* SEND_CM_CREDIT_USED_VL15 read-only */
13113         /* SEND_EGRESS_CTXT_STATUS read-only */
13114         /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13115         write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13116         /* SEND_EGRESS_ERR_INFO read-only */
13117         /* SEND_EGRESS_ERR_SOURCE read-only */
13118
13119         /*
13120          * TXE Per-Context CSRs
13121          */
13122         for (i = 0; i < dd->chip_send_contexts; i++) {
13123                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13124                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13125                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13126                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13127                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13128                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13129                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13130                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13131                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13132                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13133                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13134                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13135         }
13136
13137         /*
13138          * TXE Per-SDMA CSRs
13139          */
13140         for (i = 0; i < dd->chip_sdma_engines; i++) {
13141                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13142                 /* SEND_DMA_STATUS read-only */
13143                 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13144                 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13145                 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13146                 /* SEND_DMA_HEAD read-only */
13147                 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13148                 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13149                 /* SEND_DMA_IDLE_CNT read-only */
13150                 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13151                 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13152                 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13153                 /* SEND_DMA_ENG_ERR_STATUS read-only */
13154                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13155                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13156                 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13157                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13158                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13159                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13160                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13161                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13162                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13163                 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13164         }
13165 }
13166
13167 /*
13168  * Expect on entry:
13169  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13170  */
13171 static void init_rbufs(struct hfi1_devdata *dd)
13172 {
13173         u64 reg;
13174         int count;
13175
13176         /*
13177          * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13178          * clear.
13179          */
13180         count = 0;
13181         while (1) {
13182                 reg = read_csr(dd, RCV_STATUS);
13183                 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13184                             | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13185                         break;
13186                 /*
13187                  * Give up after 1ms - maximum wait time.
13188                  *
13189                  * RBuf size is 148KiB.  Slowest possible is PCIe Gen1 x1 at
13190                  * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13191                  *      148 KB / (66% * 250MB/s) = 920us
13192                  */
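                /*
                 * The loop budget below is 500 passes of udelay(2), i.e.
                 * roughly 1 ms, comfortably above the ~920us worst case
                 * computed above.
                 */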
13193                 if (count++ > 500) {
13194                         dd_dev_err(dd,
13195                                    "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13196                                    __func__, reg);
13197                         break;
13198                 }
13199                 udelay(2); /* do not busy-wait the CSR */
13200         }
13201
13202         /* start the init - expect RcvCtrl to be 0 */
13203         write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13204
13205         /*
13206          * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13207          * period after the write before RcvStatus.RxRbufInitDone is valid.
13208          * The delay in the first run through the loop below is sufficient and
13209          * required before the first read of RcvStatus.RxRbufInitDone.
13210          */
13211         read_csr(dd, RCV_CTRL);
13212
13213         /* wait for the init to finish */
13214         count = 0;
13215         while (1) {
13216                 /* delay is required first time through - see above */
13217                 udelay(2); /* do not busy-wait the CSR */
13218                 reg = read_csr(dd, RCV_STATUS);
13219                 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13220                         break;
13221
13222                 /* give up after 100us - slowest possible at 33MHz is 73us */
13223                 if (count++ > 50) {
13224                         dd_dev_err(dd,
13225                                    "%s: RcvStatus.RxRbufInitDone not set, continuing\n",
13226                                    __func__);
13227                         break;
13228                 }
13229         }
13230 }
13231
13232 /* set RXE CSRs to chip reset defaults */
13233 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13234 {
13235         int i, j;
13236
13237         /*
13238          * RXE Kernel CSRs
13239          */
13240         write_csr(dd, RCV_CTRL, 0);
13241         init_rbufs(dd);
13242         /* RCV_STATUS read-only */
13243         /* RCV_CONTEXTS read-only */
13244         /* RCV_ARRAY_CNT read-only */
13245         /* RCV_BUF_SIZE read-only */
13246         write_csr(dd, RCV_BTH_QP, 0);
13247         write_csr(dd, RCV_MULTICAST, 0);
13248         write_csr(dd, RCV_BYPASS, 0);
13249         write_csr(dd, RCV_VL15, 0);
13250         /* this is a clear-down */
13251         write_csr(dd, RCV_ERR_INFO,
13252                   RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13253         /* RCV_ERR_STATUS read-only */
13254         write_csr(dd, RCV_ERR_MASK, 0);
13255         write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13256         /* RCV_ERR_FORCE leave alone */
13257         for (i = 0; i < 32; i++)
13258                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13259         for (i = 0; i < 4; i++)
13260                 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13261         for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13262                 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13263         for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13264                 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13265         for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13266                 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13267                 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13268                 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13269         }
13270         for (i = 0; i < 32; i++)
13271                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13272
13273         /*
13274          * RXE Kernel and User Per-Context CSRs
13275          */
13276         for (i = 0; i < dd->chip_rcv_contexts; i++) {
13277                 /* kernel */
13278                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13279                 /* RCV_CTXT_STATUS read-only */
13280                 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13281                 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13282                 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13283                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13284                 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13285                 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13286                 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13287                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13288                 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13289                 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13290
13291                 /* user */
13292                 /* RCV_HDR_TAIL read-only */
13293                 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13294                 /* RCV_EGR_INDEX_TAIL read-only */
13295                 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13296                 /* RCV_EGR_OFFSET_TAIL read-only */
13297                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13298                         write_uctxt_csr(dd, i,
13299                                         RCV_TID_FLOW_TABLE + (8 * j), 0);
13300                 }
13301         }
13302 }
13303
13304 /*
13305  * Set sc2vl tables.
13306  *
13307  * They power on to zeros, so to avoid send context errors
13308  * they need to be set:
13309  *
13310  * SC 0-7 -> VL 0-7 (respectively)
13311  * SC 15  -> VL 15
13312  * otherwise
13313  *        -> VL 0
13314  */
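/*
 * The SC2VL_VAL() and DC_SC_VL_VAL() macros used below take the table
 * identifier followed by (SC, VL) pairs and pack them into the 64-bit
 * value written to that table CSR.
 */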
13315 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13316 {
13317         int i;
13318         /* init per architecture spec, constrained by hardware capability */
13319
13320         /* HFI maps sent packets */
13321         write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13322                 0,
13323                 0, 0, 1, 1,
13324                 2, 2, 3, 3,
13325                 4, 4, 5, 5,
13326                 6, 6, 7, 7));
13327         write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13328                 1,
13329                 8, 0, 9, 0,
13330                 10, 0, 11, 0,
13331                 12, 0, 13, 0,
13332                 14, 0, 15, 15));
13333         write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13334                 2,
13335                 16, 0, 17, 0,
13336                 18, 0, 19, 0,
13337                 20, 0, 21, 0,
13338                 22, 0, 23, 0));
13339         write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13340                 3,
13341                 24, 0, 25, 0,
13342                 26, 0, 27, 0,
13343                 28, 0, 29, 0,
13344                 30, 0, 31, 0));
13345
13346         /* DC maps received packets */
13347         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13348                 15_0,
13349                 0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13350                 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13351         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13352                 31_16,
13353                 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13354                 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13355
13356         /* initialize the cached sc2vl values consistently with h/w */
13357         for (i = 0; i < 32; i++) {
13358                 if (i < 8 || i == 15)
13359                         *((u8 *)(dd->sc2vl) + i) = (u8)i;
13360                 else
13361                         *((u8 *)(dd->sc2vl) + i) = 0;
13362         }
13363 }
13364
13365 /*
13366  * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13367  * depend on the chip going through a power-on reset - a driver may be loaded
13368  * and unloaded many times.
13369  *
13370  * Do not write any CSR values to the chip in this routine - there may be
13371  * a reset following the (possible) FLR in this routine.
13372  *
13373  */
13374 static void init_chip(struct hfi1_devdata *dd)
13375 {
13376         int i;
13377
13378         /*
13379          * Put the HFI CSRs in a known state.
13380          * Combine this with a DC reset.
13381          *
13382          * Stop the device from doing anything while we do a
13383          * reset.  We know there are no other active users of
13384          * the device since we are now in charge.  Turn off
13385          * all outbound and inbound traffic and make sure
13386          * the device does not generate any interrupts.
13387          */
13388
13389         /* disable send contexts and SDMA engines */
13390         write_csr(dd, SEND_CTRL, 0);
13391         for (i = 0; i < dd->chip_send_contexts; i++)
13392                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13393         for (i = 0; i < dd->chip_sdma_engines; i++)
13394                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13395         /* disable port (turn off RXE inbound traffic) and contexts */
13396         write_csr(dd, RCV_CTRL, 0);
13397         for (i = 0; i < dd->chip_rcv_contexts; i++)
13398                 write_csr(dd, RCV_CTXT_CTRL, 0);
13399         /* mask all interrupt sources */
13400         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13401                 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13402
13403         /*
13404          * DC Reset: do a full DC reset before the register clear.
13405          * A recommended length of time to hold is one CSR read,
13406          * so reread the CceDcCtrl.  Then, hold the DC in reset
13407          * across the clear.
13408          */
13409         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13410         (void)read_csr(dd, CCE_DC_CTRL);
13411
13412         if (use_flr) {
13413                 /*
13414                  * A FLR will reset the SPC core and part of the PCIe.
13415                  * The parts that need to be restored have already been
13416                  * saved.
13417                  */
13418                 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13419
13420                 /* do the FLR, the DC reset will remain */
13421                 hfi1_pcie_flr(dd);
13422
13423                 /* restore command and BARs */
13424                 restore_pci_variables(dd);
13425
13426                 if (is_ax(dd)) {
13427                         dd_dev_info(dd, "Resetting CSRs with FLR\n");
13428                         hfi1_pcie_flr(dd);
13429                         restore_pci_variables(dd);
13430                 }
13431
13432                 reset_asic_csrs(dd);
13433         } else {
13434                 dd_dev_info(dd, "Resetting CSRs with writes\n");
13435                 reset_cce_csrs(dd);
13436                 reset_txe_csrs(dd);
13437                 reset_rxe_csrs(dd);
13438                 reset_asic_csrs(dd);
13439                 reset_misc_csrs(dd);
13440         }
13441         /* clear the DC reset */
13442         write_csr(dd, CCE_DC_CTRL, 0);
13443
13444         /* Set the LED off */
13445         setextled(dd, 0);
13446
13447         /*
13448          * Clear the QSFP reset.
13449          * An FLR enforces a 0 on all out pins. The driver does not touch
13450          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
13451          * anything plugged constantly in reset, if it pays attention
13452          * to RESET_N.
13453          * Prime examples of this are optical cables. Set all pins high.
13454          * I2CCLK and I2CDAT will change per direction, and INT_N and
13455          * MODPRS_N are input only and their value is ignored.
13456          */
13457         write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13458         write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13459 }
13460
13461 static void init_early_variables(struct hfi1_devdata *dd)
13462 {
13463         int i;
13464
13465         /* assign link credit variables */
13466         dd->vau = CM_VAU;
13467         dd->link_credits = CM_GLOBAL_CREDITS;
13468         if (is_ax(dd))
13469                 dd->link_credits--;
13470         dd->vcu = cu_to_vcu(hfi1_cu);
13471         /* enough room for 8 MAD packets plus header - 17K */
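        /*
         * 8 * (2048 + 128) = 17408 bytes; dividing by the AU size from
         * vau_to_au() yields the credit count in AUs.
         */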
13472         dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13473         if (dd->vl15_init > dd->link_credits)
13474                 dd->vl15_init = dd->link_credits;
13475
13476         write_uninitialized_csrs_and_memories(dd);
13477
13478         if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13479                 for (i = 0; i < dd->num_pports; i++) {
13480                         struct hfi1_pportdata *ppd = &dd->pport[i];
13481
13482                         set_partition_keys(ppd);
13483                 }
13484         init_sc2vl_tables(dd);
13485 }
13486
13487 static void init_kdeth_qp(struct hfi1_devdata *dd)
13488 {
13489         /* user changed the KDETH_QP */
13490         if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13491                 /* out of range or illegal value */
13492                 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13493                 kdeth_qp = 0;
13494         }
13495         if (kdeth_qp == 0)      /* not set, or failed range check */
13496                 kdeth_qp = DEFAULT_KDETH_QP;
13497
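        /*
         * Program the same prefix into both the send and receive sides so
         * KDETH packets are built and recognized consistently.
         */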
13498         write_csr(dd, SEND_BTH_QP,
13499                   (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13500                   SEND_BTH_QP_KDETH_QP_SHIFT);
13501
13502         write_csr(dd, RCV_BTH_QP,
13503                   (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13504                   RCV_BTH_QP_KDETH_QP_SHIFT);
13505 }
13506
13507 /**
13508  * init_qpmap_table
13509  * @dd - device data
13510  * @first_ctxt - first context
13511  * @last_ctxt - last context
13512  *
13513  * This routine sets the qpn mapping table that
13514  * is indexed by qpn[8:1].
13515  *
13516  * The routine will round robin the 256 settings
13517  * from first_ctxt to last_ctxt.
13518  *
13519  * The first/last looks ahead to having specialized
13520  * receive contexts for mgmt and bypass.  Normal
13521  * verbs traffic is assumed to be on a range
13522  * of receive contexts.
13523  */
13524 static void init_qpmap_table(struct hfi1_devdata *dd,
13525                              u32 first_ctxt,
13526                              u32 last_ctxt)
13527 {
13528         u64 reg = 0;
13529         u64 regno = RCV_QP_MAP_TABLE;
13530         int i;
13531         u64 ctxt = first_ctxt;
13532
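        /*
         * Each RcvQpMapTable CSR holds 8 one-byte entries, so the 256 table
         * entries below span 32 consecutive CSRs.  For example, with
         * first_ctxt = 1 and last_ctxt = 3 the entries cycle 1, 2, 3, 1, ...
         */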
13533         for (i = 0; i < 256;) {
13534                 reg |= ctxt << (8 * (i % 8));
13535                 i++;
13536                 ctxt++;
13537                 if (ctxt > last_ctxt)
13538                         ctxt = first_ctxt;
13539                 if (i % 8 == 0) {
13540                         write_csr(dd, regno, reg);
13541                         reg = 0;
13542                         regno += 8;
13543                 }
13544         }
13545         if (i % 8)
13546                 write_csr(dd, regno, reg);
13547
13548         add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13549                         | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13550 }
13551
13552 /**
13553  * init_qos - init RX qos
13554  * @dd - device data
13555  * @first_ctxt - first context
13556  *
13557  * This routine initializes Rule 0 and the
13558  * RSM map table to implement qos.
13559  *
13560  * If all of the limit tests succeed,
13561  * qos is applied based on the array
13562  * interpretation of krcvqs where
13563  * entry 0 is VL0.
13564  *
13565  * The number of vl bits (n) and the number of qpn
13566  * bits (m) are computed to feed both the RSM map table
13567  * and the single rule.
13568  *
13569  */
13570 static void init_qos(struct hfi1_devdata *dd, u32 first_ctxt)
13571 {
13572         u8 max_by_vl = 0;
13573         unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13574         u64 *rsmmap;
13575         u64 reg;
13576         u8  rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is the default on A0 h/w */
13577
13578         /* validate */
13579         if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13580             num_vls == 1 ||
13581             krcvqsset <= 1)
13582                 goto bail;
13583         for (i = 0; i < min_t(unsigned, num_vls, krcvqsset); i++)
13584                 if (krcvqs[i] > max_by_vl)
13585                         max_by_vl = krcvqs[i];
13586         if (max_by_vl > 32)
13587                 goto bail;
13588         qpns_per_vl = __roundup_pow_of_two(max_by_vl);
13589         /* determine bits for vl */
13590         n = ilog2(num_vls);
13591         /* determine bits for qpn */
13592         m = ilog2(qpns_per_vl);
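        /*
         * The RSM rule indexes 2^(m + n) map table entries; limit the
         * combined select width to 7 bits (128 entries).
         */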
13593         if ((m + n) > 7)
13594                 goto bail;
13595         if (num_vls * qpns_per_vl > dd->chip_rcv_contexts)
13596                 goto bail;
13597         rsmmap = kmalloc_array(NUM_MAP_REGS, sizeof(u64), GFP_KERNEL);
13598         if (!rsmmap)
13599                 goto bail;
13600         memset(rsmmap, rxcontext, NUM_MAP_REGS * sizeof(u64));
13601         /* init the local copy of the table */
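        /*
         * Every entry starts out as rxcontext (0xff, or context 0 on A0
         * hardware); the entries selected by idx = (qpn << n) ^ i are then
         * overwritten with real context numbers before the whole table is
         * flushed to the chip below.
         */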
13602         for (i = 0, ctxt = first_ctxt; i < num_vls; i++) {
13603                 unsigned tctxt;
13604
13605                 for (qpn = 0, tctxt = ctxt;
13606                      krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13607                         unsigned idx, regoff, regidx;
13608
13609                         /* generate an index < 128 */
13610                         idx = (qpn << n) ^ i;
13611                         regoff = (idx % 8) * 8;
13612                         regidx = idx / 8;
13613                         reg = rsmmap[regidx];
13614                         /* replace 0xff with context number */
13615                         reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13616                                 << regoff);
13617                         reg |= (u64)(tctxt++) << regoff;
13618                         rsmmap[regidx] = reg;
13619                         if (tctxt == ctxt + krcvqs[i])
13620                                 tctxt = ctxt;
13621                 }
13622                 ctxt += krcvqs[i];
13623         }
13624         /* flush cached copies to chip */
13625         for (i = 0; i < NUM_MAP_REGS; i++)
13626                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rsmmap[i]);
13627         /* add rule0 */
13628         write_csr(dd, RCV_RSM_CFG /* + (8 * 0) */,
13629                   RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_MASK <<
13630                   RCV_RSM_CFG_ENABLE_OR_CHAIN_RSM0_SHIFT |
13631                   2ull << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13632         write_csr(dd, RCV_RSM_SELECT /* + (8 * 0) */,
13633                   LRH_BTH_MATCH_OFFSET << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13634                   LRH_SC_MATCH_OFFSET << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13635                   LRH_SC_SELECT_OFFSET << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13636                   ((u64)n) << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13637                   QPN_SELECT_OFFSET << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13638                   ((u64)m + (u64)n) << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13639         write_csr(dd, RCV_RSM_MATCH /* + (8 * 0) */,
13640                   LRH_BTH_MASK << RCV_RSM_MATCH_MASK1_SHIFT |
13641                   LRH_BTH_VALUE << RCV_RSM_MATCH_VALUE1_SHIFT |
13642                   LRH_SC_MASK << RCV_RSM_MATCH_MASK2_SHIFT |
13643                   LRH_SC_VALUE << RCV_RSM_MATCH_VALUE2_SHIFT);
13644         /* Enable RSM */
13645         add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13646         kfree(rsmmap);
13647         /* map everything else to first context */
13648         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, MIN_KERNEL_KCTXTS - 1);
13649         dd->qos_shift = n + 1;
13650         return;
13651 bail:
13652         dd->qos_shift = 1;
13653         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13654 }
13655
13656 static void init_rxe(struct hfi1_devdata *dd)
13657 {
13658         /* enable all receive errors */
13659         write_csr(dd, RCV_ERR_MASK, ~0ull);
13660         /* setup QPN map table - start where VL15 context leaves off */
13661         init_qos(dd, dd->n_krcv_queues > MIN_KERNEL_KCTXTS ?
13662                  MIN_KERNEL_KCTXTS : 0);
13663         /*
13664          * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13665          * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13666          * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
13667          * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13668          * Max_PayLoad_Size set to its minimum of 128.
13669          * Max_Payload_Size set to its minimum of 128.
13670          * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13671          * (64 bytes).  Max_Payload_Size is possibly modified upward in
13672          * tune_pcie_caps() which is called after this routine.
13673          */
13674 }
13675
13676 static void init_other(struct hfi1_devdata *dd)
13677 {
13678         /* enable all CCE errors */
13679         write_csr(dd, CCE_ERR_MASK, ~0ull);
13680         /* enable *some* Misc errors */
13681         write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13682         /* enable all DC errors, except LCB */
13683         write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13684         write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13685 }
13686
13687 /*
13688  * Fill out the given AU table using the given CU.  A CU is defined in terms
13689  * of AUs.  The table is an encoding: given the index, how many AUs does that
13690  * represent?
13691  *
13692  * NOTE: Assumes that the register layout is the same for the
13693  * local and remote tables.
13694  */
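/*
 * For example, with cu == 2 the eight table entries written below encode
 * 0, 1, 4, 8, 16, 32, 64 and 128 AUs respectively.
 */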
13695 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13696                                u32 csr0to3, u32 csr4to7)
13697 {
13698         write_csr(dd, csr0to3,
13699                   0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13700                   1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13701                   2ull * cu <<
13702                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13703                   4ull * cu <<
13704                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13705         write_csr(dd, csr4to7,
13706                   8ull * cu <<
13707                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13708                   16ull * cu <<
13709                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13710                   32ull * cu <<
13711                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13712                   64ull * cu <<
13713                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13714 }
13715
13716 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13717 {
13718         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13719                            SEND_CM_LOCAL_AU_TABLE4_TO7);
13720 }
13721
13722 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13723 {
13724         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13725                            SEND_CM_REMOTE_AU_TABLE4_TO7);
13726 }
13727
13728 static void init_txe(struct hfi1_devdata *dd)
13729 {
13730         int i;
13731
13732         /* enable all PIO, SDMA, general, and Egress errors */
13733         write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13734         write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13735         write_csr(dd, SEND_ERR_MASK, ~0ull);
13736         write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13737
13738         /* enable all per-context and per-SDMA engine errors */
13739         for (i = 0; i < dd->chip_send_contexts; i++)
13740                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13741         for (i = 0; i < dd->chip_sdma_engines; i++)
13742                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13743
13744         /* set the local CU to AU mapping */
13745         assign_local_cm_au_table(dd, dd->vcu);
13746
13747         /*
13748          * Set reasonable default for Credit Return Timer
13749          * Don't set on Simulator - causes it to choke.
13750          */
13751         if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13752                 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13753 }
13754
13755 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13756 {
13757         struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13758         unsigned sctxt;
13759         int ret = 0;
13760         u64 reg;
13761
13762         if (!rcd || !rcd->sc) {
13763                 ret = -EINVAL;
13764                 goto done;
13765         }
13766         sctxt = rcd->sc->hw_context;
13767         reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13768                 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13769                  SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13770         /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13771         if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13772                 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13773         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13774         /*
13775          * Enable send-side J_KEY integrity check, unless this is A0 h/w
13776          */
13777         if (!is_ax(dd)) {
13778                 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13779                 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13780                 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13781         }
13782
13783         /* Enable J_KEY check on receive context. */
13784         reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
13785                 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
13786                  RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
13787         write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
13788 done:
13789         return ret;
13790 }
13791
13792 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
13793 {
13794         struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13795         unsigned sctxt;
13796         int ret = 0;
13797         u64 reg;
13798
13799         if (!rcd || !rcd->sc) {
13800                 ret = -EINVAL;
13801                 goto done;
13802         }
13803         sctxt = rcd->sc->hw_context;
13804         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
13805         /*
13806          * Disable send-side J_KEY integrity check, unless this is A0 h/w.
13807          * This check would not have been enabled for A0 h/w, see
13808          * set_ctxt_jkey().
13809          */
13810         if (!is_ax(dd)) {
13811                 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13812                 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13813                 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13814         }
13815         /* Turn off the J_KEY on the receive side */
13816         write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
13817 done:
13818         return ret;
13819 }
13820
13821 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
13822 {
13823         struct hfi1_ctxtdata *rcd;
13824         unsigned sctxt;
13825         int ret = 0;
13826         u64 reg;
13827
13828         if (ctxt < dd->num_rcv_contexts) {
13829                 rcd = dd->rcd[ctxt];
13830         } else {
13831                 ret = -EINVAL;
13832                 goto done;
13833         }
13834         if (!rcd || !rcd->sc) {
13835                 ret = -EINVAL;
13836                 goto done;
13837         }
13838         sctxt = rcd->sc->hw_context;
13839         reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
13840                 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
13841         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
13842         reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13843         reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13844         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13845 done:
13846         return ret;
13847 }
13848
13849 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
13850 {
13851         struct hfi1_ctxtdata *rcd;
13852         unsigned sctxt;
13853         int ret = 0;
13854         u64 reg;
13855
13856         if (ctxt < dd->num_rcv_contexts) {
13857                 rcd = dd->rcd[ctxt];
13858         } else {
13859                 ret = -EINVAL;
13860                 goto done;
13861         }
13862         if (!rcd || !rcd->sc) {
13863                 ret = -EINVAL;
13864                 goto done;
13865         }
13866         sctxt = rcd->sc->hw_context;
13867         reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13868         reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
13869         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13870         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13871 done:
13872         return ret;
13873 }
13874
13875 /*
13876  * Start doing the clean up of the chip. Our clean up happens in multiple
13877  * stages and this is just the first.
13878  */
13879 void hfi1_start_cleanup(struct hfi1_devdata *dd)
13880 {
13881         aspm_exit(dd);
13882         free_cntrs(dd);
13883         free_rcverr(dd);
13884         clean_up_interrupts(dd);
13885 }
13886
13887 #define HFI_BASE_GUID(dev) \
13888         ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
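/*
 * The two HFIs on an ASIC are assumed to share a base GUID, differing only
 * in the GUID_HFI_INDEX bit; the masked value is used below to find the
 * peer device on the same ASIC.
 */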
13889
13890 /*
13891  * Certain chip functions need to be initialized only once per asic
13892  * instead of per-device. This function finds the peer device and
13893  * checks whether that chip initialization needs to be done by this
13894  * device.
13895  */
13896 static void asic_should_init(struct hfi1_devdata *dd)
13897 {
13898         unsigned long flags;
13899         struct hfi1_devdata *tmp, *peer = NULL;
13900
13901         spin_lock_irqsave(&hfi1_devs_lock, flags);
13902         /* Find our peer device */
13903         list_for_each_entry(tmp, &hfi1_dev_list, list) {
13904                 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
13905                     dd->unit != tmp->unit) {
13906                         peer = tmp;
13907                         break;
13908                 }
13909         }
13910
13911         /*
13912          * "Claim" the ASIC for initialization if it hasn't been
13913          * "claimed" yet.
13914          */
13915         if (!peer || !(peer->flags & HFI1_DO_INIT_ASIC))
13916                 dd->flags |= HFI1_DO_INIT_ASIC;
13917         spin_unlock_irqrestore(&hfi1_devs_lock, flags);
13918 }
13919
13920 /*
13921  * Set dd->boardname.  Use a generic name if a name is not returned from
13922  * EFI variable space.
13923  *
13924  * Return 0 on success, -ENOMEM if space could not be allocated.
13925  */
13926 static int obtain_boardname(struct hfi1_devdata *dd)
13927 {
13928         /* generic board description */
13929         const char generic[] =
13930                 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
13931         unsigned long size;
13932         int ret;
13933
13934         ret = read_hfi1_efi_var(dd, "description", &size,
13935                                 (void **)&dd->boardname);
13936         if (ret) {
13937                 dd_dev_info(dd, "Board description not found\n");
13938                 /* use generic description */
13939                 dd->boardname = kstrdup(generic, GFP_KERNEL);
13940                 if (!dd->boardname)
13941                         return -ENOMEM;
13942         }
13943         return 0;
13944 }
13945
13946 /*
13947  * Check the interrupt registers to make sure that they are mapped correctly.
13948  * It is intended to help the user identify any mismapping by the VMM when the
13949  * driver is running in a VM. This function should only be called before
13950  * interrupts are set up properly.
13951  *
13952  * Return 0 on success, -EINVAL on failure.
13953  */
13954 static int check_int_registers(struct hfi1_devdata *dd)
13955 {
13956         u64 reg;
13957         u64 all_bits = ~(u64)0;
13958         u64 mask;
13959
13960         /* Clear CceIntMask[0] to avoid raising any interrupts */
13961         mask = read_csr(dd, CCE_INT_MASK);
13962         write_csr(dd, CCE_INT_MASK, 0ull);
13963         reg = read_csr(dd, CCE_INT_MASK);
13964         if (reg)
13965                 goto err_exit;
13966
13967         /* Clear all interrupt status bits */
13968         write_csr(dd, CCE_INT_CLEAR, all_bits);
13969         reg = read_csr(dd, CCE_INT_STATUS);
13970         if (reg)
13971                 goto err_exit;
13972
13973         /* Set all interrupt status bits */
13974         write_csr(dd, CCE_INT_FORCE, all_bits);
13975         reg = read_csr(dd, CCE_INT_STATUS);
13976         if (reg != all_bits)
13977                 goto err_exit;
13978
13979         /* Restore the interrupt mask */
13980         write_csr(dd, CCE_INT_CLEAR, all_bits);
13981         write_csr(dd, CCE_INT_MASK, mask);
13982
13983         return 0;
13984 err_exit:
13985         write_csr(dd, CCE_INT_MASK, mask);
13986         dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
13987         return -EINVAL;
13988 }
13989
13990 /**
13991  * Allocate and initialize the device structure for the hfi.
13992  * @pdev: the pci_dev for hfi1_ib device
13993  * @ent: pci_device_id struct for this dev
13994  *
13995  * Also allocates, initializes, and returns the devdata struct for this
13996  * device instance
13997  *
13998  * This is global, and is called directly at init to set up the
13999  * chip-specific function pointers for later use.
14000  */
14001 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14002                                   const struct pci_device_id *ent)
14003 {
14004         struct hfi1_devdata *dd;
14005         struct hfi1_pportdata *ppd;
14006         u64 reg;
14007         int i, ret;
14008         static const char * const inames[] = { /* implementation names */
14009                 "RTL silicon",
14010                 "RTL VCS simulation",
14011                 "RTL FPGA emulation",
14012                 "Functional simulator"
14013         };
14014         struct pci_dev *parent = pdev->bus->self;
14015
14016         dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14017                                 sizeof(struct hfi1_pportdata));
14018         if (IS_ERR(dd))
14019                 goto bail;
14020         ppd = dd->pport;
14021         for (i = 0; i < dd->num_pports; i++, ppd++) {
14022                 int vl;
14023                 /* init common fields */
14024                 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14025                 /* DC supports 4 link widths */
14026                 ppd->link_width_supported =
14027                         OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14028                         OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14029                 ppd->link_width_downgrade_supported =
14030                         ppd->link_width_supported;
14031                 /* start out enabling only 4X */
14032                 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14033                 ppd->link_width_downgrade_enabled =
14034                                         ppd->link_width_downgrade_supported;
14035                 /* link width active is 0 when link is down */
14036                 /* link width downgrade active is 0 when link is down */
14037
14038                 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14039                     num_vls > HFI1_MAX_VLS_SUPPORTED) {
14040                         hfi1_early_err(&pdev->dev,
14041                                        "Invalid num_vls %u, using %u VLs\n",
14042                                        num_vls, HFI1_MAX_VLS_SUPPORTED);
14043                         num_vls = HFI1_MAX_VLS_SUPPORTED;
14044                 }
14045                 ppd->vls_supported = num_vls;
14046                 ppd->vls_operational = ppd->vls_supported;
14047                 ppd->actual_vls_operational = ppd->vls_supported;
14048                 /* Set the default MTU. */
14049                 for (vl = 0; vl < num_vls; vl++)
14050                         dd->vld[vl].mtu = hfi1_max_mtu;
14051                 dd->vld[15].mtu = MAX_MAD_PACKET;
14052                 /*
14053                  * Set the initial values to reasonable defaults; they will be set
14054                  * for real when link is up.
14055                  */
14056                 ppd->lstate = IB_PORT_DOWN;
14057                 ppd->overrun_threshold = 0x4;
14058                 ppd->phy_error_threshold = 0xf;
14059                 ppd->port_crc_mode_enabled = link_crc_mask;
14060                 /* initialize supported LTP CRC mode */
14061                 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14062                 /* initialize enabled LTP CRC mode */
14063                 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14064                 /* start in offline */
14065                 ppd->host_link_state = HLS_DN_OFFLINE;
14066                 init_vl_arb_caches(ppd);
14067                 ppd->last_pstate = 0xff; /* invalid value */
14068         }
14069
14070         dd->link_default = HLS_DN_POLL;
14071
14072         /*
14073          * Do remaining PCIe setup and save PCIe values in dd.
14074          * Any error printing is already done by the init code.
14075          * On return, we have the chip mapped.
14076          */
14077         ret = hfi1_pcie_ddinit(dd, pdev, ent);
14078         if (ret < 0)
14079                 goto bail_free;
14080
14081         /* verify that reads actually work, save revision for reset check */
14082         dd->revision = read_csr(dd, CCE_REVISION);
14083         if (dd->revision == ~(u64)0) {
14084                 dd_dev_err(dd, "cannot read chip CSRs\n");
14085                 ret = -EINVAL;
14086                 goto bail_cleanup;
14087         }
14088         dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14089                         & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14090         dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14091                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
14092
14093         /*
14094          * Check interrupt registers mapping if the driver has no access to
14095          * the upstream component. In this case, it is likely that the driver
14096          * is running in a VM.
14097          */
14098         if (!parent) {
14099                 ret = check_int_registers(dd);
14100                 if (ret)
14101                         goto bail_cleanup;
14102         }
14103
14104         /*
14105          * obtain the hardware ID - NOT related to unit, which is a
14106          * software enumeration
14107          */
14108         reg = read_csr(dd, CCE_REVISION2);
14109         dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14110                                         & CCE_REVISION2_HFI_ID_MASK;
14111         /* the variable size will remove unwanted bits */
14112         dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14113         dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14114         dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14115                     dd->icode < ARRAY_SIZE(inames) ?
14116                     inames[dd->icode] : "unknown", (int)dd->irev);
14117
14118         /* speeds the hardware can support */
14119         dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14120         /* speeds allowed to run at */
14121         dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14122         /* give a reasonable active value, will be set on link up */
14123         dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14124
14125         dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
14126         dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
14127         dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
14128         dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
14129         dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
14130         /* fix up link widths for emulation _p */
14131         ppd = dd->pport;
14132         if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14133                 ppd->link_width_supported =
14134                         ppd->link_width_enabled =
14135                         ppd->link_width_downgrade_supported =
14136                         ppd->link_width_downgrade_enabled =
14137                                 OPA_LINK_WIDTH_1X;
14138         }
14139         /* ensure num_vls isn't larger than the number of SDMA engines */
14140         if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
14141                 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14142                            num_vls, dd->chip_sdma_engines);
14143                 num_vls = dd->chip_sdma_engines;
14144                 ppd->vls_supported = dd->chip_sdma_engines;
14145                 ppd->vls_operational = ppd->vls_supported;
14146         }
14147
14148         /*
14149          * Convert the ns parameter to the 64 * cclocks used in the CSR.
14150          * Limit the max if larger than the field holds.  If timeout is
14151          * non-zero, then the calculated field will be at least 1.
14152          *
14153          * Must be after icode is set up - the cclock rate depends
14154          * on knowing the hardware being used.
14155          */
14156         dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14157         if (dd->rcv_intr_timeout_csr >
14158                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14159                 dd->rcv_intr_timeout_csr =
14160                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14161         else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14162                 dd->rcv_intr_timeout_csr = 1;
14163
14164         /* needs to be done before we look for the peer device */
14165         read_guid(dd);
14166
14167         /* should this device init the ASIC block? */
14168         asic_should_init(dd);
14169
14170         /* obtain chip sizes, reset chip CSRs */
14171         init_chip(dd);
14172
14173         /* read in the PCIe link speed information */
14174         ret = pcie_speeds(dd);
14175         if (ret)
14176                 goto bail_cleanup;
14177
14178         /* Needs to be called before hfi1_firmware_init */
14179         get_platform_config(dd);
14180
14181         /* read in firmware */
14182         ret = hfi1_firmware_init(dd);
14183         if (ret)
14184                 goto bail_cleanup;
14185
14186         /*
14187          * In general, the PCIe Gen3 transition must occur after the
14188          * chip has been idled (so it won't initiate any PCIe transactions
14189          * e.g. an interrupt) and before the driver changes any registers
14190          * (the transition will reset the registers).
14191          *
14192          * In particular, place this call after:
14193          * - init_chip()     - the chip will not initiate any PCIe transactions
14194          * - pcie_speeds()   - reads the current link speed
14195          * - hfi1_firmware_init() - the needed firmware is ready to be
14196          *                          downloaded
14197          */
14198         ret = do_pcie_gen3_transition(dd);
14199         if (ret)
14200                 goto bail_cleanup;
14201
14202         /* start setting dd values and adjusting CSRs */
14203         init_early_variables(dd);
14204
14205         parse_platform_config(dd);
14206
14207         ret = obtain_boardname(dd);
14208         if (ret)
14209                 goto bail_cleanup;
14210
14211         snprintf(dd->boardversion, BOARD_VERS_MAX,
14212                  "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
14213                  HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
14214                  (u32)dd->majrev,
14215                  (u32)dd->minrev,
14216                  (dd->revision >> CCE_REVISION_SW_SHIFT)
14217                     & CCE_REVISION_SW_MASK);
14218
14219         ret = set_up_context_variables(dd);
14220         if (ret)
14221                 goto bail_cleanup;
14222
14223         /* set initial RXE CSRs */
14224         init_rxe(dd);
14225         /* set initial TXE CSRs */
14226         init_txe(dd);
14227         /* set initial non-RXE, non-TXE CSRs */
14228         init_other(dd);
14229         /* set up KDETH QP prefix in both RX and TX CSRs */
14230         init_kdeth_qp(dd);
14231
14232         ret = hfi1_dev_affinity_init(dd);
14233         if (ret)
14234                 goto bail_cleanup;
14235
14236         /* send contexts must be set up before receive contexts */
14237         ret = init_send_contexts(dd);
14238         if (ret)
14239                 goto bail_cleanup;
14240
14241         ret = hfi1_create_ctxts(dd);
14242         if (ret)
14243                 goto bail_cleanup;
14244
14245         dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
14246         /*
14247          * rcd[0] is guaranteed to be valid by this point. Also, all
14248          * context are using the same value, as per the module parameter.
14249          * contexts are using the same value, as per the module parameter.
14250         dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
14251
14252         ret = init_pervl_scs(dd);
14253         if (ret)
14254                 goto bail_cleanup;
14255
14256         /* sdma init */
14257         for (i = 0; i < dd->num_pports; ++i) {
14258                 ret = sdma_init(dd, i);
14259                 if (ret)
14260                         goto bail_cleanup;
14261         }
14262
14263         /* use contexts created by hfi1_create_ctxts */
14264         ret = set_up_interrupts(dd);
14265         if (ret)
14266                 goto bail_cleanup;
14267
14268         /* set up LCB access - must be after set_up_interrupts() */
14269         init_lcb_access(dd);
14270
14271         snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
14272                  dd->base_guid & 0xFFFFFF);
14273
14274         dd->oui1 = dd->base_guid >> 56 & 0xFF;
14275         dd->oui2 = dd->base_guid >> 48 & 0xFF;
14276         dd->oui3 = dd->base_guid >> 40 & 0xFF;
14277
14278         ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
14279         if (ret)
14280                 goto bail_clear_intr;
14281         check_fabric_firmware_versions(dd);
14282
14283         thermal_init(dd);
14284
14285         ret = init_cntrs(dd);
14286         if (ret)
14287                 goto bail_clear_intr;
14288
14289         ret = init_rcverr(dd);
14290         if (ret)
14291                 goto bail_free_cntrs;
14292
14293         ret = eprom_init(dd);
14294         if (ret)
14295                 goto bail_free_rcverr;
14296
14297         goto bail;
14298
14299 bail_free_rcverr:
14300         free_rcverr(dd);
14301 bail_free_cntrs:
14302         free_cntrs(dd);
14303 bail_clear_intr:
14304         clean_up_interrupts(dd);
14305 bail_cleanup:
14306         hfi1_pcie_ddcleanup(dd);
14307 bail_free:
14308         hfi1_free_devdata(dd);
14309         dd = ERR_PTR(ret);
14310 bail:
14311         return dd;
14312 }
14313
14314 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
14315                         u32 dw_len)
14316 {
14317         u32 delta_cycles;
14318         u32 current_egress_rate = ppd->current_egress_rate;
14319         /* rates here are in units of 10^6 bits/sec */
14320
14321         if (desired_egress_rate == -1)
14322                 return 0; /* shouldn't happen */
14323
14324         if (desired_egress_rate >= current_egress_rate)
14325                 return 0; /* we can't help it go faster, only slower */
14326
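        /*
         * The extra delay is the difference, in cycles, between sending
         * dw_len dwords (dw_len * 4 bytes) at the slower desired rate
         * and at the current egress rate.
         */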
14327         delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
14328                         egress_cycles(dw_len * 4, current_egress_rate);
14329
14330         return (u16)delta_cycles;
14331 }
14332
14333 /**
14334  * create_pbc - build a pbc for transmission
 * @ppd: the port device data
14335  * @flags: special case flags or-ed in built pbc
14336  * @srate_mbs: static rate of the packet in Mbit/s
14337  * @vl: virtual lane
14338  * @dw_len: dword length (header words + data words + pbc words)
14339  *
14340  * Create a PBC with the given flags, rate, VL, and length.
14341  *
14342  * NOTE: The PBC created will not insert any HCRC - all callers but one are
14343  * for verbs, which does not use this PSM feature.  The lone other caller
14344  * is for the diagnostic interface which calls this if the user does not
14345  * is for the diagnostic interface, which calls this if the user does not
14346  */
14347 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
14348                u32 dw_len)
14349 {
14350         u64 pbc, delay = 0;
14351
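        /*
         * A non-zero static rate (in Mbit/s) requests rate throttling:
         * translate it into a cycle count for the PBC static rate
         * control field.
         */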
14352         if (unlikely(srate_mbs))
14353                 delay = delay_cycles(ppd, srate_mbs, dw_len);
14354
14355         pbc = flags
14356                 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
14357                 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
14358                 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
14359                 | (dw_len & PBC_LENGTH_DWS_MASK)
14360                         << PBC_LENGTH_DWS_SHIFT;
14361
14362         return pbc;
14363 }
14364
14365 #define SBUS_THERMAL    0x4f
14366 #define SBUS_THERM_MONITOR_MODE 0x1
14367
14368 #define THERM_FAILURE(dev, ret, reason) \
14369         dd_dev_err((dev),                                               \
14370                    "Thermal sensor initialization failed: %s (%d)\n",   \
14371                    (reason), (ret))
14372
14373 /*
14374  * Initialize the Avago Thermal sensor.
14375  *
14376  * After initialization, enable polling of the thermal sensor through
14377  * the SBus interface. For this to work, the SBus Master firmware must
14378  * be loaded, because the HW polling logic uses SBus interrupts, which
14379  * the default firmware does not support. Otherwise, no data will be
14380  * returned through
14381  * the ASIC_STS_THERM CSR.
14382  */
14383 static int thermal_init(struct hfi1_devdata *dd)
14384 {
14385         int ret = 0;
14386
14387         if (dd->icode != ICODE_RTL_SILICON ||
14388             !(dd->flags & HFI1_DO_INIT_ASIC))
14389                 return ret;
14390
14391         acquire_hw_mutex(dd);
14392         dd_dev_info(dd, "Initializing thermal sensor\n");
14393         /* Disable polling of thermal readings */
14394         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
14395         msleep(100);
14396         /* Thermal Sensor Initialization */
14397         /*    Step 1: Reset the Thermal SBus Receiver */
14398         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14399                                 RESET_SBUS_RECEIVER, 0);
14400         if (ret) {
14401                 THERM_FAILURE(dd, ret, "Bus Reset");
14402                 goto done;
14403         }
14404         /*    Step 2: Set Reset bit in Thermal block */
14405         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14406                                 WRITE_SBUS_RECEIVER, 0x1);
14407         if (ret) {
14408                 THERM_FAILURE(dd, ret, "Therm Block Reset");
14409                 goto done;
14410         }
14411         /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
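        /*    (0x32 == 50 decimal, so 100MHz / 50 = 2MHz) */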
14412         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
14413                                 WRITE_SBUS_RECEIVER, 0x32);
14414         if (ret) {
14415                 THERM_FAILURE(dd, ret, "Write Clock Div");
14416                 goto done;
14417         }
14418         /*    Step 4: Select temperature mode */
14419         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
14420                                 WRITE_SBUS_RECEIVER,
14421                                 SBUS_THERM_MONITOR_MODE);
14422         if (ret) {
14423                 THERM_FAILURE(dd, ret, "Write Mode Sel");
14424                 goto done;
14425         }
14426         /*    Step 5: De-assert block reset and start conversion */
14427         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
14428                                 WRITE_SBUS_RECEIVER, 0x2);
14429         if (ret) {
14430                 THERM_FAILURE(dd, ret, "Write Reset Deassert");
14431                 goto done;
14432         }
14433         /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
14434         msleep(22);
14435
14436         /* Enable polling of thermal readings */
14437         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
14438 done:
14439         release_hw_mutex(dd);
14440         return ret;
14441 }
14442
14443 static void handle_temp_err(struct hfi1_devdata *dd)
14444 {
14445         struct hfi1_pportdata *ppd = &dd->pport[0];
14446         /*
14447          * Thermal Critical Interrupt
14448          * Put the device into forced freeze mode, take link down to
14449          * offline, and put DC into reset.
14450          */
14451         dd_dev_emerg(dd,
14452                      "Critical temperature reached! Forcing device into freeze mode!\n");
14453         dd->flags |= HFI1_FORCED_FREEZE;
14454         start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
14455         /*
14456          * Shut DC down as much and as quickly as possible.
14457          *
14458          * Step 1: Take the link down to OFFLINE. This will cause the
14459          *         8051 to put the Serdes in reset. However, we don't want to
14460          *         go through the entire link state machine since we want to
14461          *         shut down ASAP. Furthermore, this is not a graceful shutdown
14462          *         but rather an attempt to save the chip.
14463          *         Code below is almost the same as quiet_serdes() but avoids
14464          *         all the extra work and the sleeps.
14465          */
14466         ppd->driver_link_ready = 0;
14467         ppd->link_enabled = 0;
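        /*
         * Request the OFFLINE physical link state, with the link-down
         * reason encoded in bits 15:8 of the argument.
         */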
14468         set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
14469                                 PLS_OFFLINE);
14470         /*
14471          * Step 2: Shutdown LCB and 8051
14472          *         After shutdown, do not restore DC_CFG_RESET value.
14473          */
14474         dc_shutdown(dd);
14475 }