/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
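/*
 * Worked example (editorial illustration, not from the original source):
 * the same arithmetic for an 8K MTU gives
 *   (8 * 1024 + 64) bytes / 12.5 GB/s = 660.48 ns
 * so a comparable module setting would be rcv_intr_timeout=676
 * (660 plus the same 16 ns coalescing allowance).
 */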
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
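/*
 * Illustrative expansion (editorial note, not from the original source):
 * given the field order of struct flag_table above,
 *   FLAG_ENTRY0("CceCsrParityErr", CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK)
 * expands to the initializer
 *   { CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK, "CceCsrParityErr", 0 }
 */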
/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */

#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1
/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS 0
#define RSM_INS_FECN 1
#define RSM_INS_VNIC 2
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39
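/*
 * Illustrative use (editorial assumption, not from the original source):
 * the HFI index would be recovered from a GUID with something like
 *   (guid >> GUID_HFI_INDEX_SHIFT) & 0x1
 */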
/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
/* RSM fields for Verbs */
/* packet type */
#define IB_PACKET_TYPE  2ull
#define QW_SHIFT        6ull
/* QPN[7..1] */
#define QPN_WIDTH       7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull
/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
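/*
 * Worked example (editorial note, not from the original source): with
 * QW_SHIFT = 6, a match/select offset packs (quadword << 6) | bit, so
 * LRH_BTH_MATCH_OFFSET is (0 << 6) | 48 = 48 and QPN_SELECT_OFFSET is
 * (1 << 6) | 1 = 65.
 */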
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
( \
	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)
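/*
 * Usage sketch (editorial illustration, not from this file): an identity
 * SC0-SC7 to VL0-VL7 mapping for table 0 would be written as
 *   SC2VL_VAL(0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7)
 */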
#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
( \
	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT)   \
)
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		| CCE_STATUS_RXE_FROZE_SMASK \
		| CCE_STATUS_TXE_FROZE_SMASK \
		| CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		| CCE_STATUS_TXE_PAUSED_SMASK \
		| CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK)
};
/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
};
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK)
};
/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR  \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2*/	/* reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6*/	/* reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC))
};
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN))
};
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK)
};
/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR  \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR))
};
/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR))
};
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed", SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received", UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met", TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init", FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)", FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)", FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)", FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)", FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)", FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout", HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT)
};
/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200)
};
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
				     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
					 int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
/*
 * Error interrupt table entry. This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
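/*
 * Illustrative expansion (editorial note, not from the original source):
 *   EE(CCE_ERR, handle_cce_err, "CceErr")
 * produces the initializer
 *   { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err, "CceErr" }
 */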
/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR, handle_cce_err, "CceErr"),
/* 1*/	EE(RCV_ERR, handle_rxe_err, "RxeErr"),
/* 2*/	EE(MISC_ERR, handle_misc_err, "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR, handle_pio_err, "PioErr"),
/* 5*/	EE(SEND_DMA_ERR, handle_sdma_err, "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR, handle_txe_err, "TxeErr")
	/* the rest are reserved */
};
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7

/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR, handle_dcc_err, "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR, handle_lcb_err, "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};
struct cntr_entry {
	/*
	 * counter name
	 */
	char *name;

	/*
	 * csr to read for name (if applicable)
	 */
	u64 csr;

	/*
	 * offset into dd or ppd to store the counter's value
	 */
	int offset;

	/*
	 * flags
	 */
	u8 flags;

	/*
	 * accessor for stat element, context either dd or ppd
	 */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

/* 32bit RXE */
#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* 64bit RXE */
#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)

/* 32bit TXE */
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

/* 64bit TXE */
#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, flags, \
	  dev_access_u64_csr)

/* CCE */
#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

/* DC */
#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

/* ibp counters */
#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)
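/*
 * Illustrative expansion (editorial note, not from the original source):
 * OVR_ELM(0) defines the "RcvHdrOvr0" counter, read from CSR
 * RCV_HDR_OVFL_CNT + 0 * 0x100 through port_access_u64_csr.
 */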
/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}
/**
 * read_csr - read CSR at the indicated offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no mapping
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}
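/*
 * Example use (editorial illustration; CCE_REVISION is assumed from the
 * chip register definitions):
 *   u64 rev = read_csr(dd, CCE_REVISION);
 */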
/**
 * write_csr - write CSR at the indicated offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 * @value - value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}
/**
 * get_csr_addr - return the iomem address for offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
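/*
 * Usage sketch (editorial note): the counter accessors below read via
 *   read_write_csr(dd, csr, CNTR_MODE_R, 0);
 * and write/reset via
 *   read_write_csr(dd, csr, CNTR_MODE_W, value);
 */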
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}
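/*
 * For CNTR_VL entries the per-VL copies of a 64-bit counter CSR occupy
 * consecutive 8-byte slots, hence the csr += 8 * vl adjustment above.
 */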
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}
static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}
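/*
 * Sketch of how the port counters below use this helper: a read
 * returns the backing u64, a write overwrites it, e.g.
 *
 *	u64 downs = read_write_sw(ppd->dd, &ppd->link_downed,
 *				  CNTR_MODE_R, 0);
 */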
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}
static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
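/*
 * Per-CPU counters are never reset in place: a read reports the sum
 * across all possible CPUs minus the zero snapshot *z_val, and the
 * only accepted write value is 0, which refreshes that snapshot.
 */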
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_ctx0_seq_drop(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->ctx0_seq_drop;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}
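/*
 * In the accessors that follow, the index into each *_err_status_cnt[]
 * array matches the bit position of the corresponding error in the
 * hardware error-status register named in each section comment.
 */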
/* Software counters for the error status bits within MISC_ERR_STATUS */

/*
 * Generator for the trivial error-bit accessors used in the sections
 * below: each invocation expands to a function that returns one slot
 * of the named per-device software counter array. The function names
 * and array indices are unchanged from the expanded form.
 */
#define ERR_STATUS_CNT_ACCESSOR(fname, array, idx) \
static u64 fname(const struct cntr_entry *entry, void *context, \
		 int vl, int mode, u64 data) \
{ \
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context; \
	return dd->array[idx]; \
}

ERR_STATUS_CNT_ACCESSOR(access_misc_pll_lock_fail_err_cnt,
			misc_err_status_cnt, 12)
ERR_STATUS_CNT_ACCESSOR(access_misc_mbist_fail_err_cnt,
			misc_err_status_cnt, 11)
ERR_STATUS_CNT_ACCESSOR(access_misc_invalid_eep_cmd_err_cnt,
			misc_err_status_cnt, 10)
ERR_STATUS_CNT_ACCESSOR(access_misc_efuse_done_parity_err_cnt,
			misc_err_status_cnt, 9)
ERR_STATUS_CNT_ACCESSOR(access_misc_efuse_write_err_cnt,
			misc_err_status_cnt, 8)
ERR_STATUS_CNT_ACCESSOR(access_misc_efuse_read_bad_addr_err_cnt,
			misc_err_status_cnt, 7)
ERR_STATUS_CNT_ACCESSOR(access_misc_efuse_csr_parity_err_cnt,
			misc_err_status_cnt, 6)
ERR_STATUS_CNT_ACCESSOR(access_misc_fw_auth_failed_err_cnt,
			misc_err_status_cnt, 5)
ERR_STATUS_CNT_ACCESSOR(access_misc_key_mismatch_err_cnt,
			misc_err_status_cnt, 4)
ERR_STATUS_CNT_ACCESSOR(access_misc_sbus_write_failed_err_cnt,
			misc_err_status_cnt, 3)
ERR_STATUS_CNT_ACCESSOR(access_misc_csr_write_bad_addr_err_cnt,
			misc_err_status_cnt, 2)
ERR_STATUS_CNT_ACCESSOR(access_misc_csr_read_bad_addr_err_cnt,
			misc_err_status_cnt, 1)
ERR_STATUS_CNT_ACCESSOR(access_misc_csr_parity_err_cnt,
			misc_err_status_cnt, 0)
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}
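/*
 * The aggregate above is presumably bumped by the CceErrStatus
 * interrupt handling as error bits are observed; only the read side
 * lives here (an inference, the update site is elsewhere in the file).
 */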
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
ERR_STATUS_CNT_ACCESSOR(access_cce_msix_csr_parity_err_cnt,
			cce_err_status_cnt, 40)
ERR_STATUS_CNT_ACCESSOR(access_cce_int_map_unc_err_cnt,
			cce_err_status_cnt, 39)
ERR_STATUS_CNT_ACCESSOR(access_cce_int_map_cor_err_cnt,
			cce_err_status_cnt, 38)
ERR_STATUS_CNT_ACCESSOR(access_cce_msix_table_unc_err_cnt,
			cce_err_status_cnt, 37)
ERR_STATUS_CNT_ACCESSOR(access_cce_msix_table_cor_err_cnt,
			cce_err_status_cnt, 36)
ERR_STATUS_CNT_ACCESSOR(access_cce_rxdma_conv_fifo_parity_err_cnt,
			cce_err_status_cnt, 35)
ERR_STATUS_CNT_ACCESSOR(access_cce_rcpl_async_fifo_parity_err_cnt,
			cce_err_status_cnt, 34)
ERR_STATUS_CNT_ACCESSOR(access_cce_seg_write_bad_addr_err_cnt,
			cce_err_status_cnt, 33)
ERR_STATUS_CNT_ACCESSOR(access_cce_seg_read_bad_addr_err_cnt,
			cce_err_status_cnt, 32)
ERR_STATUS_CNT_ACCESSOR(access_la_triggered_cnt,
			cce_err_status_cnt, 31)
ERR_STATUS_CNT_ACCESSOR(access_cce_trgt_cpl_timeout_err_cnt,
			cce_err_status_cnt, 30)
ERR_STATUS_CNT_ACCESSOR(access_pcic_receive_parity_err_cnt,
			cce_err_status_cnt, 29)
ERR_STATUS_CNT_ACCESSOR(access_pcic_transmit_back_parity_err_cnt,
			cce_err_status_cnt, 28)
ERR_STATUS_CNT_ACCESSOR(access_pcic_transmit_front_parity_err_cnt,
			cce_err_status_cnt, 27)
ERR_STATUS_CNT_ACCESSOR(access_pcic_cpl_dat_q_unc_err_cnt,
			cce_err_status_cnt, 26)
ERR_STATUS_CNT_ACCESSOR(access_pcic_cpl_hd_q_unc_err_cnt,
			cce_err_status_cnt, 25)
ERR_STATUS_CNT_ACCESSOR(access_pcic_post_dat_q_unc_err_cnt,
			cce_err_status_cnt, 24)
ERR_STATUS_CNT_ACCESSOR(access_pcic_post_hd_q_unc_err_cnt,
			cce_err_status_cnt, 23)
ERR_STATUS_CNT_ACCESSOR(access_pcic_retry_sot_mem_unc_err_cnt,
			cce_err_status_cnt, 22)
ERR_STATUS_CNT_ACCESSOR(access_pcic_retry_mem_unc_err,
			cce_err_status_cnt, 21)
ERR_STATUS_CNT_ACCESSOR(access_pcic_n_post_dat_q_parity_err_cnt,
			cce_err_status_cnt, 20)
ERR_STATUS_CNT_ACCESSOR(access_pcic_n_post_h_q_parity_err_cnt,
			cce_err_status_cnt, 19)
ERR_STATUS_CNT_ACCESSOR(access_pcic_cpl_dat_q_cor_err_cnt,
			cce_err_status_cnt, 18)
ERR_STATUS_CNT_ACCESSOR(access_pcic_cpl_hd_q_cor_err_cnt,
			cce_err_status_cnt, 17)
ERR_STATUS_CNT_ACCESSOR(access_pcic_post_dat_q_cor_err_cnt,
			cce_err_status_cnt, 16)
ERR_STATUS_CNT_ACCESSOR(access_pcic_post_hd_q_cor_err_cnt,
			cce_err_status_cnt, 15)
ERR_STATUS_CNT_ACCESSOR(access_pcic_retry_sot_mem_cor_err_cnt,
			cce_err_status_cnt, 14)
ERR_STATUS_CNT_ACCESSOR(access_pcic_retry_mem_cor_err_cnt,
			cce_err_status_cnt, 13)
ERR_STATUS_CNT_ACCESSOR(access_cce_cli1_async_fifo_dbg_parity_err_cnt,
			cce_err_status_cnt, 12)
ERR_STATUS_CNT_ACCESSOR(access_cce_cli1_async_fifo_rxdma_parity_err_cnt,
			cce_err_status_cnt, 11)
ERR_STATUS_CNT_ACCESSOR(access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt,
			cce_err_status_cnt, 10)
ERR_STATUS_CNT_ACCESSOR(access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt,
			cce_err_status_cnt, 9)
ERR_STATUS_CNT_ACCESSOR(access_cce_cli2_async_fifo_parity_err_cnt,
			cce_err_status_cnt, 8)
ERR_STATUS_CNT_ACCESSOR(access_cce_csr_cfg_bus_parity_err_cnt,
			cce_err_status_cnt, 7)
ERR_STATUS_CNT_ACCESSOR(access_cce_cli0_async_fifo_parity_err_cnt,
			cce_err_status_cnt, 6)
ERR_STATUS_CNT_ACCESSOR(access_cce_rspd_data_parity_err_cnt,
			cce_err_status_cnt, 5)
ERR_STATUS_CNT_ACCESSOR(access_cce_trgt_access_err_cnt,
			cce_err_status_cnt, 4)
ERR_STATUS_CNT_ACCESSOR(access_cce_trgt_async_fifo_parity_err_cnt,
			cce_err_status_cnt, 3)
ERR_STATUS_CNT_ACCESSOR(access_cce_csr_write_bad_addr_err_cnt,
			cce_err_status_cnt, 2)
ERR_STATUS_CNT_ACCESSOR(access_cce_csr_read_bad_addr_err_cnt,
			cce_err_status_cnt, 1)
ERR_STATUS_CNT_ACCESSOR(access_ccs_csr_parity_err_cnt,
			cce_err_status_cnt, 0)
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
ERR_STATUS_CNT_ACCESSOR(access_rx_csr_parity_err_cnt,
			rcv_err_status_cnt, 63)
ERR_STATUS_CNT_ACCESSOR(access_rx_csr_write_bad_addr_err_cnt,
			rcv_err_status_cnt, 62)
ERR_STATUS_CNT_ACCESSOR(access_rx_csr_read_bad_addr_err_cnt,
			rcv_err_status_cnt, 61)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_csr_unc_err_cnt,
			rcv_err_status_cnt, 60)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_dq_fsm_encoding_err_cnt,
			rcv_err_status_cnt, 59)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_eq_fsm_encoding_err_cnt,
			rcv_err_status_cnt, 58)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_csr_parity_err_cnt,
			rcv_err_status_cnt, 57)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_data_cor_err_cnt,
			rcv_err_status_cnt, 56)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_data_unc_err_cnt,
			rcv_err_status_cnt, 55)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_data_fifo_rd_cor_err_cnt,
			rcv_err_status_cnt, 54)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_data_fifo_rd_unc_err_cnt,
			rcv_err_status_cnt, 53)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_hdr_fifo_rd_cor_err_cnt,
			rcv_err_status_cnt, 52)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_hdr_fifo_rd_unc_err_cnt,
			rcv_err_status_cnt, 51)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_desc_part2_cor_err_cnt,
			rcv_err_status_cnt, 50)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_desc_part2_unc_err_cnt,
			rcv_err_status_cnt, 49)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_desc_part1_cor_err_cnt,
			rcv_err_status_cnt, 48)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_desc_part1_unc_err_cnt,
			rcv_err_status_cnt, 47)
ERR_STATUS_CNT_ACCESSOR(access_rx_hq_intr_fsm_err_cnt,
			rcv_err_status_cnt, 46)
ERR_STATUS_CNT_ACCESSOR(access_rx_hq_intr_csr_parity_err_cnt,
			rcv_err_status_cnt, 45)
ERR_STATUS_CNT_ACCESSOR(access_rx_lookup_csr_parity_err_cnt,
			rcv_err_status_cnt, 44)
ERR_STATUS_CNT_ACCESSOR(access_rx_lookup_rcv_array_cor_err_cnt,
			rcv_err_status_cnt, 43)
ERR_STATUS_CNT_ACCESSOR(access_rx_lookup_rcv_array_unc_err_cnt,
			rcv_err_status_cnt, 42)
ERR_STATUS_CNT_ACCESSOR(access_rx_lookup_des_part2_parity_err_cnt,
			rcv_err_status_cnt, 41)
ERR_STATUS_CNT_ACCESSOR(access_rx_lookup_des_part1_unc_cor_err_cnt,
			rcv_err_status_cnt, 40)
ERR_STATUS_CNT_ACCESSOR(access_rx_lookup_des_part1_unc_err_cnt,
			rcv_err_status_cnt, 39)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_next_free_buf_cor_err_cnt,
			rcv_err_status_cnt, 38)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_next_free_buf_unc_err_cnt,
			rcv_err_status_cnt, 37)
ERR_STATUS_CNT_ACCESSOR(access_rbuf_fl_init_wr_addr_parity_err_cnt,
			rcv_err_status_cnt, 36)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_fl_initdone_parity_err_cnt,
			rcv_err_status_cnt, 35)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_fl_write_addr_parity_err_cnt,
			rcv_err_status_cnt, 34)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_fl_rd_addr_parity_err_cnt,
			rcv_err_status_cnt, 33)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_empty_err_cnt,
			rcv_err_status_cnt, 32)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_full_err_cnt,
			rcv_err_status_cnt, 31)
ERR_STATUS_CNT_ACCESSOR(access_rbuf_bad_lookup_err_cnt,
			rcv_err_status_cnt, 30)
ERR_STATUS_CNT_ACCESSOR(access_rbuf_ctx_id_parity_err_cnt,
			rcv_err_status_cnt, 29)
ERR_STATUS_CNT_ACCESSOR(access_rbuf_csr_qeopdw_parity_err_cnt,
			rcv_err_status_cnt, 28)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt,
			rcv_err_status_cnt, 27)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt,
			rcv_err_status_cnt, 26)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt,
			rcv_err_status_cnt, 25)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_csr_q_vld_bit_parity_err_cnt,
			rcv_err_status_cnt, 24)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_csr_q_next_buf_parity_err_cnt,
			rcv_err_status_cnt, 23)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt,
			rcv_err_status_cnt, 22)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt,
			rcv_err_status_cnt, 21)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_block_list_read_cor_err_cnt,
			rcv_err_status_cnt, 20)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_block_list_read_unc_err_cnt,
			rcv_err_status_cnt, 19)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_lookup_des_cor_err_cnt,
			rcv_err_status_cnt, 18)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_lookup_des_unc_err_cnt,
			rcv_err_status_cnt, 17)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt,
			rcv_err_status_cnt, 16)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_lookup_des_reg_unc_err_cnt,
			rcv_err_status_cnt, 15)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_free_list_cor_err_cnt,
			rcv_err_status_cnt, 14)
ERR_STATUS_CNT_ACCESSOR(access_rx_rbuf_free_list_unc_err_cnt,
			rcv_err_status_cnt, 13)
ERR_STATUS_CNT_ACCESSOR(access_rx_rcv_fsm_encoding_err_cnt,
			rcv_err_status_cnt, 12)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_flag_cor_err_cnt,
			rcv_err_status_cnt, 11)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_flag_unc_err_cnt,
			rcv_err_status_cnt, 10)
ERR_STATUS_CNT_ACCESSOR(access_rx_dc_sop_eop_parity_err_cnt,
			rcv_err_status_cnt, 9)
ERR_STATUS_CNT_ACCESSOR(access_rx_rcv_csr_parity_err_cnt,
			rcv_err_status_cnt, 8)
ERR_STATUS_CNT_ACCESSOR(access_rx_rcv_qp_map_table_cor_err_cnt,
			rcv_err_status_cnt, 7)
ERR_STATUS_CNT_ACCESSOR(access_rx_rcv_qp_map_table_unc_err_cnt,
			rcv_err_status_cnt, 6)
ERR_STATUS_CNT_ACCESSOR(access_rx_rcv_data_cor_err_cnt,
			rcv_err_status_cnt, 5)
ERR_STATUS_CNT_ACCESSOR(access_rx_rcv_data_unc_err_cnt,
			rcv_err_status_cnt, 4)
ERR_STATUS_CNT_ACCESSOR(access_rx_rcv_hdr_cor_err_cnt,
			rcv_err_status_cnt, 3)
ERR_STATUS_CNT_ACCESSOR(access_rx_rcv_hdr_unc_err_cnt,
			rcv_err_status_cnt, 2)
ERR_STATUS_CNT_ACCESSOR(access_rx_dc_intf_parity_err_cnt,
			rcv_err_status_cnt, 1)
ERR_STATUS_CNT_ACCESSOR(access_rx_dma_csr_cor_err_cnt,
			rcv_err_status_cnt, 0)
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
ERR_STATUS_CNT_ACCESSOR(access_pio_pec_sop_head_parity_err_cnt,
			send_pio_err_status_cnt, 35)
ERR_STATUS_CNT_ACCESSOR(access_pio_pcc_sop_head_parity_err_cnt,
			send_pio_err_status_cnt, 34)
ERR_STATUS_CNT_ACCESSOR(access_pio_last_returned_cnt_parity_err_cnt,
			send_pio_err_status_cnt, 33)
ERR_STATUS_CNT_ACCESSOR(access_pio_current_free_cnt_parity_err_cnt,
			send_pio_err_status_cnt, 32)
ERR_STATUS_CNT_ACCESSOR(access_pio_reserved_31_err_cnt,
			send_pio_err_status_cnt, 31)
ERR_STATUS_CNT_ACCESSOR(access_pio_reserved_30_err_cnt,
			send_pio_err_status_cnt, 30)
ERR_STATUS_CNT_ACCESSOR(access_pio_ppmc_sop_len_err_cnt,
			send_pio_err_status_cnt, 29)
ERR_STATUS_CNT_ACCESSOR(access_pio_ppmc_bqc_mem_parity_err_cnt,
			send_pio_err_status_cnt, 28)
ERR_STATUS_CNT_ACCESSOR(access_pio_vl_fifo_parity_err_cnt,
			send_pio_err_status_cnt, 27)
ERR_STATUS_CNT_ACCESSOR(access_pio_vlf_sop_parity_err_cnt,
			send_pio_err_status_cnt, 26)
ERR_STATUS_CNT_ACCESSOR(access_pio_vlf_v1_len_parity_err_cnt,
			send_pio_err_status_cnt, 25)
ERR_STATUS_CNT_ACCESSOR(access_pio_block_qw_count_parity_err_cnt,
			send_pio_err_status_cnt, 24)
ERR_STATUS_CNT_ACCESSOR(access_pio_write_qw_valid_parity_err_cnt,
			send_pio_err_status_cnt, 23)
ERR_STATUS_CNT_ACCESSOR(access_pio_state_machine_err_cnt,
			send_pio_err_status_cnt, 22)
ERR_STATUS_CNT_ACCESSOR(access_pio_write_data_parity_err_cnt,
			send_pio_err_status_cnt, 21)
ERR_STATUS_CNT_ACCESSOR(access_pio_host_addr_mem_cor_err_cnt,
			send_pio_err_status_cnt, 20)
ERR_STATUS_CNT_ACCESSOR(access_pio_host_addr_mem_unc_err_cnt,
			send_pio_err_status_cnt, 19)
ERR_STATUS_CNT_ACCESSOR(access_pio_pkt_evict_sm_or_arb_sm_err_cnt,
			send_pio_err_status_cnt, 18)
ERR_STATUS_CNT_ACCESSOR(access_pio_init_sm_in_err_cnt,
			send_pio_err_status_cnt, 17)
ERR_STATUS_CNT_ACCESSOR(access_pio_ppmc_pbl_fifo_err_cnt,
			send_pio_err_status_cnt, 16)
ERR_STATUS_CNT_ACCESSOR(access_pio_credit_ret_fifo_parity_err_cnt,
			send_pio_err_status_cnt, 15)
ERR_STATUS_CNT_ACCESSOR(access_pio_v1_len_mem_bank1_cor_err_cnt,
			send_pio_err_status_cnt, 14)
ERR_STATUS_CNT_ACCESSOR(access_pio_v1_len_mem_bank0_cor_err_cnt,
			send_pio_err_status_cnt, 13)
ERR_STATUS_CNT_ACCESSOR(access_pio_v1_len_mem_bank1_unc_err_cnt,
			send_pio_err_status_cnt, 12)
ERR_STATUS_CNT_ACCESSOR(access_pio_v1_len_mem_bank0_unc_err_cnt,
			send_pio_err_status_cnt, 11)
ERR_STATUS_CNT_ACCESSOR(access_pio_sm_pkt_reset_parity_err_cnt,
			send_pio_err_status_cnt, 10)
ERR_STATUS_CNT_ACCESSOR(access_pio_pkt_evict_fifo_parity_err_cnt,
			send_pio_err_status_cnt, 9)
ERR_STATUS_CNT_ACCESSOR(access_pio_sbrdctrl_crrel_fifo_parity_err_cnt,
			send_pio_err_status_cnt, 8)
ERR_STATUS_CNT_ACCESSOR(access_pio_sbrdctl_crrel_parity_err_cnt,
			send_pio_err_status_cnt, 7)
ERR_STATUS_CNT_ACCESSOR(access_pio_pec_fifo_parity_err_cnt,
			send_pio_err_status_cnt, 6)
ERR_STATUS_CNT_ACCESSOR(access_pio_pcc_fifo_parity_err_cnt,
			send_pio_err_status_cnt, 5)
ERR_STATUS_CNT_ACCESSOR(access_pio_sb_mem_fifo1_err_cnt,
			send_pio_err_status_cnt, 4)
ERR_STATUS_CNT_ACCESSOR(access_pio_sb_mem_fifo0_err_cnt,
			send_pio_err_status_cnt, 3)
ERR_STATUS_CNT_ACCESSOR(access_pio_csr_parity_err_cnt,
			send_pio_err_status_cnt, 2)
ERR_STATUS_CNT_ACCESSOR(access_pio_write_addr_parity_err_cnt,
			send_pio_err_status_cnt, 1)
ERR_STATUS_CNT_ACCESSOR(access_pio_write_bad_ctxt_err_cnt,
			send_pio_err_status_cnt, 0)
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
ERR_STATUS_CNT_ACCESSOR(access_sdma_pcie_req_tracking_cor_err_cnt,
			send_dma_err_status_cnt, 3)
ERR_STATUS_CNT_ACCESSOR(access_sdma_pcie_req_tracking_unc_err_cnt,
			send_dma_err_status_cnt, 2)
ERR_STATUS_CNT_ACCESSOR(access_sdma_csr_parity_err_cnt,
			send_dma_err_status_cnt, 1)
ERR_STATUS_CNT_ACCESSOR(access_sdma_rpy_tag_err_cnt,
			send_dma_err_status_cnt, 0)
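/*
 * SendDmaErrStatus only defines bits 0-3, which is why this section is
 * so much shorter than its neighbors.
 */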
3174 * Software counters corresponding to each of the
3175 * error status bits within SendEgressErrStatus
3177 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3178 const struct cntr_entry *entry,
3179 void *context, int vl, int mode, u64 data)
3181 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3183 return dd->send_egress_err_status_cnt[63];
3186 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3187 const struct cntr_entry *entry,
3188 void *context, int vl, int mode, u64 data)
3190 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3192 return dd->send_egress_err_status_cnt[62];
3195 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3196 void *context, int vl, int mode,
3199 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3201 return dd->send_egress_err_status_cnt[61];
3204 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3205 void *context, int vl,
3208 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3210 return dd->send_egress_err_status_cnt[60];
3213 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3214 const struct cntr_entry *entry,
3215 void *context, int vl, int mode, u64 data)
3217 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3219 return dd->send_egress_err_status_cnt[59];
3222 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3223 void *context, int vl, int mode,
3226 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3228 return dd->send_egress_err_status_cnt[58];
3231 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3232 void *context, int vl, int mode,
3235 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3237 return dd->send_egress_err_status_cnt[57];
3240 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3241 void *context, int vl, int mode,
3244 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3246 return dd->send_egress_err_status_cnt[56];
3249 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3250 void *context, int vl, int mode,
3253 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3255 return dd->send_egress_err_status_cnt[55];
3258 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3259 void *context, int vl, int mode,
3262 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3264 return dd->send_egress_err_status_cnt[54];
3267 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3268 void *context, int vl, int mode,
3271 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3273 return dd->send_egress_err_status_cnt[53];
3276 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3277 void *context, int vl, int mode,
3280 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3282 return dd->send_egress_err_status_cnt[52];
3285 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3286 void *context, int vl, int mode,
3289 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3291 return dd->send_egress_err_status_cnt[51];
3294 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3295 void *context, int vl, int mode,
3298 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3300 return dd->send_egress_err_status_cnt[50];
3303 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3304 void *context, int vl, int mode,
3307 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3309 return dd->send_egress_err_status_cnt[49];
3312 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3313 void *context, int vl, int mode,
3316 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3318 return dd->send_egress_err_status_cnt[48];
3321 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3322 void *context, int vl, int mode,
3325 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3327 return dd->send_egress_err_status_cnt[47];
3330 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3331 void *context, int vl, int mode,
3334 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3336 return dd->send_egress_err_status_cnt[46];
3339 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3340 void *context, int vl, int mode,
3343 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3345 return dd->send_egress_err_status_cnt[45];
3348 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3349 void *context, int vl,
3352 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3354 return dd->send_egress_err_status_cnt[44];
3357 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3358 const struct cntr_entry *entry,
3359 void *context, int vl, int mode, u64 data)
3361 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3363 return dd->send_egress_err_status_cnt[43];
3366 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3367 void *context, int vl, int mode,
3370 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3372 return dd->send_egress_err_status_cnt[42];
3375 static u64 access_tx_credit_return_partiy_err_cnt(
3376 const struct cntr_entry *entry,
3377 void *context, int vl, int mode, u64 data)
3379 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3381 return dd->send_egress_err_status_cnt[41];
3384 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3385 const struct cntr_entry *entry,
3386 void *context, int vl, int mode, u64 data)
3388 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3390 return dd->send_egress_err_status_cnt[40];
3393 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3394 const struct cntr_entry *entry,
3395 void *context, int vl, int mode, u64 data)
3397 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3399 return dd->send_egress_err_status_cnt[39];
3402 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3403 const struct cntr_entry *entry,
3404 void *context, int vl, int mode, u64 data)
3406 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3408 return dd->send_egress_err_status_cnt[38];
3411 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3412 const struct cntr_entry *entry,
3413 void *context, int vl, int mode, u64 data)
3415 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3417 return dd->send_egress_err_status_cnt[37];
3420 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3421 const struct cntr_entry *entry,
3422 void *context, int vl, int mode, u64 data)
3424 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3426 return dd->send_egress_err_status_cnt[36];
3429 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3430 const struct cntr_entry *entry,
3431 void *context, int vl, int mode, u64 data)
3433 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3435 return dd->send_egress_err_status_cnt[35];
3438 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3439 const struct cntr_entry *entry,
3440 void *context, int vl, int mode, u64 data)
3442 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3444 return dd->send_egress_err_status_cnt[34];
3447 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3448 const struct cntr_entry *entry,
3449 void *context, int vl, int mode, u64 data)
3451 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3453 return dd->send_egress_err_status_cnt[33];
3456 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3457 const struct cntr_entry *entry,
3458 void *context, int vl, int mode, u64 data)
3460 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3462 return dd->send_egress_err_status_cnt[32];
3465 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3466 const struct cntr_entry *entry,
3467 void *context, int vl, int mode, u64 data)
3469 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3471 return dd->send_egress_err_status_cnt[31];
3474 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3475 const struct cntr_entry *entry,
3476 void *context, int vl, int mode, u64 data)
3478 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3480 return dd->send_egress_err_status_cnt[30];
3483 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3484 const struct cntr_entry *entry,
3485 void *context, int vl, int mode, u64 data)
3487 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3489 return dd->send_egress_err_status_cnt[29];
3492 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3493 const struct cntr_entry *entry,
3494 void *context, int vl, int mode, u64 data)
3496 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3498 return dd->send_egress_err_status_cnt[28];
3501 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3502 const struct cntr_entry *entry,
3503 void *context, int vl, int mode, u64 data)
3505 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3507 return dd->send_egress_err_status_cnt[27];
3510 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3511 const struct cntr_entry *entry,
3512 void *context, int vl, int mode, u64 data)
3514 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3516 return dd->send_egress_err_status_cnt[26];
3519 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3520 const struct cntr_entry *entry,
3521 void *context, int vl, int mode, u64 data)
3523 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3525 return dd->send_egress_err_status_cnt[25];
3528 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3529 const struct cntr_entry *entry,
3530 void *context, int vl, int mode, u64 data)
3532 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3534 return dd->send_egress_err_status_cnt[24];
3537 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3538 const struct cntr_entry *entry,
3539 void *context, int vl, int mode, u64 data)
3541 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3543 return dd->send_egress_err_status_cnt[23];
static u64 access_tx_sdma6_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
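
/*
 * Note: each accessor above simply returns one element of
 * dd->send_egress_err_status_cnt[].  The array index matches the bit
 * position of the corresponding error in the SendEgressErrStatus CSR
 * (e.g. the SDMA engine N "disallowed packet" errors occupy bits
 * 16 + N), so bit N of the register is reported through element [N].
 */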

/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}

/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl,
					       int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}

static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				 void *context, int vl, int mode,
				 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}
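
/*
 * access_dc_rcv_err_cnt() above is the one accessor in this group with
 * mode-dependent logic: reads fold the software-maintained bypass packet
 * error count into the CSR value, saturating at CNTR_MAX, while writes
 * (counter resets) also zero the software count so both sources clear
 * together.
 */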

#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
				void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
			      ppd->ibport_data.rvp.cntr, vl, \
			      mode, data); \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
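
/*
 * For illustration only: def_access_sw_cpu(rc_acks) above expands to
 * roughly the following per-CPU counter accessor (the ## operator
 * pastes the counter name into the function and field names):
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd,
 *				      &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */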

#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
			     void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
 \
	if (vl != CNTR_INVALID_VL) \
		return 0; \
 \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
			     mode, data); \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
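
/*
 * For illustration only: def_access_ibp_counter(loop_pkts) above expands
 * to roughly the following, rejecting per-VL queries before forwarding
 * to the plain software counter helper:
 *
 *	static u64 access_ibp_loop_pkts(const struct cntr_entry *entry,
 *					void *context, int vl, int mode,
 *					u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *
 *		if (vl != CNTR_INVALID_VL)
 *			return 0;
 *
 *		return read_write_sw(ppd->dd,
 *				     &ppd->ibport_data.rvp.n_loop_pkts,
 *				     mode, data);
 *	}
 *
 * The dev_cntrs[] table below uses C99 designated initializers: each
 * entry is placed at the index named by its C_* enum value, so the
 * order of entries need not match the enum order and any gap is
 * zero-initialized.
 */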

static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			 CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			 CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			 CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			 CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
			 CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
			 CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			 CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_rcv_limit),
[C_SW_CTX0_SEQ_DROP] = CNTR_ELEM("SeqDrop0", 0, 0, CNTR_NORMAL,
			    access_sw_ctx0_seq_drop),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			    access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			    access_sw_kmem_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			    access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
			    SEND_DMA_DESC_FETCHED_CNT, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
				0, CNTR_NORMAL,
				access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
				CNTR_NORMAL,
				access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
				CNTR_NORMAL,
				access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
				CNTR_NORMAL,
				access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
				0, CNTR_NORMAL,
				access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoDbgParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoRxdmaParityError", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_rxdma_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
				"CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
				CNTR_NORMAL,
				access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufFlInitWrAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
				CNTR_NORMAL,
				access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQNumOfPktParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQTlPtrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
				0, 0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
				"RxRbufCsrQHeadBufNumParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
				0, CNTR_NORMAL,
				access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
				"RxRbufLookupDesRegUncCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
				CNTR_NORMAL,
				access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
				0, 0, CNTR_NORMAL,
				access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
				CNTR_NORMAL,
				access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
				CNTR_NORMAL,
				access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
				CNTR_NORMAL,
				access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
				CNTR_NORMAL,
				access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
				0, CNTR_NORMAL,
				access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
				CNTR_NORMAL,
				access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
				"PioSbrdctrlCrrelFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
				CNTR_NORMAL,
				access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
				0, CNTR_NORMAL,
				access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
				0, CNTR_NORMAL,
				access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
				CNTR_NORMAL,
				access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
				CNTR_NORMAL,
				access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
				"TxSbrdCtlStateMachineParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
				0, 0, CNTR_NORMAL,
				access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
				CNTR_NORMAL,
				access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
				CNTR_NORMAL,
				access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
				"EgressFifoUnderrunOrParityErr", 0, 0,
				CNTR_NORMAL,
				access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
				CNTR_NORMAL,
				access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
				CNTR_NORMAL,
				access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
				CNTR_NORMAL,
				access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
				0, 0, CNTR_NORMAL,
				access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
				CNTR_NORMAL,
				access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
				CNTR_NORMAL,
				access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
				0, 0, CNTR_NORMAL,
				access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
				CNTR_NORMAL,
				access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
				CNTR_NORMAL,
				access_sdma_wrong_dw_err_cnt),
};
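
/*
 * port_cntrs[] below parallels dev_cntrs[] above, but these entries are
 * per-port: their access routines are handed a struct hfi1_pportdata
 * context (as in the def_access_sw_cpu() and def_access_ibp_counter()
 * accessors) rather than the device-wide struct hfi1_devdata.
 */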

static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
5216 /* ======================================================================== */
5218 /* return true if this is chip revision A */
5219 int is_ax(struct hfi1_devdata *dd)
5222 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5223 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5224 return (chip_rev_minor & 0xf0) == 0;
5227 /* return true if this is chip revision B */
5228 int is_bx(struct hfi1_devdata *dd)
5231 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5232 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5233 	return (chip_rev_minor & 0xf0) == 0x10;
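/*
 * Worked example of the stepping decode above (illustrative values):
 * the upper nibble of CHIP_REV_MINOR selects the stepping, so a
 * chip_rev_minor of 0x03 means A3 (is_ax() true, is_bx() false),
 * while 0x11 means B1 (is_ax() false, is_bx() true).
 */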
5237  * Append string s to buffer buf. Arguments curp and lenp are the current
5238  * position and remaining length, respectively.
5240  * Return 0 on success, 1 if out of room.
5242 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5246 int result = 0; /* success */
5249 	/* add a comma, if not first in the buffer */
5252 result = 1; /* out of room */
5259 /* copy the string */
5260 while ((c = *s++) != 0) {
5262 result = 1; /* out of room */
5270 /* write return values */
5278 * Using the given flag table, print a comma separated string into
5279 * the buffer. End in '*' if the buffer is too short.
5281 static char *flag_string(char *buf, int buf_len, u64 flags,
5282 struct flag_table *table, int table_size)
5290 	/* make sure there are at least 2 bytes so we can form "*" */
5294 len--; /* leave room for a nul */
5295 for (i = 0; i < table_size; i++) {
5296 if (flags & table[i].flag) {
5297 no_room = append_str(buf, &p, &len, table[i].str);
5300 flags &= ~table[i].flag;
5304 /* any undocumented bits left? */
5305 if (!no_room && flags) {
5306 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5307 no_room = append_str(buf, &p, &len, extra);
5310 	/* add a '*' if we ran out of room */
5312 /* may need to back up to add space for a '*' */
5318 /* add final nul - space already allocated above */
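/*
 * Usage sketch for flag_string() (illustrative only -- the table below
 * is hypothetical, not a real CSR definition):
 *
 *	static struct flag_table demo[] = {
 *		{ .flag = 0x1ull, .str = "FlagA" },
 *		{ .flag = 0x2ull, .str = "FlagB" },
 *	};
 *	char buf[64];
 *
 *	flag_string(buf, sizeof(buf), 0x7, demo, ARRAY_SIZE(demo));
 *	// buf == "FlagA,FlagB,bits 0x4": the undocumented bit 0x4 is
 *	// reported through the "bits 0x%llx" catch-all above.
 */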
5323 /* first 8 CCE error interrupt source names */
5324 static const char * const cce_misc_names[] = {
5325 "CceErrInt", /* 0 */
5326 "RxeErrInt", /* 1 */
5327 "MiscErrInt", /* 2 */
5328 "Reserved3", /* 3 */
5329 "PioErrInt", /* 4 */
5330 "SDmaErrInt", /* 5 */
5331 "EgressErrInt", /* 6 */
5336 * Return the miscellaneous error interrupt name.
5338 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5340 if (source < ARRAY_SIZE(cce_misc_names))
5341 strncpy(buf, cce_misc_names[source], bsize);
5343 snprintf(buf, bsize, "Reserved%u",
5344 source + IS_GENERAL_ERR_START);
5350 * Return the SDMA engine error interrupt name.
5352 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5354 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5359 * Return the send context error interrupt name.
5361 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5363 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5367 static const char * const various_names[] = {
5376  * Return the "various" interrupt name.
5378 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5380 if (source < ARRAY_SIZE(various_names))
5381 strncpy(buf, various_names[source], bsize);
5383 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5388 * Return the DC interrupt name.
5390 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5392 static const char * const dc_int_names[] = {
5396 "lbm" /* local block merge */
5399 if (source < ARRAY_SIZE(dc_int_names))
5400 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5402 snprintf(buf, bsize, "DCInt%u", source);
5406 static const char * const sdma_int_names[] = {
5413 * Return the SDMA engine interrupt name.
5415 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5417 /* what interrupt */
5418 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5420 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5422 if (likely(what < 3))
5423 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5425 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5430 * Return the receive available interrupt name.
5432 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5434 snprintf(buf, bsize, "RcvAvailInt%u", source);
5439 * Return the receive urgent interrupt name.
5441 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5443 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5448 * Return the send credit interrupt name.
5450 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5452 snprintf(buf, bsize, "SendCreditInt%u", source);
5457 * Return the reserved interrupt name.
5459 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5461 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5465 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5467 return flag_string(buf, buf_len, flags,
5468 cce_err_status_flags,
5469 ARRAY_SIZE(cce_err_status_flags));
5472 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5474 return flag_string(buf, buf_len, flags,
5475 rxe_err_status_flags,
5476 ARRAY_SIZE(rxe_err_status_flags));
5479 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5481 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5482 ARRAY_SIZE(misc_err_status_flags));
5485 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5487 return flag_string(buf, buf_len, flags,
5488 pio_err_status_flags,
5489 ARRAY_SIZE(pio_err_status_flags));
5492 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5494 return flag_string(buf, buf_len, flags,
5495 sdma_err_status_flags,
5496 ARRAY_SIZE(sdma_err_status_flags));
5499 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5501 return flag_string(buf, buf_len, flags,
5502 egress_err_status_flags,
5503 ARRAY_SIZE(egress_err_status_flags));
5506 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5508 return flag_string(buf, buf_len, flags,
5509 egress_err_info_flags,
5510 ARRAY_SIZE(egress_err_info_flags));
5513 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5515 return flag_string(buf, buf_len, flags,
5516 send_err_status_flags,
5517 ARRAY_SIZE(send_err_status_flags));
5520 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5526 	 * For most of these errors, there is nothing that can be done except
5527 	 * report or record it.
5529 dd_dev_info(dd, "CCE Error: %s\n",
5530 cce_err_status_string(buf, sizeof(buf), reg));
5532 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5533 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5534 /* this error requires a manual drop into SPC freeze mode */
5536 start_freeze_handling(dd->pport, FREEZE_SELF);
5539 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5540 if (reg & (1ull << i)) {
5541 incr_cntr64(&dd->cce_err_status_cnt[i]);
5542 /* maintain a counter over all cce_err_status errors */
5543 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5549 * Check counters for receive errors that do not have an interrupt
5550 * associated with them.
5552 #define RCVERR_CHECK_TIME 10
5553 static void update_rcverr_timer(unsigned long opaque)
5555 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5556 struct hfi1_pportdata *ppd = dd->pport;
5557 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5559 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5560 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5561 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5562 set_link_down_reason(
5563 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5564 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5565 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5567 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5569 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
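/*
 * The timer re-arms itself, so the overflow counter is polled every
 * RCVERR_CHECK_TIME seconds. Lifecycle sketch (see init_rcverr() and
 * free_rcverr() below):
 *
 *	init_rcverr(dd);	// setup_timer() + first mod_timer()
 *	...			// update_rcverr_timer() fires periodically
 *	free_rcverr(dd);	// del_timer_sync() on teardown
 */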
5572 static int init_rcverr(struct hfi1_devdata *dd)
5574 setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5575 /* Assume the hardware counter has been reset */
5576 dd->rcv_ovfl_cnt = 0;
5577 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5580 static void free_rcverr(struct hfi1_devdata *dd)
5582 if (dd->rcverr_timer.data)
5583 del_timer_sync(&dd->rcverr_timer);
5584 dd->rcverr_timer.data = 0;
5587 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5592 dd_dev_info(dd, "Receive Error: %s\n",
5593 rxe_err_status_string(buf, sizeof(buf), reg));
5595 if (reg & ALL_RXE_FREEZE_ERR) {
5599 * Freeze mode recovery is disabled for the errors
5600 * in RXE_FREEZE_ABORT_MASK
5602 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5603 flags = FREEZE_ABORT;
5605 start_freeze_handling(dd->pport, flags);
5608 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5609 if (reg & (1ull << i))
5610 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5614 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5619 dd_dev_info(dd, "Misc Error: %s",
5620 misc_err_status_string(buf, sizeof(buf), reg));
5621 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5622 if (reg & (1ull << i))
5623 incr_cntr64(&dd->misc_err_status_cnt[i]);
5627 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5632 dd_dev_info(dd, "PIO Error: %s\n",
5633 pio_err_status_string(buf, sizeof(buf), reg));
5635 if (reg & ALL_PIO_FREEZE_ERR)
5636 start_freeze_handling(dd->pport, 0);
5638 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5639 if (reg & (1ull << i))
5640 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5644 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5649 dd_dev_info(dd, "SDMA Error: %s\n",
5650 sdma_err_status_string(buf, sizeof(buf), reg));
5652 if (reg & ALL_SDMA_FREEZE_ERR)
5653 start_freeze_handling(dd->pport, 0);
5655 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5656 if (reg & (1ull << i))
5657 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5661 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5663 incr_cntr64(&ppd->port_xmit_discards);
5666 static void count_port_inactive(struct hfi1_devdata *dd)
5668 __count_port_discards(dd->pport);
5672 * We have had a "disallowed packet" error during egress. Determine the
5673  * integrity check which failed, and update the relevant error counter, etc.
5675 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5676 * bit of state per integrity check, and so we can miss the reason for an
5677 * egress error if more than one packet fails the same integrity check
5678 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5680 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5683 struct hfi1_pportdata *ppd = dd->pport;
5684 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5685 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5688 /* clear down all observed info as quickly as possible after read */
5689 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5692 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5693 info, egress_err_info_string(buf, sizeof(buf), info), src);
5695 /* Eventually add other counters for each bit */
5696 if (info & PORT_DISCARD_EGRESS_ERRS) {
5700 * Count all applicable bits as individual errors and
5701 * attribute them to the packet that triggered this handler.
5702 * This may not be completely accurate due to limitations
5703 * on the available hardware error information. There is
5704 * a single information register and any number of error
5705 * packets may have occurred and contributed to it before
5706 * this routine is called. This means that:
5707 * a) If multiple packets with the same error occur before
5708 * this routine is called, earlier packets are missed.
5709 * There is only a single bit for each error type.
5710 * b) Errors may not be attributed to the correct VL.
5711 * The driver is attributing all bits in the info register
5712 * to the packet that triggered this call, but bits
5713 * could be an accumulation of different packets with
5715 * c) A single error packet may have multiple counts attached
5716 * to it. There is no way for the driver to know if
5717 * multiple bits set in the info register are due to a
5718 * single packet or multiple packets. The driver assumes
5721 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5722 for (i = 0; i < weight; i++) {
5723 __count_port_discards(ppd);
5724 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5725 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5727 incr_cntr64(&ppd->port_xmit_discards_vl
5734 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5735 * register. Does it represent a 'port inactive' error?
5737 static inline int port_inactive_err(u64 posn)
5739 return (posn >= SEES(TX_LINKDOWN) &&
5740 posn <= SEES(TX_INCORRECT_LINK_STATE));
5744 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5745 * register. Does it represent a 'disallowed packet' error?
5747 static inline int disallowed_pkt_err(int posn)
5749 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5750 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5754 * Input value is a bit position of one of the SDMA engine disallowed
5755 * packet errors. Return which engine. Use of this must be guarded by
5756 * disallowed_pkt_err().
5758 static inline int disallowed_pkt_engine(int posn)
5760 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
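/*
 * Typical use of the two helpers above, as done in handle_egress_err()
 * below: a status bit position is mapped to an engine, then to a VL.
 *
 *	if (disallowed_pkt_err(posn)) {
 *		int vl = engine_to_vl(dd, disallowed_pkt_engine(posn));
 *		// vl is -1 if the engine has no VL mapping
 *	}
 */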
5764  * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5767 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5769 struct sdma_vl_map *m;
5773 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5777 m = rcu_dereference(dd->sdma_map);
5778 vl = m->engine_to_vl[engine];
5785  * Translate the send context (software index) into a VL. Return -1 if the
5786 * translation cannot be done.
5788 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5790 struct send_context_info *sci;
5791 struct send_context *sc;
5794 sci = &dd->send_contexts[sw_index];
5796 /* there is no information for user (PSM) and ack contexts */
5797 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5803 if (dd->vld[15].sc == sc)
5805 for (i = 0; i < num_vls; i++)
5806 if (dd->vld[i].sc == sc)
5812 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5814 u64 reg_copy = reg, handled = 0;
5818 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5819 start_freeze_handling(dd->pport, 0);
5820 else if (is_ax(dd) &&
5821 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5822 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5823 start_freeze_handling(dd->pport, 0);
5826 int posn = fls64(reg_copy);
5827 		/* fls64() returns a 1-based offset; we want it zero-based */
5828 int shift = posn - 1;
5829 u64 mask = 1ULL << shift;
5831 if (port_inactive_err(shift)) {
5832 count_port_inactive(dd);
5834 } else if (disallowed_pkt_err(shift)) {
5835 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5837 handle_send_egress_err_info(dd, vl);
5846 dd_dev_info(dd, "Egress Error: %s\n",
5847 egress_err_status_string(buf, sizeof(buf), reg));
5849 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5850 if (reg & (1ull << i))
5851 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5855 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5860 dd_dev_info(dd, "Send Error: %s\n",
5861 send_err_status_string(buf, sizeof(buf), reg));
5863 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5864 if (reg & (1ull << i))
5865 incr_cntr64(&dd->send_err_status_cnt[i]);
5870 * The maximum number of times the error clear down will loop before
5871 * blocking a repeating error. This value is arbitrary.
5873 #define MAX_CLEAR_COUNT 20
5876 * Clear and handle an error register. All error interrupts are funneled
5877 * through here to have a central location to correctly handle single-
5878 * or multi-shot errors.
5880 * For non per-context registers, call this routine with a context value
5881 * of 0 so the per-context offset is zero.
5883 * If the handler loops too many times, assume that something is wrong
5884 * and can't be fixed, so mask the error bits.
5886 static void interrupt_clear_down(struct hfi1_devdata *dd,
5888 const struct err_reg_info *eri)
5893 /* read in a loop until no more errors are seen */
5896 reg = read_kctxt_csr(dd, context, eri->status);
5899 write_kctxt_csr(dd, context, eri->clear, reg);
5900 if (likely(eri->handler))
5901 eri->handler(dd, context, reg);
5903 if (count > MAX_CLEAR_COUNT) {
5906 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5909 * Read-modify-write so any other masked bits
5912 mask = read_kctxt_csr(dd, context, eri->mask);
5914 write_kctxt_csr(dd, context, eri->mask, mask);
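/*
 * Minimal sketch of the table entry consumed above (field names taken
 * from the eri-> accesses in this routine; the actual definition, and
 * the desc string used in the "Repeating" message, live elsewhere in
 * this file):
 *
 *	struct err_reg_info {
 *		u32 status;	// CSR offset to read outstanding errors
 *		u32 clear;	// write-1-to-clear CSR offset
 *		u32 mask;	// interrupt enable mask CSR offset
 *		void (*handler)(struct hfi1_devdata *dd, u32 ctx, u64 reg);
 *		const char *desc;
 *	};
 */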
5921 * CCE block "misc" interrupt. Source is < 16.
5923 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5925 const struct err_reg_info *eri = &misc_errs[source];
5928 interrupt_clear_down(dd, 0, eri);
5930 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5935 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5937 return flag_string(buf, buf_len, flags,
5938 sc_err_status_flags,
5939 ARRAY_SIZE(sc_err_status_flags));
5943 * Send context error interrupt. Source (hw_context) is < 160.
5945 * All send context errors cause the send context to halt. The normal
5946 * clear-down mechanism cannot be used because we cannot clear the
5947 * error bits until several other long-running items are done first.
5948 * This is OK because with the context halted, nothing else is going
5949 * to happen on it anyway.
5951 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5952 unsigned int hw_context)
5954 struct send_context_info *sci;
5955 struct send_context *sc;
5960 unsigned long irq_flags;
5962 sw_index = dd->hw_to_sw[hw_context];
5963 if (sw_index >= dd->num_send_contexts) {
5965 "out of range sw index %u for send context %u\n",
5966 sw_index, hw_context);
5969 sci = &dd->send_contexts[sw_index];
5970 spin_lock_irqsave(&dd->sc_lock, irq_flags);
5973 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5974 sw_index, hw_context);
5975 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5979 /* tell the software that a halt has begun */
5980 sc_stop(sc, SCF_HALTED);
5982 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5984 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5985 send_context_err_status_string(flags, sizeof(flags),
5988 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5989 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5992 * Automatically restart halted kernel contexts out of interrupt
5993 * context. User contexts must ask the driver to restart the context.
5995 if (sc->type != SC_USER)
5996 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5997 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6000 * Update the counters for the corresponding status bits.
6001 * Note that these particular counters are aggregated over all
6004 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6005 if (status & (1ull << i))
6006 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6010 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6011 unsigned int source, u64 status)
6013 struct sdma_engine *sde;
6016 sde = &dd->per_sdma[source];
6017 #ifdef CONFIG_SDMA_VERBOSITY
6018 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6019 slashstrip(__FILE__), __LINE__, __func__);
6020 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6021 sde->this_idx, source, (unsigned long long)status);
6024 sdma_engine_error(sde, status);
6027 * Update the counters for the corresponding status bits.
6028 * Note that these particular counters are aggregated over
6029 * all 16 DMA engines.
6031 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6032 if (status & (1ull << i))
6033 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6038 * CCE block SDMA error interrupt. Source is < 16.
6040 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6042 #ifdef CONFIG_SDMA_VERBOSITY
6043 struct sdma_engine *sde = &dd->per_sdma[source];
6045 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6046 slashstrip(__FILE__), __LINE__, __func__);
6047 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6049 sdma_dumpstate(sde);
6051 interrupt_clear_down(dd, source, &sdma_eng_err);
6055 * CCE block "various" interrupt. Source is < 8.
6057 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6059 const struct err_reg_info *eri = &various_err[source];
6062 * TCritInt cannot go through interrupt_clear_down()
6063 * because it is not a second tier interrupt. The handler
6064 * should be called directly.
6066 if (source == TCRIT_INT_SOURCE)
6067 handle_temp_err(dd);
6068 else if (eri->handler)
6069 interrupt_clear_down(dd, 0, eri);
6072 "%s: Unimplemented/reserved interrupt %d\n",
6076 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6078 /* src_ctx is always zero */
6079 struct hfi1_pportdata *ppd = dd->pport;
6080 unsigned long flags;
6081 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6083 if (reg & QSFP_HFI0_MODPRST_N) {
6084 if (!qsfp_mod_present(ppd)) {
6085 dd_dev_info(dd, "%s: QSFP module removed\n",
6088 ppd->driver_link_ready = 0;
6090 * Cable removed, reset all our information about the
6091 * cache and cable capabilities
6094 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6096 * We don't set cache_refresh_required here as we expect
6097 * an interrupt when a cable is inserted
6099 ppd->qsfp_info.cache_valid = 0;
6100 ppd->qsfp_info.reset_needed = 0;
6101 ppd->qsfp_info.limiting_active = 0;
6102 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6104 /* Invert the ModPresent pin now to detect plug-in */
6105 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6106 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6108 if ((ppd->offline_disabled_reason >
6110 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6111 (ppd->offline_disabled_reason ==
6112 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6113 ppd->offline_disabled_reason =
6115 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6117 if (ppd->host_link_state == HLS_DN_POLL) {
6119 * The link is still in POLL. This means
6120 * that the normal link down processing
6121 * will not happen. We have to do it here
6122 * before turning the DC off.
6124 queue_work(ppd->link_wq, &ppd->link_down_work);
6127 dd_dev_info(dd, "%s: QSFP module inserted\n",
6130 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6131 ppd->qsfp_info.cache_valid = 0;
6132 ppd->qsfp_info.cache_refresh_required = 1;
6133 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6137 * Stop inversion of ModPresent pin to detect
6138 * removal of the cable
6140 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6141 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6142 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6144 ppd->offline_disabled_reason =
6145 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6149 if (reg & QSFP_HFI0_INT_N) {
6150 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6152 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6153 ppd->qsfp_info.check_interrupt_flags = 1;
6154 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6157 /* Schedule the QSFP work only if there is a cable attached. */
6158 if (qsfp_mod_present(ppd))
6159 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6162 static int request_host_lcb_access(struct hfi1_devdata *dd)
6166 ret = do_8051_command(dd, HCMD_MISC,
6167 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6168 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6169 if (ret != HCMD_SUCCESS) {
6170 dd_dev_err(dd, "%s: command failed with error %d\n",
6173 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6176 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6180 ret = do_8051_command(dd, HCMD_MISC,
6181 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6182 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6183 if (ret != HCMD_SUCCESS) {
6184 dd_dev_err(dd, "%s: command failed with error %d\n",
6187 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6191 * Set the LCB selector - allow host access. The DCC selector always
6192 * points to the host.
6194 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6196 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6197 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6198 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6202 * Clear the LCB selector - allow 8051 access. The DCC selector always
6203 * points to the host.
6205 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6207 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6208 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6212 * Acquire LCB access from the 8051. If the host already has access,
6213 * just increment a counter. Otherwise, inform the 8051 that the
6214 * host is taking access.
6218 * -EBUSY if the 8051 has control and cannot be disturbed
6219 * -errno if unable to acquire access from the 8051
6221 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6223 struct hfi1_pportdata *ppd = dd->pport;
6227 * Use the host link state lock so the operation of this routine
6228 * { link state check, selector change, count increment } can occur
6229 * as a unit against a link state change. Otherwise there is a
6230 * race between the state change and the count increment.
6233 mutex_lock(&ppd->hls_lock);
6235 while (!mutex_trylock(&ppd->hls_lock))
6239 /* this access is valid only when the link is up */
6240 if (ppd->host_link_state & HLS_DOWN) {
6241 dd_dev_info(dd, "%s: link state %s not up\n",
6242 __func__, link_state_name(ppd->host_link_state));
6247 if (dd->lcb_access_count == 0) {
6248 ret = request_host_lcb_access(dd);
6251 "%s: unable to acquire LCB access, err %d\n",
6255 set_host_lcb_access(dd);
6257 dd->lcb_access_count++;
6259 mutex_unlock(&ppd->hls_lock);
6264 * Release LCB access by decrementing the use count. If the count is moving
6265 * from 1 to 0, inform 8051 that it has control back.
6269 * -errno if unable to release access to the 8051
6271 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6276 * Use the host link state lock because the acquire needed it.
6277 * Here, we only need to keep { selector change, count decrement }
6281 mutex_lock(&dd->pport->hls_lock);
6283 while (!mutex_trylock(&dd->pport->hls_lock))
6287 if (dd->lcb_access_count == 0) {
6288 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6293 if (dd->lcb_access_count == 1) {
6294 set_8051_lcb_access(dd);
6295 ret = request_8051_lcb_access(dd);
6298 "%s: unable to release LCB access, err %d\n",
6300 /* restore host access if the grant didn't work */
6301 set_host_lcb_access(dd);
6305 dd->lcb_access_count--;
6307 mutex_unlock(&dd->pport->hls_lock);
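/*
 * Typical caller pattern for the pair above (illustrative):
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {	// may sleep
 *		reg = read_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT);
 *		release_lcb_access(dd, 1);
 *	}
 *
 * The count-based design lets nested users share a single host grant.
 */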
6312 * Initialize LCB access variables and state. Called during driver load,
6313 * after most of the initialization is finished.
6315 * The DC default is LCB access on for the host. The driver defaults to
6316 * leaving access to the 8051. Assign access now - this constrains the call
6317 * to this routine to be after all LCB set-up is done. In particular, after
6318  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6320 static void init_lcb_access(struct hfi1_devdata *dd)
6322 dd->lcb_access_count = 0;
6326  * Write a response back to an 8051 request.
6328 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6330 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6331 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6333 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6334 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6338 * Handle host requests from the 8051.
6340 static void handle_8051_request(struct hfi1_pportdata *ppd)
6342 struct hfi1_devdata *dd = ppd->dd;
6347 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6348 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6349 return; /* no request */
6351 /* zero out COMPLETED so the response is seen */
6352 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6354 /* extract request details */
6355 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6356 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6357 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6358 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6361 case HREQ_LOAD_CONFIG:
6362 case HREQ_SAVE_CONFIG:
6363 case HREQ_READ_CONFIG:
6364 case HREQ_SET_TX_EQ_ABS:
6365 case HREQ_SET_TX_EQ_REL:
6367 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6369 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6371 case HREQ_CONFIG_DONE:
6372 hreq_response(dd, HREQ_SUCCESS, 0);
6375 case HREQ_INTERFACE_TEST:
6376 hreq_response(dd, HREQ_SUCCESS, data);
6379 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6380 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6386  * Set up the allocation unit value.
6388 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6390 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6392 /* do not modify other values in the register */
6393 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6394 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6395 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6399 * Set up initial VL15 credits of the remote. Assumes the rest of
6400 * the CM credit registers are zero from a previous global or credit reset.
6401 * Shared limit for VL15 will always be 0.
6403 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6405 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6407 /* set initial values for total and shared credit limit */
6408 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6409 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6412 * Set total limit to be equal to VL15 credits.
6413 * Leave shared limit at 0.
6415 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6416 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6418 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6419 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
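/*
 * Example: set_up_vl15(dd, 8) programs a dedicated limit of 8 credits
 * for VL15 and a matching total limit of 8, leaving the shared limit
 * at 0 as stated above.
 */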
6423 * Zero all credit details from the previous connection and
6424 * reset the CM manager's internal counters.
6426 void reset_link_credits(struct hfi1_devdata *dd)
6430 /* remove all previous VL credit limits */
6431 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6432 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6433 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6434 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6435 /* reset the CM block */
6436 pio_send_control(dd, PSC_CM_RESET);
6437 /* reset cached value */
6438 dd->vl15buf_cached = 0;
6441 /* convert a vCU to a CU */
6442 static u32 vcu_to_cu(u8 vcu)
6447 /* convert a CU to a vCU */
6448 static u8 cu_to_vcu(u32 cu)
6453 /* convert a vAU to an AU */
6454 static u32 vau_to_au(u8 vau)
6456 return 8 * (1 << vau);
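/*
 * Worked conversions: vau_to_au(2) == 8 * (1 << 2) == 32 bytes per
 * allocation unit. The vCU/CU pair is assumed to follow the same
 * power-of-two encoding, e.g. vcu_to_cu(1) == 2.
 */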
6459 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6461 ppd->sm_trap_qp = 0x0;
6466 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6468 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6472 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6473 write_csr(dd, DC_LCB_CFG_RUN, 0);
6474 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6475 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6476 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6477 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6478 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6479 reg = read_csr(dd, DCC_CFG_RESET);
6480 write_csr(dd, DCC_CFG_RESET, reg |
6481 (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6482 (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6483 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6485 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6486 write_csr(dd, DCC_CFG_RESET, reg);
6487 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6492 * This routine should be called after the link has been transitioned to
6493 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6496 * The expectation is that the caller of this routine would have taken
6497 * care of properly transitioning the link into the correct state.
6498 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6499 * before calling this function.
6501 static void _dc_shutdown(struct hfi1_devdata *dd)
6503 lockdep_assert_held(&dd->dc8051_lock);
6505 if (dd->dc_shutdown)
6508 dd->dc_shutdown = 1;
6509 /* Shutdown the LCB */
6510 lcb_shutdown(dd, 1);
6512 	 * Going to OFFLINE would have caused the 8051 to put the
6513 * SerDes into reset already. Just need to shut down the 8051,
6516 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6519 static void dc_shutdown(struct hfi1_devdata *dd)
6521 mutex_lock(&dd->dc8051_lock);
6523 mutex_unlock(&dd->dc8051_lock);
6527 * Calling this after the DC has been brought out of reset should not
6529 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6530 * before calling this function.
6532 static void _dc_start(struct hfi1_devdata *dd)
6534 lockdep_assert_held(&dd->dc8051_lock);
6536 if (!dd->dc_shutdown)
6539 /* Take the 8051 out of reset */
6540 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6541 /* Wait until 8051 is ready */
6542 if (wait_fm_ready(dd, TIMEOUT_8051_START))
6543 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6546 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6547 write_csr(dd, DCC_CFG_RESET, 0x10);
6548 /* lcb_shutdown() with abort=1 does not restore these */
6549 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6550 dd->dc_shutdown = 0;
6553 static void dc_start(struct hfi1_devdata *dd)
6555 mutex_lock(&dd->dc8051_lock);
6557 mutex_unlock(&dd->dc8051_lock);
6561 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6563 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6565 u64 rx_radr, tx_radr;
6568 if (dd->icode != ICODE_FPGA_EMULATION)
6572 * These LCB defaults on emulator _s are good, nothing to do here:
6573 * LCB_CFG_TX_FIFOS_RADR
6574 * LCB_CFG_RX_FIFOS_RADR
6576 * LCB_CFG_IGNORE_LOST_RCLK
6578 if (is_emulator_s(dd))
6580 /* else this is _p */
6582 version = emulator_rev(dd);
6584 version = 0x2d; /* all B0 use 0x2d or higher settings */
6586 if (version <= 0x12) {
6587 /* release 0x12 and below */
6590 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6591 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6592 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6595 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6596 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6597 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6599 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6600 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6602 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6603 } else if (version <= 0x18) {
6604 /* release 0x13 up to 0x18 */
6605 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6607 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6608 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6609 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6610 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6611 } else if (version == 0x19) {
6613 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6615 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6616 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6617 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6618 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6619 } else if (version == 0x1a) {
6621 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6623 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6624 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6625 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6626 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6627 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6629 /* release 0x1b and higher */
6630 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6632 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6633 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6634 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6635 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6638 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6639 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6640 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6641 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6642 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6646  * Handle an SMA idle message
6648 * This is a work-queue function outside of the interrupt.
6650 void handle_sma_message(struct work_struct *work)
6652 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6654 struct hfi1_devdata *dd = ppd->dd;
6659 * msg is bytes 1-4 of the 40-bit idle message - the command code
6662 ret = read_idle_sma(dd, &msg);
6665 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6667 * React to the SMA message. Byte[1] (0 for us) is the command.
6669 switch (msg & 0xff) {
6672 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6675 * Only expected in INIT or ARMED, discard otherwise.
6677 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6678 ppd->neighbor_normal = 1;
6680 case SMA_IDLE_ACTIVE:
6682 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6685 * Can activate the node. Discard otherwise.
6687 if (ppd->host_link_state == HLS_UP_ARMED &&
6688 ppd->is_active_optimize_enabled) {
6689 ppd->neighbor_normal = 1;
6690 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6694 "%s: received Active SMA idle message, couldn't set link to Active\n",
6700 "%s: received unexpected SMA idle message 0x%llx\n",
6706 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6709 unsigned long flags;
6711 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6712 rcvctrl = read_csr(dd, RCV_CTRL);
6715 write_csr(dd, RCV_CTRL, rcvctrl);
6716 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6719 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6721 adjust_rcvctrl(dd, add, 0);
6724 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6726 adjust_rcvctrl(dd, 0, clear);
6730 * Called from all interrupt handlers to start handling an SPC freeze.
6732 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6734 struct hfi1_devdata *dd = ppd->dd;
6735 struct send_context *sc;
6739 if (flags & FREEZE_SELF)
6740 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6742 /* enter frozen mode */
6743 dd->flags |= HFI1_FROZEN;
6745 /* notify all SDMA engines that they are going into a freeze */
6746 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6748 sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6750 /* do halt pre-handling on all enabled send contexts */
6751 for (i = 0; i < dd->num_send_contexts; i++) {
6752 sc = dd->send_contexts[i].sc;
6753 if (sc && (sc->flags & SCF_ENABLED))
6754 sc_stop(sc, sc_flags);
6757 	/* Send contexts are frozen. Notify user space */
6758 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6760 if (flags & FREEZE_ABORT) {
6762 "Aborted freeze recovery. Please REBOOT system\n");
6765 /* queue non-interrupt handler */
6766 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6770 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6771 * depending on the "freeze" parameter.
6773  * No need to return an error if it times out; our only option
6774 * is to proceed anyway.
6776 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6778 unsigned long timeout;
6781 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6783 reg = read_csr(dd, CCE_STATUS);
6785 /* waiting until all indicators are set */
6786 if ((reg & ALL_FROZE) == ALL_FROZE)
6787 return; /* all done */
6789 /* waiting until all indicators are clear */
6790 if ((reg & ALL_FROZE) == 0)
6791 return; /* all done */
6794 if (time_after(jiffies, timeout)) {
6796 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6797 freeze ? "" : "un", reg & ALL_FROZE,
6798 freeze ? ALL_FROZE : 0ull);
6801 usleep_range(80, 120);
6806 * Do all freeze handling for the RXE block.
6808 static void rxe_freeze(struct hfi1_devdata *dd)
6811 struct hfi1_ctxtdata *rcd;
6814 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6816 /* disable all receive contexts */
6817 for (i = 0; i < dd->num_rcv_contexts; i++) {
6818 rcd = hfi1_rcd_get_by_index(dd, i);
6819 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6825 * Unfreeze handling for the RXE block - kernel contexts only.
6826 * This will also enable the port. User contexts will do unfreeze
6827 * handling on a per-context basis as they call into the driver.
6830 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6834 struct hfi1_ctxtdata *rcd;
6836 /* enable all kernel contexts */
6837 for (i = 0; i < dd->num_rcv_contexts; i++) {
6838 rcd = hfi1_rcd_get_by_index(dd, i);
6840 		/* Ensure all non-user contexts (including vnic) are enabled */
6841 if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
6845 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6846 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6847 rcvmask |= rcd->rcvhdrtail_kvaddr ?
6848 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6849 hfi1_rcvctrl(dd, rcvmask, rcd);
6854 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6858 * Non-interrupt SPC freeze handling.
6860 * This is a work-queue function outside of the triggering interrupt.
6862 void handle_freeze(struct work_struct *work)
6864 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6866 struct hfi1_devdata *dd = ppd->dd;
6868 /* wait for freeze indicators on all affected blocks */
6869 wait_for_freeze_status(dd, 1);
6871 /* SPC is now frozen */
6873 /* do send PIO freeze steps */
6876 /* do send DMA freeze steps */
6879 /* do send egress freeze steps - nothing to do */
6881 /* do receive freeze steps */
6885 * Unfreeze the hardware - clear the freeze, wait for each
6886 * block's frozen bit to clear, then clear the frozen flag.
6888 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6889 wait_for_freeze_status(dd, 0);
6892 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6893 wait_for_freeze_status(dd, 1);
6894 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6895 wait_for_freeze_status(dd, 0);
6898 /* do send PIO unfreeze steps for kernel contexts */
6899 pio_kernel_unfreeze(dd);
6901 /* do send DMA unfreeze steps */
6904 /* do send egress unfreeze steps - nothing to do */
6906 /* do receive unfreeze steps for kernel contexts */
6907 rxe_kernel_unfreeze(dd);
6910 * The unfreeze procedure touches global device registers when
6911 * it disables and re-enables RXE. Mark the device unfrozen
6912 * after all that is done so other parts of the driver waiting
6913 * for the device to unfreeze don't do things out of order.
6915 * The above implies that the meaning of HFI1_FROZEN flag is
6916 * "Device has gone into freeze mode and freeze mode handling
6917 * is still in progress."
6919 * The flag will be removed when freeze mode processing has
6922 dd->flags &= ~HFI1_FROZEN;
6923 wake_up(&dd->event_queue);
6925 /* no longer frozen */
6929 * Handle a link up interrupt from the 8051.
6931 * This is a work-queue function outside of the interrupt.
6933 void handle_link_up(struct work_struct *work)
6935 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6937 struct hfi1_devdata *dd = ppd->dd;
6939 set_link_state(ppd, HLS_UP_INIT);
6941 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6944 * OPA specifies that certain counters are cleared on a transition
6945 * to link up, so do that.
6947 clear_linkup_counters(dd);
6949 * And (re)set link up default values.
6951 set_linkup_defaults(ppd);
6954 * Set VL15 credits. Use cached value from verify cap interrupt.
6955 * In case of quick linkup or simulator, vl15 value will be set by
6956 * handle_linkup_change. VerifyCap interrupt handler will not be
6957 * called in those scenarios.
6959 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
6960 set_up_vl15(dd, dd->vl15buf_cached);
6962 /* enforce link speed enabled */
6963 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6964 /* oops - current speed is not enabled, bounce */
6966 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6967 ppd->link_speed_active, ppd->link_speed_enabled);
6968 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6969 OPA_LINKDOWN_REASON_SPEED_POLICY);
6970 set_link_state(ppd, HLS_DN_OFFLINE);
6976 * Several pieces of LNI information were cached for SMA in ppd.
6977 * Reset these on link down
6979 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6981 ppd->neighbor_guid = 0;
6982 ppd->neighbor_port_number = 0;
6983 ppd->neighbor_type = 0;
6984 ppd->neighbor_fm_security = 0;
6987 static const char * const link_down_reason_strs[] = {
6988 [OPA_LINKDOWN_REASON_NONE] = "None",
6989 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6990 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6991 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6992 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6993 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6994 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6995 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6996 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6997 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6998 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6999 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7000 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7001 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7002 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7003 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7004 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7005 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7006 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7007 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7008 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7009 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7010 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7011 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7012 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7013 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7014 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7015 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7016 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7017 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7018 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7019 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7020 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7021 "Excessive buffer overrun",
7022 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7023 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7024 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7025 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7026 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7027 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7028 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7029 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7030 "Local media not installed",
7031 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7032 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7033 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7034 "End to end not installed",
7035 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7036 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7037 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7038 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7039 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7040 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7043 /* return the neighbor link down reason string */
7044 static const char *link_down_reason_str(u8 reason)
7046 const char *str = NULL;
7048 if (reason < ARRAY_SIZE(link_down_reason_strs))
7049 str = link_down_reason_strs[reason];
7057 * Handle a link down interrupt from the 8051.
7059 * This is a work-queue function outside of the interrupt.
7061 void handle_link_down(struct work_struct *work)
7063 u8 lcl_reason, neigh_reason = 0;
7064 u8 link_down_reason;
7065 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7068 static const char ldr_str[] = "Link down reason: ";
7070 if ((ppd->host_link_state &
7071 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7072 ppd->port_type == PORT_TYPE_FIXED)
7073 ppd->offline_disabled_reason =
7074 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7076 /* Go offline first, then deal with reading/writing through 8051 */
7077 was_up = !!(ppd->host_link_state & HLS_UP);
7078 set_link_state(ppd, HLS_DN_OFFLINE);
7079 xchg(&ppd->is_link_down_queued, 0);
7083 /* link down reason is only valid if the link was up */
7084 read_link_down_reason(ppd->dd, &link_down_reason);
7085 switch (link_down_reason) {
7086 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7087 /* the link went down, no idle message reason */
7088 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7091 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7093 * The neighbor reason is only valid if an idle message
7094 * was received for it.
7096 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7097 dd_dev_info(ppd->dd,
7098 "%sNeighbor link down message %d, %s\n",
7099 ldr_str, neigh_reason,
7100 link_down_reason_str(neigh_reason));
7102 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7103 dd_dev_info(ppd->dd,
7104 "%sHost requested link to go offline\n",
7108 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7109 ldr_str, link_down_reason);
7114 * If no reason, assume peer-initiated but missed
7115 * LinkGoingDown idle flits.
7117 if (neigh_reason == 0)
7118 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7120 /* went down while polling or going up */
7121 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7124 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7126 /* inform the SMA when the link transitions from up to down */
7127 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7128 ppd->neigh_link_down_reason.sma == 0) {
7129 ppd->local_link_down_reason.sma =
7130 ppd->local_link_down_reason.latest;
7131 ppd->neigh_link_down_reason.sma =
7132 ppd->neigh_link_down_reason.latest;
7135 reset_neighbor_info(ppd);
7137 /* disable the port */
7138 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7141 * If there is no cable attached, turn the DC off. Otherwise,
7142 * start the link bring up.
7144 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7145 dc_shutdown(ppd->dd);
7150 void handle_link_bounce(struct work_struct *work)
7152 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7156 * Only do something if the link is currently up.
7158 if (ppd->host_link_state & HLS_UP) {
7159 set_link_state(ppd, HLS_DN_OFFLINE);
7162 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7163 __func__, link_state_name(ppd->host_link_state));
7168 * Mask conversion: Capability exchange to Port LTP. The capability
7169 * exchange has an implicit 16b CRC that is mandatory.
7171 static int cap_to_port_ltp(int cap)
7173 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7175 if (cap & CAP_CRC_14B)
7176 port_ltp |= PORT_LTP_CRC_MODE_14;
7177 if (cap & CAP_CRC_48B)
7178 port_ltp |= PORT_LTP_CRC_MODE_48;
7179 if (cap & CAP_CRC_12B_16B_PER_LANE)
7180 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7186 * Convert an OPA Port LTP mask to capability mask
7188 int port_ltp_to_cap(int port_ltp)
7192 if (port_ltp & PORT_LTP_CRC_MODE_14)
7193 cap_mask |= CAP_CRC_14B;
7194 if (port_ltp & PORT_LTP_CRC_MODE_48)
7195 cap_mask |= CAP_CRC_48B;
7196 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7197 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7203 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7205 static int lcb_to_port_ltp(int lcb_crc)
7209 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7210 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7211 else if (lcb_crc == LCB_CRC_48B)
7212 port_ltp = PORT_LTP_CRC_MODE_48;
7213 else if (lcb_crc == LCB_CRC_14B)
7214 port_ltp = PORT_LTP_CRC_MODE_14;
7216 port_ltp = PORT_LTP_CRC_MODE_16;
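/*
 * Worked example for the three converters above: a capability mask of
 * (CAP_CRC_14B | CAP_CRC_48B) maps to PORT_LTP_CRC_MODE_16 | _14 | _48
 * via cap_to_port_ltp() -- the 16b mode is always included -- and
 * port_ltp_to_cap() maps those LTP bits back to the 14B/48B
 * capabilities. lcb_to_port_ltp() converts exactly one LCB CRC mode,
 * defaulting to the mandatory 16b mode.
 */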
7222 * Our neighbor has indicated that we are allowed to act as a fabric
7223 * manager, so place the full management partition key in the second
7224 * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7225 * that we should already have the limited management partition key in
7226 * array element 1, and also that the port is not yet up when
7227 * add_full_mgmt_pkey() is invoked.
7229 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7231 struct hfi1_devdata *dd = ppd->dd;
7233 /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7234 if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7235 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7236 __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7237 ppd->pkeys[2] = FULL_MGMT_P_KEY;
7238 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7239 hfi1_event_pkey_change(ppd->dd, ppd->port);
7242 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7244 if (ppd->pkeys[2] != 0) {
7246 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7247 hfi1_event_pkey_change(ppd->dd, ppd->port);
7252 * Convert the given link width to the OPA link width bitmask.
7254 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7259 * Simulator and quick linkup do not set the width.
7260 * Just set it to 4x without complaint.
7262 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7263 return OPA_LINK_WIDTH_4X;
7264 return 0; /* no lanes up */
7265 case 1: return OPA_LINK_WIDTH_1X;
7266 case 2: return OPA_LINK_WIDTH_2X;
7267 case 3: return OPA_LINK_WIDTH_3X;
7269 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7272 case 4: return OPA_LINK_WIDTH_4X;
7277 * Do a population count on the bottom nibble.
7279 static const u8 bit_counts[16] = {
7280 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7283 static inline u8 nibble_to_count(u8 nibble)
7285 return bit_counts[nibble & 0xf];
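/*
 * Example: an enable_lane nibble of 0xb (binary 1011) has three bits
 * set, so nibble_to_count(0xb) == 3 active lanes.
 */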
7289 * Read the active lane information from the 8051 registers and return
7292 * Active lane information is found in these 8051 registers:
7296 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7302 u8 tx_polarity_inversion;
7303 u8 rx_polarity_inversion;
7306 /* read the active lanes */
7307 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7308 &rx_polarity_inversion, &max_rate);
7309 read_local_lni(dd, &enable_lane_rx);
7311 /* convert to counts */
7312 tx = nibble_to_count(enable_lane_tx);
7313 rx = nibble_to_count(enable_lane_rx);
7316 * Set link_speed_active here, overriding what was set in
7317 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7318 * set the max_rate field in handle_verify_cap until v0.19.
7320 if ((dd->icode == ICODE_RTL_SILICON) &&
7321 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7322 /* max_rate: 0 = 12.5G, 1 = 25G */
7325 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7329 "%s: unexpected max rate %d, using 25Gb\n",
7330 __func__, (int)max_rate);
7333 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7339 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7340 enable_lane_tx, tx, enable_lane_rx, rx);
7341 *tx_width = link_width_to_bits(dd, tx);
7342 *rx_width = link_width_to_bits(dd, rx);
7346 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7347 * Valid after the end of VerifyCap and during LinkUp. Does not change
7348 * after link up. I.e. look elsewhere for downgrade information.
7351 * + bits [7:4] contain the number of active transmitters
7352 * + bits [3:0] contain the number of active receivers
7353 * These are numbers 1 through 4 and can be different values if the
7354 * link is asymmetric.
7356 * verify_cap_local_fm_link_width[0] retains its original value.
static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
			      u16 *rx_width)
{
	u16 widths, tx, rx;
	u8 misc_bits, local_flags;
7363 u16 active_tx, active_rx;
7365 read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
	tx = widths >> 12;
	rx = (widths >> 8) & 0xf;
7369 *tx_width = link_width_to_bits(dd, tx);
7370 *rx_width = link_width_to_bits(dd, rx);
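	/*
	 * Worked example (a sketch, given the extraction above): a widths
	 * value of 0x4400 decodes to tx = 4 and rx = 4, and both map to
	 * OPA_LINK_WIDTH_4X via link_width_to_bits().
	 */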
7372 /* print the active widths */
7373 get_link_widths(dd, &active_tx, &active_rx);
7377 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7378 * hardware information when the link first comes up.
7380 * The link width is not available until after VerifyCap.AllFramesReceived
7381 * (the trigger for handle_verify_cap), so this is outside that routine
7382 * and should be called when the 8051 signals linkup.
7384 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7386 u16 tx_width, rx_width;
7388 /* get end-of-LNI link widths */
7389 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7391 /* use tx_width as the link is supposed to be symmetric on link up */
7392 ppd->link_width_active = tx_width;
7393 /* link width downgrade active (LWD.A) starts out matching LW.A */
7394 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7395 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7396 /* per OPA spec, on link up LWD.E resets to LWD.S */
7397 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
	/* cache the active egress rate (units of 10^6 bits/sec) */
7399 ppd->current_egress_rate = active_egress_rate(ppd);
7403 * Handle a verify capabilities interrupt from the 8051.
7405 * This is a work-queue function outside of the interrupt.
7407 void handle_verify_cap(struct work_struct *work)
7409 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7411 struct hfi1_devdata *dd = ppd->dd;
7413 u8 power_management;
7423 u16 active_tx, active_rx;
7424 u8 partner_supported_crc;
7428 set_link_state(ppd, HLS_VERIFY_CAP);
7430 lcb_shutdown(dd, 0);
7431 adjust_lcb_for_fpga_serdes(dd);
7433 read_vc_remote_phy(dd, &power_management, &continuous);
7434 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7435 &partner_supported_crc);
7436 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7437 read_remote_device_id(dd, &device_id, &device_rev);
7439 * And the 'MgmtAllowed' information, which is exchanged during
 * LNI, is also available at this point.
7442 read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7443 /* print the active widths */
7444 get_link_widths(dd, &active_tx, &active_rx);
7446 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7447 (int)power_management, (int)continuous);
7449 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7450 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7451 (int)partner_supported_crc);
7452 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7453 (u32)remote_tx_rate, (u32)link_widths);
7454 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7455 (u32)device_id, (u32)device_rev);
7457 * The peer vAU value just read is the peer receiver value. HFI does
7458 * not support a transmit vAU of 0 (AU == 8). We advertised that
7459 * with Z=1 in the fabric capabilities sent to the peer. The peer
7460 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7461 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7462 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7463 * subject to the Z value exception.
 */
	if (vau == 0)
		vau = 1;
	set_up_vau(dd, vau);
7470 * Set VL15 credits to 0 in global credit register. Cache remote VL15
 * credits value and wait for link-up interrupt to set it.
 */
	set_up_vl15(dd, 0);
	dd->vl15buf_cached = vl15buf;
7476 /* set up the LCB CRC mode */
7477 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7479 /* order is important: use the lowest bit in common */
7480 if (crc_mask & CAP_CRC_14B)
7481 crc_val = LCB_CRC_14B;
7482 else if (crc_mask & CAP_CRC_48B)
7483 crc_val = LCB_CRC_48B;
7484 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7485 crc_val = LCB_CRC_12B_16B_PER_LANE;
7487 crc_val = LCB_CRC_16B;
7489 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7490 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7491 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
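	/*
	 * Worked example of the selection above: if we enabled
	 * CAP_CRC_14B | CAP_CRC_16B and the peer supports
	 * CAP_CRC_48B | CAP_CRC_16B, the only common bit is 16B; none of
	 * the preferred modes match, so crc_val falls through to
	 * LCB_CRC_16B.
	 */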
7493 /* set (14b only) or clear sideband credit */
7494 reg = read_csr(dd, SEND_CM_CTRL);
7495 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7496 write_csr(dd, SEND_CM_CTRL,
7497 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7499 write_csr(dd, SEND_CM_CTRL,
7500 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7503 ppd->link_speed_active = 0; /* invalid value */
7504 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7505 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
		switch (remote_tx_rate) {
		case 0:
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
			break;
		case 1:
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
			break;
		}
	} else {
		/* actual rate is highest bit of the ANDed rates */
		u8 rate = remote_tx_rate & ppd->local_tx_rate;

		if (rate & 2)
			ppd->link_speed_active = OPA_LINK_SPEED_25G;
		else if (rate & 1)
			ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
	}
7523 if (ppd->link_speed_active == 0) {
7524 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7525 __func__, (int)remote_tx_rate);
7526 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7530 * Cache the values of the supported, enabled, and active
7531 * LTP CRC modes to return in 'portinfo' queries. But the bit
7532 * flags that are returned in the portinfo query differ from
7533 * what's in the link_crc_mask, crc_sizes, and crc_val
7534 * variables. Convert these here.
7536 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7537 /* supported crc modes */
7538 ppd->port_ltp_crc_mode |=
7539 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7540 /* enabled crc modes */
7541 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7542 /* active crc mode */
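	/*
	 * Resulting layout: port_ltp_crc_mode packs three 4-bit
	 * PORT_LTP_CRC_MODE_* fields as (supported << 8) | (enabled << 4)
	 * | active, which is the form reported by portinfo queries.
	 */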
7544 /* set up the remote credit return table */
7545 assign_remote_cm_au_table(dd, vcu);
7548 * The LCB is reset on entry to handle_verify_cap(), so this must
7549 * be applied on every link up.
7551 * Adjust LCB error kill enable to kill the link if
7552 * these RBUF errors are seen:
7553 * REPLAY_BUF_MBE_SMASK
7554 * FLIT_INPUT_BUF_MBE_SMASK
7556 if (is_ax(dd)) { /* fixed in B0 */
7557 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7558 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7559 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7560 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7563 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7564 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7566 /* give 8051 access to the LCB CSRs */
7567 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7568 set_8051_lcb_access(dd);
7570 if (ppd->mgmt_allowed)
7571 add_full_mgmt_pkey(ppd);
7573 /* tell the 8051 to go to LinkUp */
7574 set_link_state(ppd, HLS_GOING_UP);
7578 * Apply the link width downgrade enabled policy against the current active
7581 * Called when the enabled policy changes or the active link widths change.
7583 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7590 /* use the hls lock to avoid a race with actual link up */
7593 mutex_lock(&ppd->hls_lock);
7594 /* only apply if the link is up */
7595 if (ppd->host_link_state & HLS_DOWN) {
		/* still going up... wait and retry */
7597 if (ppd->host_link_state & HLS_GOING_UP) {
7598 if (++tries < 1000) {
7599 mutex_unlock(&ppd->hls_lock);
7600 usleep_range(100, 120); /* arbitrary */
7604 "%s: giving up waiting for link state change\n",
7610 lwde = ppd->link_width_downgrade_enabled;
7612 if (refresh_widths) {
7613 get_link_widths(ppd->dd, &tx, &rx);
7614 ppd->link_width_downgrade_tx_active = tx;
7615 ppd->link_width_downgrade_rx_active = rx;
7618 if (ppd->link_width_downgrade_tx_active == 0 ||
7619 ppd->link_width_downgrade_rx_active == 0) {
7620 /* the 8051 reported a dead link as a downgrade */
7621 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7622 } else if (lwde == 0) {
7623 /* downgrade is disabled */
7625 /* bounce if not at starting active width */
7626 if ((ppd->link_width_active !=
7627 ppd->link_width_downgrade_tx_active) ||
7628 (ppd->link_width_active !=
7629 ppd->link_width_downgrade_rx_active)) {
7631 "Link downgrade is disabled and link has downgraded, downing link\n");
7633 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7634 ppd->link_width_active,
7635 ppd->link_width_downgrade_tx_active,
7636 ppd->link_width_downgrade_rx_active);
7639 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7640 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7641 /* Tx or Rx is outside the enabled policy */
7643 "Link is outside of downgrade allowed, downing link\n");
7645 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7646 lwde, ppd->link_width_downgrade_tx_active,
7647 ppd->link_width_downgrade_rx_active);
7652 mutex_unlock(&ppd->hls_lock);
7655 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7656 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7657 set_link_state(ppd, HLS_DN_OFFLINE);
7663 * Handle a link downgrade interrupt from the 8051.
7665 * This is a work-queue function outside of the interrupt.
7667 void handle_link_downgrade(struct work_struct *work)
7669 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7670 link_downgrade_work);
7672 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7673 apply_link_downgrade_policy(ppd, 1);
7676 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7678 return flag_string(buf, buf_len, flags, dcc_err_flags,
7679 ARRAY_SIZE(dcc_err_flags));
7682 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7684 return flag_string(buf, buf_len, flags, lcb_err_flags,
7685 ARRAY_SIZE(lcb_err_flags));
7688 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7690 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7691 ARRAY_SIZE(dc8051_err_flags));
7694 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7696 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7697 ARRAY_SIZE(dc8051_info_err_flags));
7700 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7702 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7703 ARRAY_SIZE(dc8051_info_host_msg_flags));
7706 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7708 struct hfi1_pportdata *ppd = dd->pport;
7709 u64 info, err, host_msg;
	int queue_link_down = 0;
	char buf[96];
7713 /* look at the flags */
7714 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7715 /* 8051 information set by firmware */
7716 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7717 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7718 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7719 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
		host_msg = (info >>
			    DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7722 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7725 * Handle error flags.
7727 if (err & FAILED_LNI) {
7729 * LNI error indications are cleared by the 8051
7730 * only when starting polling. Only pay attention
7731 * to them when in the states that occur during
7734 if (ppd->host_link_state
7735 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7736 queue_link_down = 1;
7737 dd_dev_info(dd, "Link error: %s\n",
				    dc8051_info_err_string(buf,
							   sizeof(buf),
							   err & FAILED_LNI));
		}
7743 err &= ~(u64)FAILED_LNI;
		/* unknown frames can happen during LNI, just count */
7746 if (err & UNKNOWN_FRAME) {
7747 ppd->unknown_frame_count++;
7748 err &= ~(u64)UNKNOWN_FRAME;
		if (err)
			/* report remaining errors, but do not do anything */
			dd_dev_err(dd, "8051 info error: %s\n",
				   dc8051_info_err_string(buf, sizeof(buf),
							  err));
7758 * Handle host message flags.
7760 if (host_msg & HOST_REQ_DONE) {
7762 * Presently, the driver does a busy wait for
7763 * host requests to complete. This is only an
7764 * informational message.
7765 * NOTE: The 8051 clears the host message
7766 * information *on the next 8051 command*.
7767 * Therefore, when linkup is achieved,
7768 * this flag will still be set.
7770 host_msg &= ~(u64)HOST_REQ_DONE;
7772 if (host_msg & BC_SMA_MSG) {
7773 queue_work(ppd->link_wq, &ppd->sma_message_work);
7774 host_msg &= ~(u64)BC_SMA_MSG;
7776 if (host_msg & LINKUP_ACHIEVED) {
7777 dd_dev_info(dd, "8051: Link up\n");
7778 queue_work(ppd->link_wq, &ppd->link_up_work);
7779 host_msg &= ~(u64)LINKUP_ACHIEVED;
7781 if (host_msg & EXT_DEVICE_CFG_REQ) {
7782 handle_8051_request(ppd);
7783 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7785 if (host_msg & VERIFY_CAP_FRAME) {
7786 queue_work(ppd->link_wq, &ppd->link_vc_work);
7787 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7789 if (host_msg & LINK_GOING_DOWN) {
7790 const char *extra = "";
7791 /* no downgrade action needed if going down */
7792 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7793 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7794 extra = " (ignoring downgrade)";
7796 dd_dev_info(dd, "8051: Link down%s\n", extra);
7797 queue_link_down = 1;
7798 host_msg &= ~(u64)LINK_GOING_DOWN;
7800 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7801 queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7802 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
		if (host_msg)
			/* report remaining messages, but do not do anything */
			dd_dev_info(dd, "8051 info host message: %s\n",
				    dc8051_info_host_msg_string(buf,
								sizeof(buf),
								host_msg));
7812 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7814 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7816 * Lost the 8051 heartbeat. If this happens, we
7817 * receive constant interrupts about it. Disable
7818 * the interrupt after the first.
7820 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7821 write_csr(dd, DC_DC8051_ERR_EN,
7822 read_csr(dd, DC_DC8051_ERR_EN) &
7823 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7825 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
	if (reg)
		/* report the error, but do not do anything */
		dd_dev_err(dd, "8051 error: %s\n",
			   dc8051_err_string(buf, sizeof(buf), reg));
7833 if (queue_link_down) {
7835 * if the link is already going down or disabled, do not
7836 * queue another. If there's a link down entry already
7837 * queued, don't queue another one.
7839 if ((ppd->host_link_state &
7840 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7841 ppd->link_enabled == 0) {
7842 dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
				    __func__, ppd->host_link_state,
				    ppd->link_enabled);
		} else {
7846 if (xchg(&ppd->is_link_down_queued, 1) == 1)
7848 "%s: link down request already queued\n",
7851 queue_work(ppd->link_wq, &ppd->link_down_work);
static const char * const fm_config_txt[] = {
	[0] =
	"BadHeadDist: Distance violation between two head flits",
	[1] =
	"BadTailDist: Distance violation between two tail flits",
	[2] =
	"BadCtrlDist: Distance violation between two credit control flits",
	[3] =
	"BadCrdAck: Credits return for unsupported VL",
	[4] =
	"UnsupportedVLMarker: Received VL Marker",
	[5] =
	"BadPreempt: Exceeded the preemption nesting level",
	[6] =
	"BadControlFlit: Received unsupported control flit",
	/* no 7 */
	[8] =
	"UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
};
static const char * const port_rcv_txt[] = {
	[1] =
	"BadPktLen: Illegal PktLen",
	[2] =
	"PktLenTooLong: Packet longer than PktLen",
	[3] =
	"PktLenTooShort: Packet shorter than PktLen",
	[4] =
	"BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
	[5] =
	"BadDLID: Illegal DLID (0, doesn't match HFI)",
	[6] =
	"BadL2: Illegal L2 opcode",
	[7] =
	"BadSC: Unsupported SC",
	[9] =
	"BadRC: Illegal RC",
	[11] =
	"PreemptError: Preempting with same VL",
	[12] =
	"PreemptVL15: Preempting a VL15 packet",
};
7899 #define OPA_LDR_FMCONFIG_OFFSET 16
7900 #define OPA_LDR_PORTRCV_OFFSET 0
7901 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
	u64 info, hdr0, hdr1;
	u8 lcl_reason = 0;
	char buf[96];
	int do_bounce = 0;
	struct hfi1_pportdata *ppd = dd->pport;
7910 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7911 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7912 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7913 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7914 /* set status bit */
7915 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7917 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7920 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7921 struct hfi1_pportdata *ppd = dd->pport;
7922 /* this counter saturates at (2^32) - 1 */
7923 if (ppd->link_downed < (u32)UINT_MAX)
7925 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7928 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7929 u8 reason_valid = 1;
7931 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7932 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7933 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7934 /* set status bit */
7935 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
		}

		switch (info) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
			extra = fm_config_txt[info];
			break;
		case 8:
			extra = fm_config_txt[info];
7949 if (ppd->port_error_action &
7950 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
				do_bounce = 1;
				/*
				 * lcl_reason cannot be derived from info
				 * for this error
				 */
				lcl_reason =
					OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7962 snprintf(buf, sizeof(buf), "reserved%lld", info);
7967 if (reason_valid && !do_bounce) {
7968 do_bounce = ppd->port_error_action &
7969 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7970 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7973 /* just report this */
7974 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
7976 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7979 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7980 u8 reason_valid = 1;
7982 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7983 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7984 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7985 if (!(dd->err_info_rcvport.status_and_code &
7986 OPA_EI_STATUS_SMASK)) {
7987 dd->err_info_rcvport.status_and_code =
7988 info & OPA_EI_CODE_SMASK;
7989 /* set status bit */
7990 dd->err_info_rcvport.status_and_code |=
7991 OPA_EI_STATUS_SMASK;
7993 * save first 2 flits in the packet that caused
7996 dd->err_info_rcvport.packet_flit1 = hdr0;
7997 dd->err_info_rcvport.packet_flit2 = hdr1;
		}

		switch (info) {
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
		case 9:
		case 11:
		case 12:
			extra = port_rcv_txt[info];
			break;
		default:
			reason_valid = 0;
			snprintf(buf, sizeof(buf), "reserved%lld", info);
			extra = buf;
			break;
		}
8019 if (reason_valid && !do_bounce) {
8020 do_bounce = ppd->port_error_action &
8021 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8022 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8025 /* just report this */
8026 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8027 " hdr0 0x%llx, hdr1 0x%llx\n",
8030 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8033 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8034 /* informative only */
8035 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8036 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8038 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8039 /* informative only */
8040 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8041 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8044 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8045 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
	/* report any remaining errors */
	if (reg)
		dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
					dcc_err_string(buf, sizeof(buf), reg));
8052 if (lcl_reason == 0)
8053 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8056 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8058 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8059 queue_work(ppd->link_wq, &ppd->link_bounce_work);
static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
{
	char buf[96];

	dd_dev_info(dd, "LCB Error: %s\n",
		    lcb_err_string(buf, sizeof(buf), reg));
}
8072 * CCE block DC interrupt. Source is < 8.
8074 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8076 const struct err_reg_info *eri = &dc_errs[source];
	if (eri->handler) {
		interrupt_clear_down(dd, 0, eri);
8080 } else if (source == 3 /* dc_lbm_int */) {
8082 * This indicates that a parity error has occurred on the
8083 * address/control lines presented to the LBM. The error
8084 * is a single pulse, there is no associated error flag,
8085 * and it is non-maskable. This is because if a parity
8086 * error occurs on the request the request is dropped.
 * This should never occur, but it is nice to know if it happens.
 */
8090 dd_dev_err(dd, "Parity error in DC LBM block\n");
8092 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8097 * TX block send credit interrupt. Source is < 160.
8099 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8101 sc_group_release_update(dd, source);
8105 * TX block SDMA interrupt. Source is < 48.
8107 * SDMA interrupts are grouped by type:
 *  0 -  N-1 = SDma
 *  N - 2N-1 = SDmaProgress
8111 * 2N - 3N-1 = SDmaIdle
8113 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8115 /* what interrupt */
8116 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8118 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
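	/*
	 * Decode example: if TXE_NUM_SDMA_ENGINES is 16, a source of 18
	 * yields what == 1 (SDmaProgress) for engine which == 2.
	 */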
8120 #ifdef CONFIG_SDMA_VERBOSITY
8121 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8122 slashstrip(__FILE__), __LINE__, __func__);
8123 sdma_dumpstate(&dd->per_sdma[which]);
8126 if (likely(what < 3 && which < dd->num_sdma)) {
8127 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8129 /* should not happen */
8130 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8135 * RX block receive available interrupt. Source is < 160.
8137 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8139 struct hfi1_ctxtdata *rcd;
8142 if (likely(source < dd->num_rcv_contexts)) {
8143 rcd = hfi1_rcd_get_by_index(dd, source);
8145 /* Check for non-user contexts, including vnic */
8146 if ((source < dd->first_dyn_alloc_ctxt) ||
8147 (rcd->sc && (rcd->sc->type == SC_KERNEL)))
				rcd->do_interrupt(rcd, 0);
			else
				handle_user_interrupt(rcd);
8155 /* received an interrupt, but no rcd */
8156 err_detail = "dataless";
8158 /* received an interrupt, but are not using that context */
8159 err_detail = "out of range";
8161 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8162 err_detail, source);
8166 * RX block receive urgent interrupt. Source is < 160.
8168 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8170 struct hfi1_ctxtdata *rcd;
8173 if (likely(source < dd->num_rcv_contexts)) {
8174 rcd = hfi1_rcd_get_by_index(dd, source);
8176 /* only pay attention to user urgent interrupts */
8177 if ((source >= dd->first_dyn_alloc_ctxt) &&
8178 (!rcd->sc || (rcd->sc->type == SC_USER)))
8179 handle_user_interrupt(rcd);
8184 /* received an interrupt, but no rcd */
8185 err_detail = "dataless";
8187 /* received an interrupt, but are not using that context */
8188 err_detail = "out of range";
8190 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8191 err_detail, source);
8195 * Reserved range interrupt. Should not be called in normal operation.
8197 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8201 dd_dev_err(dd, "unexpected %s interrupt\n",
8202 is_reserved_name(name, sizeof(name), source));
8205 static const struct is_table is_table[] = {
8208 * name func interrupt func
8210 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8211 is_misc_err_name, is_misc_err_int },
8212 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8213 is_sdma_eng_err_name, is_sdma_eng_err_int },
8214 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8215 is_sendctxt_err_name, is_sendctxt_err_int },
8216 { IS_SDMA_START, IS_SDMA_END,
8217 is_sdma_eng_name, is_sdma_eng_int },
8218 { IS_VARIOUS_START, IS_VARIOUS_END,
8219 is_various_name, is_various_int },
8220 { IS_DC_START, IS_DC_END,
8221 is_dc_name, is_dc_int },
8222 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8223 is_rcv_avail_name, is_rcv_avail_int },
8224 { IS_RCVURGENT_START, IS_RCVURGENT_END,
8225 is_rcv_urgent_name, is_rcv_urgent_int },
8226 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8227 is_send_credit_name, is_send_credit_int},
8228 { IS_RESERVED_START, IS_RESERVED_END,
8229 is_reserved_name, is_reserved_int},
8233 * Interrupt source interrupt - called when the given source has an interrupt.
8234 * Source is a bit index into an array of 64-bit integers.
8236 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8238 const struct is_table *entry;
8240 /* avoids a double compare by walking the table in-order */
8241 for (entry = &is_table[0]; entry->is_name; entry++) {
8242 if (source < entry->end) {
8243 trace_hfi1_interrupt(dd, entry, source);
8244 entry->is_int(dd, source - entry->start);
8248 /* fell off the end */
8249 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8253 * General interrupt handler. This is able to correctly handle
8254 * all interrupts in case INTx is used.
8256 static irqreturn_t general_interrupt(int irq, void *data)
8258 struct hfi1_devdata *dd = data;
8259 u64 regs[CCE_NUM_INT_CSRS];
	u32 bit;
	int i;
	irqreturn_t handled = IRQ_NONE;
8264 this_cpu_inc(*dd->int_counter);
8266 /* phase 1: scan and clear all handled interrupts */
8267 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8268 if (dd->gi_mask[i] == 0) {
			regs[i] = 0; /* used later */
			continue;
		}
		regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
			  dd->gi_mask[i];
		/* only clear if anything is set */
		if (regs[i])
			write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
	}
8279 /* phase 2: call the appropriate handler */
	for_each_set_bit(bit, (unsigned long *)&regs[0],
8281 CCE_NUM_INT_CSRS * 64) {
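		/*
		 * Note: regs[] is scanned as one contiguous bitmap, so the
		 * bit number found here is exactly the "source" index that
		 * is_interrupt() resolves through is_table above.
		 */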
8282 is_interrupt(dd, bit);
		handled = IRQ_HANDLED;
	}

	return handled;
}
8289 static irqreturn_t sdma_interrupt(int irq, void *data)
8291 struct sdma_engine *sde = data;
	struct hfi1_devdata *dd = sde->dd;
	u64 status;
8295 #ifdef CONFIG_SDMA_VERBOSITY
8296 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8297 slashstrip(__FILE__), __LINE__, __func__);
8298 sdma_dumpstate(sde);
8301 this_cpu_inc(*dd->int_counter);
8303 /* This read_csr is really bad in the hot path */
8304 status = read_csr(dd,
			  CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
		 & sde->imask;
8307 if (likely(status)) {
8308 /* clear the interrupt(s) */
		write_csr(dd,
			  CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
			  status);
8313 /* handle the interrupt(s) */
8314 sdma_engine_interrupt(sde, status);
8316 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8323 * Clear the receive interrupt. Use a read of the interrupt clear CSR
 * to ensure that the write completed. This does NOT guarantee that
8325 * queued DMA writes to memory from the chip are pushed.
8327 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8329 struct hfi1_devdata *dd = rcd->dd;
8330 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8332 mmiowb(); /* make sure everything before is written */
8333 write_csr(dd, addr, rcd->imask);
8334 /* force the above write on the chip and get a value back */
8335 (void)read_csr(dd, addr);
8338 /* force the receive interrupt */
8339 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8341 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8345 * Return non-zero if a packet is present.
8347 * This routine is called when rechecking for packets after the RcvAvail
8348 * interrupt has been cleared down. First, do a quick check of memory for
8349 * a packet present. If not found, use an expensive CSR read of the context
8350 * tail to determine the actual tail. The CSR read is necessary because there
8351 * is no method to push pending DMAs to memory other than an interrupt and we
8352 * are trying to determine if we need to force an interrupt.
static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
{
	u32 tail;
	int present;
8359 if (!rcd->rcvhdrtail_kvaddr)
8360 present = (rcd->seq_cnt ==
8361 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8362 else /* is RDMA rtail */
		present = (rcd->head != get_rcvhdrtail(rcd));

	if (present)
		return 1;
	/* fall back to a CSR read, correct independent of DMA_RTAIL */
8369 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8370 return rcd->head != tail;
8374 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8375 * This routine will try to handle packets immediately (latency), but if
 * it finds too many, it will invoke the thread handler (bandwidth). The
8377 * chip receive interrupt is *not* cleared down until this or the thread (if
8378 * invoked) is finished. The intent is to avoid extra interrupts while we
8379 * are processing packets anyway.
8381 static irqreturn_t receive_context_interrupt(int irq, void *data)
8383 struct hfi1_ctxtdata *rcd = data;
	struct hfi1_devdata *dd = rcd->dd;
	int disposition;
	int present;
8388 trace_hfi1_receive_interrupt(dd, rcd);
8389 this_cpu_inc(*dd->int_counter);
8390 aspm_ctx_disable(rcd);
8392 /* receive interrupt remains blocked while processing packets */
8393 disposition = rcd->do_interrupt(rcd, 0);
8396 * Too many packets were seen while processing packets in this
8397 * IRQ handler. Invoke the handler thread. The receive interrupt
8400 if (disposition == RCV_PKT_LIMIT)
8401 return IRQ_WAKE_THREAD;
8404 * The packet processor detected no more packets. Clear the receive
 * interrupt and recheck for a packet that may have arrived
8406 * after the previous check and interrupt clear. If a packet arrived,
8407 * force another interrupt.
8409 clear_recv_intr(rcd);
8410 present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);

	return IRQ_HANDLED;
}
8418 * Receive packet thread handler. This expects to be invoked with the
8419 * receive interrupt still blocked.
8421 static irqreturn_t receive_context_thread(int irq, void *data)
	struct hfi1_ctxtdata *rcd = data;
	int present;
8426 /* receive interrupt is still blocked from the IRQ handler */
8427 (void)rcd->do_interrupt(rcd, 1);
8430 * The packet processor will only return if it detected no more
8431 * packets. Hold IRQs here so we can safely clear the interrupt and
8432 * recheck for a packet that may have arrived after the previous
8433 * check and the interrupt clear. If a packet arrived, force another
8436 local_irq_disable();
8437 clear_recv_intr(rcd);
8438 present = check_packet_present(rcd);
	if (present)
		force_recv_intr(rcd);
	local_irq_enable();

	return IRQ_HANDLED;
}
8446 /* ========================================================================= */
8448 u32 read_physical_state(struct hfi1_devdata *dd)
8452 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8453 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8454 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8457 u32 read_logical_state(struct hfi1_devdata *dd)
8461 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8462 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8463 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8466 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8470 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8471 /* clear current state, set new state */
8472 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8473 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8474 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8478 * Use the 8051 to read a LCB CSR.
8480 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8485 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8486 if (acquire_lcb_access(dd, 0) == 0) {
8487 *data = read_csr(dd, addr);
8488 release_lcb_access(dd, 0);
8494 /* register is an index of LCB registers: (offset - base) / 8 */
8495 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8496 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8497 if (ret != HCMD_SUCCESS)
 * Provide a cache for some of the LCB registers in case the LCB is
 * unavailable.
 * (The LCB is unavailable in certain link states, for example.)
 */
struct lcb_datum {
	u32 off;
	u64 val;
};

static struct lcb_datum lcb_cache[] = {
8513 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8514 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8515 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8518 static void update_lcb_cache(struct hfi1_devdata *dd)
8524 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8525 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8527 /* Update if we get good data */
8528 if (likely(ret != -EBUSY))
8529 lcb_cache[i].val = val;
8533 static int read_lcb_cache(u32 off, u64 *val)
8537 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8538 if (lcb_cache[i].off == off) {
			*val = lcb_cache[i].val;
			return 0;
		}
	}

	pr_warn("%s bad offset 0x%x\n", __func__, off);
	return -1;
}
8549 * Read an LCB CSR. Access may not be in host control, so check.
8550 * Return 0 on success, -EBUSY on failure.
8552 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8554 struct hfi1_pportdata *ppd = dd->pport;
8556 /* if up, go through the 8051 for the value */
8557 if (ppd->host_link_state & HLS_UP)
8558 return read_lcb_via_8051(dd, addr, data);
8559 /* if going up or down, check the cache, otherwise, no access */
8560 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
		if (read_lcb_cache(addr, data))
			return -EBUSY;
		return 0;
	}

	/* otherwise, host has access */
	*data = read_csr(dd, addr);
	return 0;
}
8572 * Use the 8051 to write a LCB CSR.
8574 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8579 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8580 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8581 if (acquire_lcb_access(dd, 0) == 0) {
8582 write_csr(dd, addr, data);
8583 release_lcb_access(dd, 0);
8589 /* register is an index of LCB registers: (offset - base) / 8 */
8590 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8591 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8592 if (ret != HCMD_SUCCESS)
8598 * Write an LCB CSR. Access may not be in host control, so check.
8599 * Return 0 on success, -EBUSY on failure.
8601 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8603 struct hfi1_pportdata *ppd = dd->pport;
8605 /* if up, go through the 8051 for the value */
8606 if (ppd->host_link_state & HLS_UP)
8607 return write_lcb_via_8051(dd, addr, data);
8608 /* if going up or down, no access */
	if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
		return -EBUSY;

	/* otherwise, host has access */
	write_csr(dd, addr, data);
	return 0;
}
8618 * < 0 = Linux error, not able to get access
8619 * > 0 = 8051 command RETURN_CODE
static int do_8051_command(
	struct hfi1_devdata *dd,
	u32 type,
	u64 in_data,
	u64 *out_data)
{
	u64 reg, completed;
	int return_code;
	unsigned long timeout;
8631 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8633 mutex_lock(&dd->dc8051_lock);
8635 /* We can't send any commands to the 8051 if it's in reset */
8636 if (dd->dc_shutdown) {
8637 return_code = -ENODEV;
8642 * If an 8051 host command timed out previously, then the 8051 is
8645 * On first timeout, attempt to reset and restart the entire DC
8646 * block (including 8051). (Is this too big of a hammer?)
8648 * If the 8051 times out a second time, the reset did not bring it
8649 * back to healthy life. In that case, fail any subsequent commands.
8651 if (dd->dc8051_timed_out) {
8652 if (dd->dc8051_timed_out > 1) {
8654 "Previous 8051 host command timed out, skipping command %u\n",
8656 return_code = -ENXIO;
8664 * If there is no timeout, then the 8051 command interface is
8665 * waiting for a command.
	 * When writing a LCB CSR, out_data contains the full value to
	 * be written, while in_data contains the relative LCB
8671 * address in 7:0. Do the work here, rather than the caller,
	 * of distributing the write data to where it needs to go:
8675 * 39:00 -> in_data[47:8]
8676 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8677 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8679 if (type == HCMD_WRITE_LCB_CSR) {
8680 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8681 /* must preserve COMPLETED - it is tied to hardware */
8682 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8683 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8684 reg |= ((((*out_data) >> 40) & 0xff) <<
8685 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8686 | ((((*out_data) >> 48) & 0xffff) <<
8687 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8688 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
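		/*
		 * Worked example of the distribution above (a sketch): for
		 * a 64-bit write value V, V[39:0] rides in in_data bits
		 * 47:8, V[47:40] lands in RETURN_CODE, and V[63:48] in
		 * RSP_DATA, matching the two register fields just written.
		 */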
8692 * Do two writes: the first to stabilize the type and req_data, the
8693 * second to activate.
8695 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8696 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8697 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8698 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8699 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8700 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8701 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8703 /* wait for completion, alternate: interrupt */
8704 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8706 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8707 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8710 if (time_after(jiffies, timeout)) {
8711 dd->dc8051_timed_out++;
8712 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8715 return_code = -ETIMEDOUT;
8722 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8723 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8724 if (type == HCMD_READ_LCB_CSR) {
8725 /* top 16 bits are in a different register */
8726 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8727 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8729 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8732 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8733 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8734 dd->dc8051_timed_out = 0;
8736 * Clear command for next user.
8738 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8741 mutex_unlock(&dd->dc8051_lock);
8745 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8747 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8750 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8751 u8 lane_id, u32 config_data)
8756 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8757 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8758 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8759 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8760 if (ret != HCMD_SUCCESS) {
8762 "load 8051 config: field id %d, lane %d, err %d\n",
8763 (int)field_id, (int)lane_id, ret);
8769 * Read the 8051 firmware "registers". Use the RAM directly. Always
8770 * set the result, even on error.
8771 * Return 0 on success, -errno on failure
8773 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
	/* address start depends on the lane_id */
	if (lane_id < 4)
		addr = (4 * NUM_GENERAL_FIELDS)
			+ (lane_id * 4 * NUM_LANE_FIELDS);
	else
		addr = 0;
	addr += field_id * 4;
8788 /* read is in 8-byte chunks, hardware will truncate the address down */
8789 ret = read_8051_data(dd, addr, 8, &big_data);
	if (ret == 0) {
		/* extract the 4 bytes we want */
		if (addr & 0x4)
			*result = (u32)(big_data >> 32);
		else
			*result = (u32)big_data;
	} else {
		*result = 0;
8799 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
			   __func__, lane_id, field_id);
	}

	return ret;
}
8806 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8811 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8812 | power_management << POWER_MANAGEMENT_SHIFT;
8813 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8814 GENERAL_CONFIG, frame);
8817 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8818 u16 vl15buf, u8 crc_sizes)
8822 frame = (u32)vau << VAU_SHIFT
8824 | (u32)vcu << VCU_SHIFT
8825 | (u32)vl15buf << VL15BUF_SHIFT
8826 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8827 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8828 GENERAL_CONFIG, frame);
8831 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8832 u8 *flag_bits, u16 *link_widths)
8836 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8838 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8839 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8840 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8843 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8850 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8851 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8852 | (u32)link_widths << LINK_WIDTH_SHIFT;
8853 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8857 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8862 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8863 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8864 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8867 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8872 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8873 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8874 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8875 & REMOTE_DEVICE_REV_MASK;
8878 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8883 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8884 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
	/* Clear, then set field */
	frame &= ~mask;
	frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8888 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8892 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8897 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8898 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8899 STS_FM_VERSION_MAJOR_MASK;
8900 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8901 STS_FM_VERSION_MINOR_MASK;
8903 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8904 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8905 STS_FM_VERSION_PATCH_MASK;
8908 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8913 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8914 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8915 & POWER_MANAGEMENT_MASK;
8916 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8917 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8920 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8921 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8925 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8926 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8927 *z = (frame >> Z_SHIFT) & Z_MASK;
8928 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8929 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8930 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8933 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8939 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8941 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8942 & REMOTE_TX_RATE_MASK;
8943 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8946 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8950 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8951 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8954 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8958 read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8959 *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8962 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8964 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8967 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8969 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8972 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8978 if (dd->pport->host_link_state & HLS_UP) {
8979 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8982 *link_quality = (frame >> LINK_QUALITY_SHIFT)
8983 & LINK_QUALITY_MASK;
8987 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8991 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8992 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8995 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8999 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9000 *ldr = (frame & 0xff);
9003 static int read_tx_settings(struct hfi1_devdata *dd,
9005 u8 *tx_polarity_inversion,
9006 u8 *rx_polarity_inversion,
9012 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9013 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9014 & ENABLE_LANE_TX_MASK;
9015 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9016 & TX_POLARITY_INVERSION_MASK;
9017 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9018 & RX_POLARITY_INVERSION_MASK;
9019 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9023 static int write_tx_settings(struct hfi1_devdata *dd,
9025 u8 tx_polarity_inversion,
9026 u8 rx_polarity_inversion,
9031 /* no need to mask, all variable sizes match field widths */
9032 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9033 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9034 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9035 | max_rate << MAX_RATE_SHIFT;
9036 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
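/*
 * Note: read_tx_settings() and write_tx_settings() are symmetric; both
 * move the same four fields through one GENERAL_CONFIG frame, so a
 * read-modify-write of a single field preserves the other three.
 */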
9040 * Read an idle LCB message.
9042 * Returns 0 on success, -EINVAL on error
9044 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9048 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9049 if (ret != HCMD_SUCCESS) {
9050 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9054 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9055 /* return only the payload as we already know the type */
9056 *data_out >>= IDLE_PAYLOAD_SHIFT;
9061 * Read an idle SMA message. To be done in response to a notification from
9064 * Returns 0 on success, -EINVAL on error
9066 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9068 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9073 * Send an idle LCB message.
9075 * Returns 0 on success, -EINVAL on error
9077 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9081 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9082 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9083 if (ret != HCMD_SUCCESS) {
9084 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9092 * Send an idle SMA message.
9094 * Returns 0 on success, -EINVAL on error
9096 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9100 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9101 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9102 return send_idle_message(dd, data);
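/*
 * Message layout sketch: the value sent is
 * (payload << IDLE_PAYLOAD_SHIFT) | (IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
 * read_idle_sma() above strips the type and returns only the payload.
 */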
9106 * Initialize the LCB then do a quick link up. This may or may not be
9109 * return 0 on success, -errno on error
9111 static int do_quick_linkup(struct hfi1_devdata *dd)
9115 lcb_shutdown(dd, 0);
9118 /* LCB_CFG_LOOPBACK.VAL = 2 */
9119 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9120 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9121 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9122 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9125 /* start the LCBs */
9126 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9127 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9129 /* simulator only loopback steps */
9130 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9131 /* LCB_CFG_RUN.EN = 1 */
9132 write_csr(dd, DC_LCB_CFG_RUN,
9133 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9135 ret = wait_link_transfer_active(dd, 10);
9139 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9140 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9145 * When doing quick linkup and not in loopback, both
9146 * sides must be done with LCB set-up before either
9147 * starts the quick linkup. Put a delay here so that
9148 * both sides can be started and have a chance to be
9149 * done with LCB set up before resuming.
9152 "Pausing for peer to be finished with LCB set up\n");
9154 dd_dev_err(dd, "Continuing with quick linkup\n");
9157 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9158 set_8051_lcb_access(dd);
9161 * State "quick" LinkUp request sets the physical link state to
9162 * LinkUp without a verify capability sequence.
9163 * This state is in simulator v37 and later.
9165 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9166 if (ret != HCMD_SUCCESS) {
9168 "%s: set physical link state to quick LinkUp failed with return %d\n",
9171 set_host_lcb_access(dd);
9172 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9179 return 0; /* success */
9183 * Set the SerDes to internal loopback mode.
9184 * Returns 0 on success, -errno on error.
9186 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
9190 ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
9191 if (ret == HCMD_SUCCESS)
9194 "Set physical link state to SerDes Loopback failed with return %d\n",
9202 * Do all special steps to set up loopback.
9204 static int init_loopback(struct hfi1_devdata *dd)
9206 dd_dev_info(dd, "Entering loopback mode\n");
9208 /* all loopbacks should disable self GUID check */
9209 write_csr(dd, DC_DC8051_CFG_MODE,
9210 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9213 * The simulator has only one loopback option - LCB. Switch
9214 * to that option, which includes quick link up.
9216 * Accept all valid loopback values.
9218 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9219 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9220 loopback == LOOPBACK_CABLE)) {
9221 loopback = LOOPBACK_LCB;
9226 /* handle serdes loopback */
9227 if (loopback == LOOPBACK_SERDES) {
		/* internal serdes loopback needs quick linkup on RTL */
		if (dd->icode == ICODE_RTL_SILICON)
			quick_linkup = 1;
		return set_serdes_loopback_mode(dd);
	}
9234 /* LCB loopback - handled at poll time */
9235 if (loopback == LOOPBACK_LCB) {
9236 quick_linkup = 1; /* LCB is always quick linkup */
9238 /* not supported in emulation due to emulation RTL changes */
9239 if (dd->icode == ICODE_FPGA_EMULATION) {
9241 "LCB loopback not supported in emulation\n");
9247 /* external cable loopback requires no extra steps */
9248 if (loopback == LOOPBACK_CABLE)
9251 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9256 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9257 * used in the Verify Capability link width attribute.
9259 static u16 opa_to_vc_link_widths(u16 opa_widths)
9264 static const struct link_bits {
9267 } opa_link_xlate[] = {
9268 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9269 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9270 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9271 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9274 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9275 if (opa_widths & opa_link_xlate[i].from)
9276 result |= opa_link_xlate[i].to;
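	/*
	 * Example: OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X translates to
	 * 0b1001 (bit 0 for 1X, bit 3 for 4X) in the Verify Capability
	 * encoding, per the translation table above.
	 */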
9282 * Set link attributes before moving to polling.
9284 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9286 struct hfi1_devdata *dd = ppd->dd;
9288 u8 tx_polarity_inversion;
9289 u8 rx_polarity_inversion;
9292 /* reset our fabric serdes to clear any lingering problems */
9293 fabric_serdes_reset(dd);
9295 /* set the local tx rate - need to read-modify-write */
9296 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9297 &rx_polarity_inversion, &ppd->local_tx_rate);
9299 goto set_local_link_attributes_fail;
9301 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9302 /* set the tx rate to the fastest enabled */
9303 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9304 ppd->local_tx_rate = 1;
9306 ppd->local_tx_rate = 0;
9308 /* set the tx rate to all enabled */
9309 ppd->local_tx_rate = 0;
9310 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9311 ppd->local_tx_rate |= 2;
9312 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9313 ppd->local_tx_rate |= 1;
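	/*
	 * Encoding note: with 8051 firmware 0.20 or later, local_tx_rate
	 * is a bit mask (bit 0 = 12.5G, bit 1 = 25G); older firmware
	 * takes a single value (0 = 12.5G, 1 = 25G), per the branch above.
	 */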
9316 enable_lane_tx = 0xF; /* enable all four lanes */
9317 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9318 rx_polarity_inversion, ppd->local_tx_rate);
9319 if (ret != HCMD_SUCCESS)
9320 goto set_local_link_attributes_fail;
9323 * DC supports continuous updates.
9325 ret = write_vc_local_phy(dd,
9326 0 /* no power management */,
9327 1 /* continuous updates */);
9328 if (ret != HCMD_SUCCESS)
9329 goto set_local_link_attributes_fail;
9331 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9332 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9333 ppd->port_crc_mode_enabled);
9334 if (ret != HCMD_SUCCESS)
9335 goto set_local_link_attributes_fail;
9337 ret = write_vc_local_link_width(dd, 0, 0,
9338 opa_to_vc_link_widths(
9339 ppd->link_width_enabled));
9340 if (ret != HCMD_SUCCESS)
9341 goto set_local_link_attributes_fail;
9343 /* let peer know who we are */
9344 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9345 if (ret == HCMD_SUCCESS)
9348 set_local_link_attributes_fail:
9350 "Failed to set local link attributes, return 0x%x\n",
9356 * Call this to start the link.
9357 * Do not do anything if the link is disabled.
9358 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9360 int start_link(struct hfi1_pportdata *ppd)
9363 * Tune the SerDes to a ballpark setting for optimal signal and bit
9364 * error rate. Needs to be done before starting the link.
9368 if (!ppd->driver_link_ready) {
9369 dd_dev_info(ppd->dd,
9370 "%s: stopping link start because driver is not ready\n",
9376 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9377 * pkey table can be configured properly if the HFI unit is connected
9378 * to switch port with MgmtAllowed=NO
9380 clear_full_mgmt_pkey(ppd);
9382 return set_link_state(ppd, HLS_DN_POLL);
9385 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9387 struct hfi1_devdata *dd = ppd->dd;
	u64 mask;
	unsigned long timeout;
9392 * Some QSFP cables have a quirk that asserts the IntN line as a side
9393 * effect of power up on plug-in. We ignore this false positive
9394 * interrupt until the module has finished powering up by waiting for
9395 * a minimum timeout of the module inrush initialization time of
9396 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
	 * module have stabilized.
	 */
	msleep(500);

	/*
	 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
	 */
	timeout = jiffies + msecs_to_jiffies(2000);
	while (1) {
		mask = read_csr(dd, dd->hfi1_id ?
				ASIC_QSFP2_IN : ASIC_QSFP1_IN);
		if (!(mask & QSFP_HFI0_INT_N))
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
				    __func__);
			break;
		}
		udelay(2);
	}
}
9419 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
	struct hfi1_devdata *dd = ppd->dd;
	u64 mask;

	mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
	if (enable) {
		/*
		 * Clear the status register to avoid an immediate interrupt
		 * when we re-enable the IntN pin
		 */
		write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
			  QSFP_HFI0_INT_N);
		mask |= (u64)QSFP_HFI0_INT_N;
	} else {
		mask &= ~(u64)QSFP_HFI0_INT_N;
	}
	write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
}
9439 int reset_qsfp(struct hfi1_pportdata *ppd)
9441 struct hfi1_devdata *dd = ppd->dd;
9442 u64 mask, qsfp_mask;
9444 /* Disable INT_N from triggering QSFP interrupts */
9445 set_qsfp_int_n(ppd, 0);
9447 /* Reset the QSFP */
9448 mask = (u64)QSFP_HFI0_RESET_N;
	qsfp_mask = read_csr(dd,
			     dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
	qsfp_mask &= ~mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);

	udelay(10);

	qsfp_mask |= mask;
	write_csr(dd,
		  dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9462 wait_for_qsfp_init(ppd);
9465 * Allow INT_N to trigger the QSFP interrupt to watch
9466 * for alarms and warnings
9468 set_qsfp_int_n(ppd, 1);
9471 * After the reset, AOC transmitters are enabled by default. They need
9472 * to be turned off to complete the QSFP setup before they can be
9475 return set_qsfp_tx(ppd, 0);
static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
                    u8 *qsfp_interrupt_status)
{
    struct hfi1_devdata *dd = ppd->dd;

    if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
        (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
        dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
               __func__);

    if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
        (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
        dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
               __func__);

    /*
     * The remaining alarms/warnings don't matter if the link is down.
     */
    if (ppd->host_link_state & HLS_DOWN)
        return 0;

    if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
        (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
        dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
               __func__);

    if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
        (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
        dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
               __func__);

    /* Byte 2 is vendor specific */

    if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
        (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
        dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
               __func__);

    if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
        (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
        dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
               __func__);

    if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
        (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
        dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
               __func__);

    if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
        (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
        dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
               __func__);

    if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
        (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
        dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
               __func__);

    if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
        (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
        dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
               __func__);

    if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
        (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
        dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
               __func__);

    if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
        (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
        dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
               __func__);

    if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
        (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
        dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
               __func__);

    if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
        (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
        dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
               __func__);

    if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
        (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
        dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
               __func__);

    if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
        (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
        dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
               __func__);

    /* Bytes 9-10 and 11-12 are reserved */
    /* Bytes 13-15 are vendor specific */

    return 0;
}
/* This routine will only be scheduled if the QSFP module present is asserted */
void qsfp_event(struct work_struct *work)
{
    struct qsfp_data *qd;
    struct hfi1_pportdata *ppd;
    struct hfi1_devdata *dd;

    qd = container_of(work, struct qsfp_data, qsfp_work);
    ppd = qd->ppd;
    dd = ppd->dd;

    if (!qsfp_mod_present(ppd))
        return;

    if (ppd->host_link_state == HLS_DN_DISABLE) {
        dd_dev_info(ppd->dd,
                "%s: stopping link start because link is disabled\n",
                __func__);
        return;
    }

    /*
     * Turn DC back on after cable has been re-inserted. Up until
     * now, the DC has been in reset to save power.
     */
    dc_start(dd);

    if (qd->cache_refresh_required) {
        set_qsfp_int_n(ppd, 0);

        wait_for_qsfp_init(ppd);

        /* allow INT_N to trigger the QSFP interrupt for alarms/warnings */
        set_qsfp_int_n(ppd, 1);

        start_link(ppd);
    }

    if (qd->check_interrupt_flags) {
        u8 qsfp_interrupt_status[16] = {0,};

        if (one_qsfp_read(ppd, dd->hfi1_id, 6,
                  &qsfp_interrupt_status[0], 16) != 16) {
            dd_dev_info(dd,
                    "%s: Failed to read status of QSFP module\n",
                    __func__);
        } else {
            unsigned long flags;

            handle_qsfp_error_conditions(ppd, qsfp_interrupt_status);
            spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
            ppd->qsfp_info.check_interrupt_flags = 0;
            spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
                           flags);
        }
    }
}
static void init_qsfp_int(struct hfi1_devdata *dd)
{
    struct hfi1_pportdata *ppd = dd->pport;
    u64 qsfp_mask, cce_int_mask;
    const int qsfp1_int_smask = QSFP1_INT % 64;
    const int qsfp2_int_smask = QSFP2_INT % 64;

    /*
     * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
     * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
     * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
     * the index of the appropriate CSR in the CCEIntMask CSR array
     */
    cce_int_mask = read_csr(dd, CCE_INT_MASK +
                (8 * (QSFP1_INT / 64)));
    if (dd->hfi1_id) {
        cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
        write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
              cce_int_mask);
    } else {
        cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
        write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
              cce_int_mask);
    }

    qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
    /* Clear current status to avoid spurious interrupts */
    write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
          qsfp_mask);
    write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
          qsfp_mask);

    set_qsfp_int_n(ppd, 0);

    /* Handle active low nature of INT_N and MODPRST_N pins */
    if (qsfp_mod_present(ppd))
        qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
    write_csr(dd,
          dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
          qsfp_mask);
}
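/*
 * Sketch of the index math above, with an assumed (not actual) source
 * number: if QSFP1_INT were interrupt source 45, then QSFP1_INT / 64 == 0
 * selects CCE_INT_MASK CSR 0 and QSFP1_INT % 64 == 45 is the bit cleared
 * within that CSR.  The real source numbers come from chip_registers.h;
 * only the quotient/remainder pattern matters here.
 */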
/*
 * Do a one-time initialize of the LCB block.
 */
static void init_lcb(struct hfi1_devdata *dd)
{
    /* simulator does not correctly handle LCB cclk loopback, skip */
    if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
        return;

    /* the DC has been reset earlier in the driver load */

    /* set LCB for cclk loopback on the port */
    write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
    write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
    write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
    write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
    write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
    write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
    write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
}
/*
 * Perform a test read on the QSFP.  Return 0 on success, -ERRNO on error.
 */
static int test_qsfp_read(struct hfi1_pportdata *ppd)
{
    int ret;
    u8 status;

    /*
     * Report success if not a QSFP or, if it is a QSFP, but the cable is
     * not plugged in.
     */
    if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
        return 0;

    /* read byte 2, the status byte */
    ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
    if (ret < 0)
        return ret;
    if (ret != 1)
        return -EIO;

    return 0; /* success */
}
/*
 * Values for QSFP retry.
 *
 * Give up after 10s (20 x 500ms).  The overall timeout was empirically
 * arrived at from experience on a large cluster.
 */
#define MAX_QSFP_RETRIES 20
#define QSFP_RETRY_WAIT 500 /* msec */
/*
 * Try a QSFP read.  If it fails, schedule a retry for later.
 * Called on first link activation after driver load.
 */
static void try_start_link(struct hfi1_pportdata *ppd)
{
    if (test_qsfp_read(ppd)) {
        /* read failed */
        if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
            dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
            return;
        }
        dd_dev_info(ppd->dd,
                "QSFP not responding, waiting and retrying %d\n",
                (int)ppd->qsfp_retry_count);
        ppd->qsfp_retry_count++;
        queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
                   msecs_to_jiffies(QSFP_RETRY_WAIT));
        return;
    }
    ppd->qsfp_retry_count = 0;

    start_link(ppd);
}
/*
 * Workqueue function to start the link after a delay.
 */
void handle_start_link(struct work_struct *work)
{
    struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
                          start_link_work.work);
    try_start_link(ppd);
}
int bringup_serdes(struct hfi1_pportdata *ppd)
{
    struct hfi1_devdata *dd = ppd->dd;
    u64 guid;
    int ret;

    if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
        add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);

    guid = ppd->guids[HFI1_PORT_GUID_INDEX];
    if (!guid) {
        if (dd->base_guid)
            guid = dd->base_guid + ppd->port - 1;
        ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
    }

    /* Set linkinit_reason on power up per OPA spec */
    ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;

    /* one-time init of the LCB */
    init_lcb(dd);

    ret = init_loopback(dd);
    if (ret < 0)
        return ret;

    get_port_type(ppd);
    if (ppd->port_type == PORT_TYPE_QSFP) {
        set_qsfp_int_n(ppd, 0);
        wait_for_qsfp_init(ppd);
        set_qsfp_int_n(ppd, 1);
    }

    try_start_link(ppd);
    return 0;
}
void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
{
    struct hfi1_devdata *dd = ppd->dd;

    /*
     * Shut down the link and keep it down.  First turn off the driver's
     * intent to allow the link to be up (driver_link_ready).
     * Then make sure the link is not automatically restarted
     * (link_enabled).  Cancel any pending restart.  And finally
     * go offline.
     */
    ppd->driver_link_ready = 0;
    ppd->link_enabled = 0;

    ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
    flush_delayed_work(&ppd->start_link_work);
    cancel_delayed_work_sync(&ppd->start_link_work);

    ppd->offline_disabled_reason =
        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
    set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
                 OPA_LINKDOWN_REASON_SMA_DISABLED);
    set_link_state(ppd, HLS_DN_OFFLINE);

    /* disable the port */
    clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
    cancel_work_sync(&ppd->freeze_work);
}
static inline int init_cpu_counters(struct hfi1_devdata *dd)
{
    struct hfi1_pportdata *ppd;
    int i;

    ppd = (struct hfi1_pportdata *)(dd + 1);
    for (i = 0; i < dd->num_pports; i++, ppd++) {
        ppd->ibport_data.rvp.rc_acks = NULL;
        ppd->ibport_data.rvp.rc_qacks = NULL;
        ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
        ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
        ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
        if (!ppd->ibport_data.rvp.rc_acks ||
            !ppd->ibport_data.rvp.rc_delayed_comp ||
            !ppd->ibport_data.rvp.rc_qacks)
            return -ENOMEM;
    }

    return 0;
}
/*
 * index is the index into the receive array
 */
void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
          u32 type, unsigned long pa, u16 order)
{
    u64 reg;

    if (!(dd->flags & HFI1_PRESENT))
        goto done;

    if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
        pa = 0;
        order = 0;
    } else if (type > PT_INVALID) {
        dd_dev_err(dd,
               "unexpected receive array type %u for index %u, not handled\n",
               type, index);
        goto done;
    }
    trace_hfi1_put_tid(dd, index, type, pa, order);

#define RT_ADDR_SHIFT 12    /* 4KB kernel address boundary */
    reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
        | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
        | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
                    << RCV_ARRAY_RT_ADDR_SHIFT;
    trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
    writeq(reg, dd->rcvarray_wc + (index * 8));

    if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
        /*
         * Eager entries are written and flushed.
         * Expected entries are flushed every 4 writes.
         */
        flush_wc();
done:
    return;
}
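/*
 * Worked example of the RcvArray packing above (illustrative values):
 * an eager buffer at physical address 0x200000 with order 0 yields
 * pa >> RT_ADDR_SHIFT == 0x200, so the entry is
 * write-enable | (0 << BUF_SIZE) | (0x200 << ADDR), and it is flushed
 * immediately because the type is PT_EAGER rather than on the
 * every-4th-write cadence used for expected entries.
 */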
void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
{
    struct hfi1_devdata *dd = rcd->dd;
    u32 i;

    /* this could be optimized */
    for (i = rcd->eager_base; i < rcd->eager_base +
             rcd->egrbufs.alloced; i++)
        hfi1_put_tid(dd, i, PT_INVALID, 0, 0);

    for (i = rcd->expected_base;
            i < rcd->expected_base + rcd->expected_count; i++)
        hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
}
static const char * const ib_cfg_name_strings[] = {
    "HFI1_IB_CFG_LIDLMC",
    "HFI1_IB_CFG_LWID_DG_ENB",
    "HFI1_IB_CFG_LWID_ENB",
    "HFI1_IB_CFG_LWID",
    "HFI1_IB_CFG_SPD_ENB",
    "HFI1_IB_CFG_SPD",
    "HFI1_IB_CFG_RXPOL_ENB",
    "HFI1_IB_CFG_LREV_ENB",
    "HFI1_IB_CFG_LINKLATENCY",
    "HFI1_IB_CFG_HRTBT",
    "HFI1_IB_CFG_OP_VLS",
    "HFI1_IB_CFG_VL_HIGH_CAP",
    "HFI1_IB_CFG_VL_LOW_CAP",
    "HFI1_IB_CFG_OVERRUN_THRESH",
    "HFI1_IB_CFG_PHYERR_THRESH",
    "HFI1_IB_CFG_LINKDEFAULT",
    "HFI1_IB_CFG_PKEYS",
    "HFI1_IB_CFG_MTU",
    "HFI1_IB_CFG_LSTATE",
    "HFI1_IB_CFG_VL_HIGH_LIMIT",
    "HFI1_IB_CFG_PMA_TICKS"
};
static const char *ib_cfg_name(int which)
{
    if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
        return "invalid";
    return ib_cfg_name_strings[which];
}
int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
{
    struct hfi1_devdata *dd = ppd->dd;
    int val = 0;

    switch (which) {
    case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
        val = ppd->link_width_enabled;
        break;
    case HFI1_IB_CFG_LWID: /* currently active Link-width */
        val = ppd->link_width_active;
        break;
    case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
        val = ppd->link_speed_enabled;
        break;
    case HFI1_IB_CFG_SPD: /* current Link speed */
        val = ppd->link_speed_active;
        break;

    case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
    case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
    case HFI1_IB_CFG_LINKLATENCY:
        goto unimplemented;

    case HFI1_IB_CFG_OP_VLS:
        val = ppd->actual_vls_operational;
        break;
    case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
        val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
        break;
    case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
        val = VL_ARB_LOW_PRIO_TABLE_SIZE;
        break;
    case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
        val = ppd->overrun_threshold;
        break;
    case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
        val = ppd->phy_error_threshold;
        break;
    case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
        val = dd->link_default;
        break;

    case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
    case HFI1_IB_CFG_PMA_TICKS:
    default:
unimplemented:
        if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
            dd_dev_info(dd,
                    "%s: which %s: not implemented\n",
                    __func__,
                    ib_cfg_name(which));
        break;
    }

    return val;
}
/*
 * The largest MAD packet size.
 */
#define MAX_MAD_PACKET 2048
/*
 * Return the maximum header bytes that can go on the _wire_
 * for this device. This count includes the ICRC which is
 * not part of the packet held in memory but it is appended
 * by the HW.
 * This is dependent on the device's receive header entry size.
 * HFI allows this to be set per-receive context, but the
 * driver presently enforces a global value.
 */
u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
{
    /*
     * The maximum non-payload (MTU) bytes in LRH.PktLen are
     * the Receive Header Entry Size minus the PBC (or RHF) size
     * plus one DW for the ICRC appended by HW.
     *
     * dd->rcd[0].rcvhdrqentsize is in DW.
     * We use rcd[0] as all context will have the same value. Also,
     * the first kernel context would have been allocated by now so
     * we are guaranteed a valid value.
     */
    return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
}
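/*
 * Worked example: with a hypothetical rcvhdrqentsize of 32 DWs, the
 * maximum wire header is (32 - 2 + 1) << 2 = 124 bytes.
 */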
/*
 * Set Send Length
 * @ppd - per port data
 *
 * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
 * registers compare against LRH.PktLen, so use the max bytes included
 * in the LRH.
 *
 * This routine changes all VL values except VL15, which it maintains at
 * the same value.
 */
static void set_send_length(struct hfi1_pportdata *ppd)
{
    struct hfi1_devdata *dd = ppd->dd;
    u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
    u32 maxvlmtu = dd->vld[15].mtu;
    u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
                  & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
        SEND_LEN_CHECK1_LEN_VL15_SHIFT;
    int i, j;
    u32 thres;

    for (i = 0; i < ppd->vls_supported; i++) {
        if (dd->vld[i].mtu > maxvlmtu)
            maxvlmtu = dd->vld[i].mtu;
        if (i <= 3)
            len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
                 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
                ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
        else
            len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
                 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
                ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
    }
    write_csr(dd, SEND_LEN_CHECK0, len1);
    write_csr(dd, SEND_LEN_CHECK1, len2);
    /* adjust kernel credit return thresholds based on new MTUs */
    /* all kernel receive contexts have the same hdrqentsize */
    for (i = 0; i < ppd->vls_supported; i++) {
        thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
                sc_mtu_to_threshold(dd->vld[i].sc,
                        dd->vld[i].mtu,
                        dd->rcd[0]->rcvhdrqentsize));
        for (j = 0; j < INIT_SC_PER_VL; j++)
            sc_set_cr_threshold(
                    pio_select_send_context_vl(dd, j, i),
                        thres);
    }
    thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
            sc_mtu_to_threshold(dd->vld[15].sc,
                    dd->vld[15].mtu,
                    dd->rcd[0]->rcvhdrqentsize));
    sc_set_cr_threshold(dd->vld[15].sc, thres);

    /* Adjust maximum MTU for the port in DC */
    dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
        (ilog2(maxvlmtu >> 8) + 1);
    len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
    len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
    len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
        DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
    write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
}
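/*
 * Worked example of the DC MTU cap encoding above: maxvlmtu == 8192
 * gives ilog2(8192 >> 8) + 1 == ilog2(32) + 1 == 6.  The 10240-byte
 * MTU cannot use that power-of-two formula (10240 >> 8 == 40 would
 * also round to 6), which is why it gets the dedicated
 * DCC_CFG_PORT_MTU_CAP_10240 encoding instead.
 */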
static void set_lidlmc(struct hfi1_pportdata *ppd)
{
    int i;
    u64 sreg = 0;
    struct hfi1_devdata *dd = ppd->dd;
    u32 mask = ~((1U << ppd->lmc) - 1);
    u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
    u32 lid;

    /*
     * Program 0 in CSR if port lid is extended. This prevents
     * 9B packets being sent out for large lids.
     */
    lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
    c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
        | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
    c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
            << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
          ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
            << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
    write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);

    /*
     * Iterate over all the send contexts and set their SLID check
     */
    sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
            SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
           (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
            SEND_CTXT_CHECK_SLID_VALUE_SHIFT);

    for (i = 0; i < dd->chip_send_contexts; i++) {
        hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
              i, (u32)sreg);
        write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
    }

    /* Now we have to do the same thing for the sdma engines */
    sdma_update_lmc(dd, mask, lid);
}
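/*
 * Worked example of the LMC masking above: with ppd->lid == 0x1000 and
 * ppd->lmc == 2, mask == ~0x3, so the SLID check accepts any source LID
 * in 0x1000-0x1003 while the DLID mask on the DC side is programmed the
 * same way.
 */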
static const char *state_completed_string(u32 completed)
{
    static const char * const state_completed[] = {
        "EstablishComm",
        "OptimizeEQ",
        "VerifyCap"
    };

    if (completed < ARRAY_SIZE(state_completed))
        return state_completed[completed];

    return "unknown";
}

static const char all_lanes_dead_timeout_expired[] =
    "All lanes were inactive – was the interconnect media removed?";
static const char tx_out_of_policy[] =
    "Passing lanes on local port do not meet the local link width policy";
static const char no_state_complete[] =
    "State timeout occurred before link partner completed the state";
static const char * const state_complete_reasons[] = {
    [0x00] = "Reason unknown",
    [0x01] = "Link was halted by driver, refer to LinkDownReason",
    [0x02] = "Link partner reported failure",
    [0x10] = "Unable to achieve frame sync on any lane",
    [0x11] =
      "Unable to find a common bit rate with the link partner",
    [0x12] =
      "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
    [0x13] =
      "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
    [0x14] = no_state_complete,
    [0x15] =
      "State timeout occurred before link partner identified equalization presets",
    [0x16] =
      "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
    [0x17] = tx_out_of_policy,
    [0x20] = all_lanes_dead_timeout_expired,
    [0x21] =
      "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
    [0x22] = no_state_complete,
    [0x23] =
      "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
    [0x24] = tx_out_of_policy,
    [0x30] = all_lanes_dead_timeout_expired,
    [0x31] =
      "State timeout occurred waiting for host to process received frames",
    [0x32] = no_state_complete,
    [0x33] =
      "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
    [0x34] = tx_out_of_policy
};

static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
                             u32 code)
{
    const char *str = NULL;

    if (code < ARRAY_SIZE(state_complete_reasons))
        str = state_complete_reasons[code];

    if (str)
        return str;
    return "Reserved";
}
/* describe the given last state complete frame */
static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
                  const char *prefix)
{
    struct hfi1_devdata *dd = ppd->dd;
    u32 success;
    u32 state;
    u32 reason;
    u32 lanes;

    /*
     * Decode frame:
     *  [ 0: 0] - success
     *  [ 3: 1] - state
     *  [ 7: 4] - next state timeout
     *  [15: 8] - reason code
     *  [31:16] - lanes
     */
    success = frame & 0x1;
    state = (frame >> 1) & 0x7;
    reason = (frame >> 8) & 0xff;
    lanes = (frame >> 16) & 0xffff;

    dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
           prefix, frame);
    dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
           state_completed_string(state), state);
    dd_dev_err(dd, "    state successfully completed: %s\n",
           success ? "yes" : "no");
    dd_dev_err(dd, "    fail reason 0x%x: %s\n",
           reason, state_complete_reason_code_string(ppd, reason));
    dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
}
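/*
 * Worked example of the frame layout above: frame 0x000f1100 decodes as
 * success == 0 (bit 0), state == (frame >> 1) & 0x7 == 0 (EstablishComm),
 * reason == 0x11 ("Unable to find a common bit rate with the link
 * partner"), and passing lane mask 0x000f.
 */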
/*
 * Read the last state complete frames and explain them.  This routine
 * expects to be called if the link went down during link negotiation
 * and initialization (LNI).  That is, anywhere between polling and link up.
 */
static void check_lni_states(struct hfi1_pportdata *ppd)
{
    u32 last_local_state;
    u32 last_remote_state;

    read_last_local_state(ppd->dd, &last_local_state);
    read_last_remote_state(ppd->dd, &last_remote_state);

    /*
     * Don't report anything if there is nothing to report.  A value of
     * 0 means the link was taken down while polling and there was no
     * training in-process.
     */
    if (last_local_state == 0 && last_remote_state == 0)
        return;

    decode_state_complete(ppd, last_local_state, "transmitted");
    decode_state_complete(ppd, last_remote_state, "received");
}
/* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
{
    u64 reg;
    unsigned long timeout;

    /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
    timeout = jiffies + msecs_to_jiffies(wait_ms);
    while (1) {
        reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
        if (reg)
            break;
        if (time_after(jiffies, timeout)) {
            dd_dev_err(dd,
                   "timeout waiting for LINK_TRANSFER_ACTIVE\n");
            return -ETIMEDOUT;
        }
        udelay(2);
    }
    return 0;
}

/* called when the logical link state is not down as it should be */
static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
{
    struct hfi1_devdata *dd = ppd->dd;

    /*
     * Bring link up in LCB loopback
     */
    write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
    write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
          DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);

    write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
    write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
    write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
    write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);

    write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
    (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
    udelay(3);
    write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
    write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);

    wait_link_transfer_active(dd, 100);

    /*
     * Bring the link down again.
     */
    write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
    write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
    write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);

    /* adjust ppd->statusp, if needed */
    update_statusp(ppd, IB_PORT_DOWN);

    dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
}
/*
 * Helper for set_link_state().  Do not call except from that routine.
 * Expects ppd->hls_mutex to be held.
 *
 * @rem_reason value to be sent to the neighbor
 *
 * LinkDownReasons only set if transition succeeds.
 */
static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
{
    struct hfi1_devdata *dd = ppd->dd;
    u32 previous_state;
    int offline_state_ret;
    int ret;

    update_lcb_cache(dd);

    previous_state = ppd->host_link_state;
    ppd->host_link_state = HLS_GOING_OFFLINE;

    /* start offline transition */
    ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
    if (ret != HCMD_SUCCESS) {
        dd_dev_err(dd,
               "Failed to transition to Offline link state, return %d\n",
               ret);
        return -EINVAL;
    }
    if (ppd->offline_disabled_reason ==
            HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
        ppd->offline_disabled_reason =
        HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);

    offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
    if (offline_state_ret < 0)
        return offline_state_ret;

    /* Disabling AOC transmitters */
    if (ppd->port_type == PORT_TYPE_QSFP &&
        ppd->qsfp_info.limiting_active &&
        qsfp_mod_present(ppd)) {
        int ret;

        ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
        if (ret == 0) {
            set_qsfp_tx(ppd, 0);
            release_chip_resource(dd, qsfp_resource(dd));
        } else {
            /* not fatal, but should warn */
            dd_dev_err(dd,
                   "Unable to acquire lock to turn off QSFP TX\n");
        }
    }

    /*
     * Wait for the offline.Quiet transition if it hasn't happened yet. It
     * can take a while for the link to go down.
     */
    if (offline_state_ret != PLS_OFFLINE_QUIET) {
        ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
        if (ret < 0)
            return ret;
    }

    /*
     * Now in charge of LCB - must be after the physical state is
     * offline.quiet and before host_link_state is changed.
     */
    set_host_lcb_access(dd);
    write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */

    /* make sure the logical state is also down */
    ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
    if (ret)
        force_logical_link_state_down(ppd);

    ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */

    /*
     * The LNI has a mandatory wait time after the physical state
     * moves to Offline.Quiet.  The wait time may be different
     * depending on how the link went down.  The 8051 firmware
     * will observe the needed wait time and only move to ready
     * when that is completed.  The largest of the quiet timeouts
     * is 6s, so wait that long and then at least 0.5s more for
     * other transitions, and another 0.5s for a buffer.
     */
    ret = wait_fm_ready(dd, 7000);
    if (ret) {
        dd_dev_err(dd,
               "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
        /* state is really offline, so make it so */
        ppd->host_link_state = HLS_DN_OFFLINE;
        return ret;
    }

    /*
     * The state is now offline and the 8051 is ready to accept host
     * requests.
     *	- change our state
     *	- notify others if we were previously in a linkup state
     */
    ppd->host_link_state = HLS_DN_OFFLINE;
    if (previous_state & HLS_UP) {
        /* went down while link was up */
        handle_linkup_change(dd, 0);
    } else if (previous_state
            & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
        /* went down while attempting link up */
        check_lni_states(ppd);

        /* The QSFP doesn't need to be reset on LNI failure */
        ppd->qsfp_info.reset_needed = 0;
    }

    /* the active link width (downgrade) is 0 on link down */
    ppd->link_width_active = 0;
    ppd->link_width_downgrade_tx_active = 0;
    ppd->link_width_downgrade_rx_active = 0;
    ppd->current_egress_rate = 0;
    return 0;
}
/* return the link state name */
static const char *link_state_name(u32 state)
{
    const char *name;
    int n = ilog2(state);
    static const char * const names[] = {
        [__HLS_UP_INIT_BP]	 = "INIT",
        [__HLS_UP_ARMED_BP]	 = "ARMED",
        [__HLS_UP_ACTIVE_BP]	 = "ACTIVE",
        [__HLS_DN_DOWNDEF_BP]	 = "DOWNDEF",
        [__HLS_DN_POLL_BP]	 = "POLL",
        [__HLS_DN_DISABLE_BP]	 = "DISABLE",
        [__HLS_DN_OFFLINE_BP]	 = "OFFLINE",
        [__HLS_VERIFY_CAP_BP]	 = "VERIFY_CAP",
        [__HLS_GOING_UP_BP]	 = "GOING_UP",
        [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
        [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
    };

    name = n < ARRAY_SIZE(names) ? names[n] : NULL;
    return name ? name : "unknown";
}
/* return the link state reason name */
static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
{
    if (state == HLS_UP_INIT) {
        switch (ppd->linkinit_reason) {
        case OPA_LINKINIT_REASON_LINKUP:
            return "(LINKUP)";
        case OPA_LINKINIT_REASON_FLAPPING:
            return "(FLAPPING)";
        case OPA_LINKINIT_OUTSIDE_POLICY:
            return "(OUTSIDE_POLICY)";
        case OPA_LINKINIT_QUARANTINED:
            return "(QUARANTINED)";
        case OPA_LINKINIT_INSUFIC_CAPABILITY:
            return "(INSUFIC_CAPABILITY)";
        default:
            break;
        }
    }
    return "";
}
/*
 * driver_pstate - convert the driver's notion of a port's
 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
 * Return -1 (converted to a u32) to indicate error.
 */
u32 driver_pstate(struct hfi1_pportdata *ppd)
{
    switch (ppd->host_link_state) {
    case HLS_UP_INIT:
    case HLS_UP_ARMED:
    case HLS_UP_ACTIVE:
        return IB_PORTPHYSSTATE_LINKUP;
    case HLS_DN_POLL:
        return IB_PORTPHYSSTATE_POLLING;
    case HLS_DN_DISABLE:
        return IB_PORTPHYSSTATE_DISABLED;
    case HLS_DN_OFFLINE:
        return OPA_PORTPHYSSTATE_OFFLINE;
    case HLS_VERIFY_CAP:
        return IB_PORTPHYSSTATE_POLLING;
    case HLS_GOING_UP:
        return IB_PORTPHYSSTATE_POLLING;
    case HLS_GOING_OFFLINE:
        return OPA_PORTPHYSSTATE_OFFLINE;
    case HLS_LINK_COOLDOWN:
        return OPA_PORTPHYSSTATE_OFFLINE;
    case HLS_DN_DOWNDEF:
    default:
        dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
               ppd->host_link_state);
        return -1;
    }
}
/*
 * driver_lstate - convert the driver's notion of a port's
 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
 * (converted to a u32) to indicate error.
 */
u32 driver_lstate(struct hfi1_pportdata *ppd)
{
    if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
        return IB_PORT_DOWN;

    switch (ppd->host_link_state & HLS_UP) {
    case HLS_UP_INIT:
        return IB_PORT_INIT;
    case HLS_UP_ARMED:
        return IB_PORT_ARMED;
    case HLS_UP_ACTIVE:
        return IB_PORT_ACTIVE;
    default:
        dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
               ppd->host_link_state);
        return -1;
    }
}
void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
              u8 neigh_reason, u8 rem_reason)
{
    if (ppd->local_link_down_reason.latest == 0 &&
        ppd->neigh_link_down_reason.latest == 0) {
        ppd->local_link_down_reason.latest = lcl_reason;
        ppd->neigh_link_down_reason.latest = neigh_reason;
        ppd->remote_link_down_reason = rem_reason;
    }
}

/**
 * data_vls_operational() - Verify if data VL BCT credits and MTU
 *			    are both set.
 * @ppd: pointer to hfi1_pportdata structure
 *
 * Return: true - Ok, false - otherwise.
 */
static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
{
    int i;
    u64 reg;

    if (!ppd->actual_vls_operational)
        return false;

    for (i = 0; i < ppd->vls_supported; i++) {
        reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
        if ((reg && !ppd->dd->vld[i].mtu) ||
            (!reg && ppd->dd->vld[i].mtu))
            return false;
    }

    return true;
}
/*
 * Change the physical and/or logical link state.
 *
 * Do not call this routine while inside an interrupt.  It contains
 * calls to routines that can take multiple seconds to finish.
 *
 * Returns 0 on success, -errno on failure.
 */
int set_link_state(struct hfi1_pportdata *ppd, u32 state)
{
    struct hfi1_devdata *dd = ppd->dd;
    struct ib_event event = {.device = NULL};
    int ret1, ret = 0;
    int orig_new_state, poll_bounce;

    mutex_lock(&ppd->hls_lock);

    orig_new_state = state;
    if (state == HLS_DN_DOWNDEF)
        state = dd->link_default;

    /* interpret poll -> poll as a link bounce */
    poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
              state == HLS_DN_POLL;

    dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
            link_state_name(ppd->host_link_state),
            link_state_name(orig_new_state),
            poll_bounce ? "(bounce) " : "",
            link_state_reason_name(ppd, state));

    /*
     * If we're going to a (HLS_*) link state that implies the logical
     * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
     * reset is_sm_config_started to 0.
     */
    if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
        ppd->is_sm_config_started = 0;

    /*
     * Do nothing if the states match.  Let a poll to poll link bounce
     * go through.
     */
    if (ppd->host_link_state == state && !poll_bounce)
        goto done;

    switch (state) {
    case HLS_UP_INIT:
        if (ppd->host_link_state == HLS_DN_POLL &&
            (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
            /*
             * Quick link up jumps from polling to here.
             *
             * Whether in normal or loopback mode, the
             * simulator jumps from polling to link up.
             * Accept that here.
             */
            /* OK */
        } else if (ppd->host_link_state != HLS_GOING_UP) {
            goto unexpected;
        }

        /*
         * Wait for Link_Up physical state.
         * Physical and Logical states should already be
         * transitioned to LinkUp and LinkInit respectively.
         */
        ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
        if (ret) {
            dd_dev_err(dd,
                   "%s: physical state did not change to LINK-UP\n",
                   __func__);
            goto done;
        }

        ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
        if (ret) {
            dd_dev_err(dd,
                   "%s: logical state did not change to INIT\n",
                   __func__);
            goto done;
        }

        /* clear old transient LINKINIT_REASON code */
        if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
            ppd->linkinit_reason =
                OPA_LINKINIT_REASON_LINKUP;

        /* enable the port */
        add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);

        handle_linkup_change(dd, 1);
        pio_kernel_linkup(dd);

        ppd->host_link_state = HLS_UP_INIT;
        break;
    case HLS_UP_ARMED:
        if (ppd->host_link_state != HLS_UP_INIT)
            goto unexpected;

        if (!data_vls_operational(ppd)) {
            dd_dev_err(dd,
                   "%s: Invalid data VL credits or mtu\n",
                   __func__);
            ret = -EINVAL;
            break;
        }

        set_logical_state(dd, LSTATE_ARMED);
        ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
        if (ret) {
            dd_dev_err(dd,
                   "%s: logical state did not change to ARMED\n",
                   __func__);
            break;
        }
        ppd->host_link_state = HLS_UP_ARMED;
        /*
         * The simulator does not currently implement SMA messages,
         * so neighbor_normal is not set.  Set it here when we first
         * move to Armed.
         */
        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
            ppd->neighbor_normal = 1;
        break;
    case HLS_UP_ACTIVE:
        if (ppd->host_link_state != HLS_UP_ARMED)
            goto unexpected;

        set_logical_state(dd, LSTATE_ACTIVE);
        ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
        if (ret) {
            dd_dev_err(dd,
                   "%s: logical state did not change to ACTIVE\n",
                   __func__);
        } else {
            /* tell all engines to go running */
            sdma_all_running(dd);
            ppd->host_link_state = HLS_UP_ACTIVE;

            /* signal the IB layer that the port has gone active */
            event.device = &dd->verbs_dev.rdi.ibdev;
            event.element.port_num = ppd->port;
            event.event = IB_EVENT_PORT_ACTIVE;
        }
        break;
    case HLS_DN_POLL:
        if ((ppd->host_link_state == HLS_DN_DISABLE ||
             ppd->host_link_state == HLS_DN_OFFLINE) &&
            dd->dc_shutdown)
            dc_start(dd);
        /* Hand LED control to the DC */
        write_csr(dd, DCC_CFG_LED_CNTRL, 0);

        if (ppd->host_link_state != HLS_DN_OFFLINE) {
            u8 tmp = ppd->link_enabled;

            ret = goto_offline(ppd, ppd->remote_link_down_reason);
            if (ret) {
                ppd->link_enabled = tmp;
                break;
            }
            ppd->remote_link_down_reason = 0;
        }

        if (ppd->driver_link_ready)
            ppd->link_enabled = 1;

        set_all_slowpath(ppd->dd);
        ret = set_local_link_attributes(ppd);
        if (ret)
            break;

        ppd->port_error_action = 0;

        if (quick_linkup) {
            /* quick linkup does not go into polling */
            ret = do_quick_linkup(dd);
        } else {
            ret1 = set_physical_link_state(dd, PLS_POLLING);
            if (!ret1)
                ret1 = wait_phys_link_out_of_offline(ppd,
                                     3000);
            if (ret1 != HCMD_SUCCESS) {
                dd_dev_err(dd,
                       "Failed to transition to Polling link state, return 0x%x\n",
                       ret1);
                ret = -EINVAL;
            }
        }

        /*
         * Change the host link state after requesting DC8051 to
         * change its physical state so that we can ignore any
         * interrupt with stale LNI(XX) error, which will not be
         * cleared until DC8051 transitions to Polling state.
         */
        ppd->host_link_state = HLS_DN_POLL;
        ppd->offline_disabled_reason =
            HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
        /*
         * If an error occurred above, go back to offline.  The
         * caller may reschedule another attempt.
         */
        if (ret)
            goto_offline(ppd, 0);
        else
            log_physical_state(ppd, PLS_POLLING);
        break;
    case HLS_DN_DISABLE:
        /* link is disabled */
        ppd->link_enabled = 0;

        /* allow any state to transition to disabled */

        /* must transition to offline first */
        if (ppd->host_link_state != HLS_DN_OFFLINE) {
            ret = goto_offline(ppd, ppd->remote_link_down_reason);
            if (ret)
                break;
            ppd->remote_link_down_reason = 0;
        }

        if (!dd->dc_shutdown) {
            ret1 = set_physical_link_state(dd, PLS_DISABLED);
            if (ret1 != HCMD_SUCCESS) {
                dd_dev_err(dd,
                       "Failed to transition to Disabled link state, return 0x%x\n",
                       ret1);
                ret = -EINVAL;
                break;
            }
            ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
            if (ret) {
                dd_dev_err(dd,
                       "%s: physical state did not change to DISABLED\n",
                       __func__);
                break;
            }
            dc_shutdown(dd);
        }
        ppd->host_link_state = HLS_DN_DISABLE;
        break;
    case HLS_DN_OFFLINE:
        if (ppd->host_link_state == HLS_DN_DISABLE)
            dc_start(dd);

        /* allow any state to transition to offline */
        ret = goto_offline(ppd, ppd->remote_link_down_reason);
        if (!ret)
            ppd->remote_link_down_reason = 0;
        break;
    case HLS_VERIFY_CAP:
        if (ppd->host_link_state != HLS_DN_POLL)
            goto unexpected;
        ppd->host_link_state = HLS_VERIFY_CAP;
        log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
        break;
    case HLS_GOING_UP:
        if (ppd->host_link_state != HLS_VERIFY_CAP)
            goto unexpected;

        ret1 = set_physical_link_state(dd, PLS_LINKUP);
        if (ret1 != HCMD_SUCCESS) {
            dd_dev_err(dd,
                   "Failed to transition to link up state, return 0x%x\n",
                   ret1);
            ret = -EINVAL;
            break;
        }
        ppd->host_link_state = HLS_GOING_UP;
        break;

    case HLS_GOING_OFFLINE:		/* transient within goto_offline() */
    case HLS_LINK_COOLDOWN:		/* transient within goto_offline() */
    default:
        dd_dev_info(dd, "%s: state 0x%x: not supported\n",
                __func__, state);
        ret = -EINVAL;
        break;
    }

    goto done;

unexpected:
    dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
           __func__, link_state_name(ppd->host_link_state),
           link_state_name(state));
    ret = -EINVAL;

done:
    mutex_unlock(&ppd->hls_lock);

    if (event.device)
        ib_dispatch_event(&event);

    return ret;
}
int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
{
    u64 reg;
    int ret = 0;

    switch (which) {
    case HFI1_IB_CFG_LIDLMC:
        set_lidlmc(ppd);
        break;
    case HFI1_IB_CFG_VL_HIGH_LIMIT:
        /*
         * The VL Arbitrator high limit is sent in units of 4k
         * bytes, while HFI stores it in units of 64 bytes.
         */
        val *= 4096 / 64;
        reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
            << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
        write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
        break;
    case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
        /* HFI only supports POLL as the default link down state */
        if (val != HLS_DN_POLL)
            ret = -EINVAL;
        break;
    case HFI1_IB_CFG_OP_VLS:
        if (ppd->vls_operational != val) {
            ppd->vls_operational = val;
            if (!ppd->port)
                ret = -EINVAL;
        }
        break;
    /*
     * For link width, link width downgrade, and speed enable, always AND
     * the setting with what is actually supported.  This has two benefits.
     * First, enabled can't have unsupported values, no matter what the
     * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
     * "fill in with your supported value" have all the bits in the
     * field set, so simply ANDing with supported has the desired result.
     */
    case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
        ppd->link_width_enabled = val & ppd->link_width_supported;
        break;
    case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
        ppd->link_width_downgrade_enabled =
            val & ppd->link_width_downgrade_supported;
        break;
    case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
        ppd->link_speed_enabled = val & ppd->link_speed_supported;
        break;
    case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
        /*
         * HFI does not follow IB specs, save this value
         * so we can report it, if asked.
         */
        ppd->overrun_threshold = val;
        break;
    case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
        /*
         * HFI does not follow IB specs, save this value
         * so we can report it, if asked.
         */
        ppd->phy_error_threshold = val;
        break;

    case HFI1_IB_CFG_MTU:
        set_send_length(ppd);
        break;

    case HFI1_IB_CFG_PKEYS:
        if (HFI1_CAP_IS_KSET(PKEY_CHECK))
            set_partition_keys(ppd);
        break;

    default:
        if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
            dd_dev_info(ppd->dd,
                    "%s: which %s, val 0x%x: not implemented\n",
                    __func__, ib_cfg_name(which), val);
        break;
    }

    return ret;
}
/* begin functions related to vl arbitration table caching */
static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
{
    int i;

    BUILD_BUG_ON(VL_ARB_TABLE_SIZE != VL_ARB_LOW_PRIO_TABLE_SIZE);
    BUILD_BUG_ON(VL_ARB_TABLE_SIZE != VL_ARB_HIGH_PRIO_TABLE_SIZE);

    /*
     * Note that we always return values directly from the
     * 'vl_arb_cache' (and do no CSR reads) in response to a
     * 'Get(VLArbTable)'. This is obviously correct after a
     * 'Set(VLArbTable)', since the cache will then be up to
     * date. But it's also correct prior to any 'Set(VLArbTable)'
     * since then both the cache, and the relevant h/w registers
     * will be zeroed.
     */

    for (i = 0; i < MAX_PRIO_TABLE; i++)
        spin_lock_init(&ppd->vl_arb_cache[i].lock);
}
/*
 * vl_arb_lock_cache
 *
 * All other vl_arb_* functions should be called only after locking
 * the cache.
 */
static inline struct vl_arb_cache *
vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
{
    if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
        return NULL;
    spin_lock(&ppd->vl_arb_cache[idx].lock);
    return &ppd->vl_arb_cache[idx];
}

static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
{
    spin_unlock(&ppd->vl_arb_cache[idx].lock);
}

static void vl_arb_get_cache(struct vl_arb_cache *cache,
                 struct ib_vl_weight_elem *vl)
{
    memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static void vl_arb_set_cache(struct vl_arb_cache *cache,
                 struct ib_vl_weight_elem *vl)
{
    memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

static int vl_arb_match_cache(struct vl_arb_cache *cache,
                  struct ib_vl_weight_elem *vl)
{
    return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
}

/* end functions related to vl arbitration table caching */
static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
              u32 size, struct ib_vl_weight_elem *vl)
{
    struct hfi1_devdata *dd = ppd->dd;
    u64 reg;
    unsigned int i, is_up = 0;
    int drain, ret = 0;

    mutex_lock(&ppd->hls_lock);

    if (ppd->host_link_state & HLS_UP)
        is_up = 1;

    drain = !is_ax(dd) && is_up;

    if (drain)
        /*
         * Before adjusting VL arbitration weights, empty per-VL
         * FIFOs, otherwise a packet whose VL weight is being
         * set to 0 could get stuck in a FIFO with no chance to
         * egress.
         */
        ret = stop_drain_data_vls(dd);

    if (ret) {
        dd_dev_err(dd,
               "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
               __func__);
        goto err;
    }

    for (i = 0; i < size; i++, vl++) {
        /*
         * NOTE: The low priority shift and mask are used here, but
         * they are the same for both the low and high registers.
         */
        reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
                << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
              | (((u64)vl->weight
                & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
                << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
        write_csr(dd, target + (i * 8), reg);
    }
    pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);

    if (drain)
        open_fill_data_vls(dd); /* reopen all VLs */

err:
    mutex_unlock(&ppd->hls_lock);

    return ret;
}
/*
 * Read one credit merge VL register.
 */
static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
               struct vl_limit *vll)
{
    u64 reg = read_csr(dd, csr);

    vll->dedicated = cpu_to_be16(
        (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
        & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
    vll->shared = cpu_to_be16(
        (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
        & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
}
/*
 * Read the current credit merge limits.
 */
static int get_buffer_control(struct hfi1_devdata *dd,
                  struct buffer_control *bc, u16 *overall_limit)
{
    u64 reg;
    int i;

    /* not all entries are filled in */
    memset(bc, 0, sizeof(*bc));

    /* OPA and HFI have a 1-1 mapping */
    for (i = 0; i < TXE_NUM_DATA_VL; i++)
        read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);

    /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
    read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);

    reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
    bc->overall_shared_limit = cpu_to_be16(
        (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
        & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
    if (overall_limit)
        *overall_limit = (reg
            >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
            & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
    return sizeof(struct buffer_control);
}
static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
    u64 reg;
    int i;

    /* each register contains 16 SC->VLnt mappings, 4 bits each */
    reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
    for (i = 0; i < sizeof(u64); i++) {
        u8 byte = *(((u8 *)&reg) + i);

        dp->vlnt[2 * i] = byte & 0xf;
        dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
    }

    reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
    for (i = 0; i < sizeof(u64); i++) {
        u8 byte = *(((u8 *)&reg) + i);

        dp->vlnt[16 + (2 * i)] = byte & 0xf;
        dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
    }
    return sizeof(struct sc2vlnt);
}
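/*
 * Example of the nibble unpacking above: if the low byte of
 * DCC_CFG_SC_VL_TABLE_15_0 reads 0x31, then dp->vlnt[0] == 0x1 (SC0)
 * and dp->vlnt[1] == 0x3 (SC1).
 */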
static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
                  struct ib_vl_weight_elem *vl)
{
    unsigned int i;

    for (i = 0; i < nelems; i++, vl++) {
        vl->vl = 0xf;
        vl->weight = 0;
    }
}
static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
{
    write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
          DC_SC_VL_VAL(15_0,
                   0, dp->vlnt[0] & 0xf,
                   1, dp->vlnt[1] & 0xf,
                   2, dp->vlnt[2] & 0xf,
                   3, dp->vlnt[3] & 0xf,
                   4, dp->vlnt[4] & 0xf,
                   5, dp->vlnt[5] & 0xf,
                   6, dp->vlnt[6] & 0xf,
                   7, dp->vlnt[7] & 0xf,
                   8, dp->vlnt[8] & 0xf,
                   9, dp->vlnt[9] & 0xf,
                   10, dp->vlnt[10] & 0xf,
                   11, dp->vlnt[11] & 0xf,
                   12, dp->vlnt[12] & 0xf,
                   13, dp->vlnt[13] & 0xf,
                   14, dp->vlnt[14] & 0xf,
                   15, dp->vlnt[15] & 0xf));
    write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
          DC_SC_VL_VAL(31_16,
                   16, dp->vlnt[16] & 0xf,
                   17, dp->vlnt[17] & 0xf,
                   18, dp->vlnt[18] & 0xf,
                   19, dp->vlnt[19] & 0xf,
                   20, dp->vlnt[20] & 0xf,
                   21, dp->vlnt[21] & 0xf,
                   22, dp->vlnt[22] & 0xf,
                   23, dp->vlnt[23] & 0xf,
                   24, dp->vlnt[24] & 0xf,
                   25, dp->vlnt[25] & 0xf,
                   26, dp->vlnt[26] & 0xf,
                   27, dp->vlnt[27] & 0xf,
                   28, dp->vlnt[28] & 0xf,
                   29, dp->vlnt[29] & 0xf,
                   30, dp->vlnt[30] & 0xf,
                   31, dp->vlnt[31] & 0xf));
}
static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
            u16 limit)
{
    if (limit != 0)
        dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
                what, (int)limit, idx);
}

/* change only the shared limit portion of SendCmGlobalCredit */
static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
{
    u64 reg;

    reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
    reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
    reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
    write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}

/* change only the total credit limit portion of SendCmGlobalCredit */
static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
{
    u64 reg;

    reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
    reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
    reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
    write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
}
/* set the given per-VL shared limit */
static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
{
    u64 reg;
    u32 addr;

    if (vl < TXE_NUM_DATA_VL)
        addr = SEND_CM_CREDIT_VL + (8 * vl);
    else
        addr = SEND_CM_CREDIT_VL15;

    reg = read_csr(dd, addr);
    reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
    reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
    write_csr(dd, addr, reg);
}

/* set the given per-VL dedicated limit */
static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
{
    u64 reg;
    u32 addr;

    if (vl < TXE_NUM_DATA_VL)
        addr = SEND_CM_CREDIT_VL + (8 * vl);
    else
        addr = SEND_CM_CREDIT_VL15;

    reg = read_csr(dd, addr);
    reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
    reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
    write_csr(dd, addr, reg);
}
/* spin until the given per-VL status mask bits clear */
static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
                     const char *which)
{
    unsigned long timeout;
    u64 reg;

    timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
    while (1) {
        reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;

        if (reg == 0)
            return; /* success */
        if (time_after(jiffies, timeout))
            break; /* timed out */
        udelay(5);
    }

    dd_dev_err(dd,
           "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
           which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
    /*
     * If this occurs, it is likely there was a credit loss on the link.
     * The only recovery from that is a link bounce.
     */
    dd_dev_err(dd,
           "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
}
/*
 * The number of credits on the VLs may be changed while everything
 * is "live", but the following algorithm must be followed due to
 * how the hardware is actually implemented.  In particular,
 * Return_Credit_Status[] is the only correct status check.
 *
 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
 *     set Global_Shared_Credit_Limit = 0
 *     use_all_vl = 1
 * mask0 = all VLs that are changing either dedicated or shared limits
 * set Shared_Limit[mask0] = 0
 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
 * if (changing any dedicated limit)
 *     mask1 = all VLs that are lowering dedicated limits
 *     lower Dedicated_Limit[mask1]
 *     spin until Return_Credit_Status[mask1] == 0
 * raise Dedicated_Limits
 * raise Shared_Limits
 * raise Global_Shared_Credit_Limit
 *
 * lower = if the new limit is lower, set the limit to the new value
 * raise = if the new limit is higher than the current value (may be changed
 *	earlier in the algorithm), set the new limit to the new value
 */
int set_buffer_control(struct hfi1_pportdata *ppd,
               struct buffer_control *new_bc)
{
    struct hfi1_devdata *dd = ppd->dd;
    u64 changing_mask, ld_mask, stat_mask;
    int change_count;
    int i, use_all_mask;
    int this_shared_changing;
    int vl_count = 0, ret;
    /*
     * A0: add the variable any_shared_limit_changing below and in the
     * algorithm above.  If removing A0 support, it can be removed.
     */
    int any_shared_limit_changing;
    struct buffer_control cur_bc;
    u8 changing[OPA_MAX_VLS];
    u8 lowering_dedicated[OPA_MAX_VLS];
    u16 cur_total;
    u32 new_total = 0;
    const u64 all_mask =
    SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
     | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
     | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
     | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
     | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
     | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
     | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
     | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
     | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;

#define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
#define NUM_USABLE_VLS 16	/* look at VL15 and less */

    /* find the new total credits, do sanity check on unused VLs */
    for (i = 0; i < OPA_MAX_VLS; i++) {
        if (valid_vl(i)) {
            new_total += be16_to_cpu(new_bc->vl[i].dedicated);
            continue;
        }
        nonzero_msg(dd, i, "dedicated",
                be16_to_cpu(new_bc->vl[i].dedicated));
        nonzero_msg(dd, i, "shared",
                be16_to_cpu(new_bc->vl[i].shared));
        new_bc->vl[i].dedicated = 0;
        new_bc->vl[i].shared = 0;
    }
    new_total += be16_to_cpu(new_bc->overall_shared_limit);

    /* fetch the current values */
    get_buffer_control(dd, &cur_bc, &cur_total);

    /*
     * Create the masks we will use.
     */
    memset(changing, 0, sizeof(changing));
    memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
    /*
     * NOTE: Assumes that the individual VL bits are adjacent and in
     * increasing order
     */
    stat_mask =
        SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
    changing_mask = 0;
    ld_mask = 0;
    change_count = 0;
    any_shared_limit_changing = 0;
    for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
        if (!valid_vl(i))
            continue;
        this_shared_changing = new_bc->vl[i].shared
                        != cur_bc.vl[i].shared;
        if (this_shared_changing)
            any_shared_limit_changing = 1;
        if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
            this_shared_changing) {
            changing[i] = 1;
            changing_mask |= stat_mask;
            change_count++;
        }
        if (be16_to_cpu(new_bc->vl[i].dedicated) <
                    be16_to_cpu(cur_bc.vl[i].dedicated)) {
            lowering_dedicated[i] = 1;
            ld_mask |= stat_mask;
        }
    }

    /* bracket the credit change with a total adjustment */
    if (new_total > cur_total)
        set_global_limit(dd, new_total);

    /*
     * Start the credit change algorithm.
     */
    use_all_mask = 0;
    if ((be16_to_cpu(new_bc->overall_shared_limit) <
         be16_to_cpu(cur_bc.overall_shared_limit)) ||
        (is_ax(dd) && any_shared_limit_changing)) {
        set_global_shared(dd, 0);
        cur_bc.overall_shared_limit = 0;
        use_all_mask = 1;
    }

    for (i = 0; i < NUM_USABLE_VLS; i++) {
        if (!valid_vl(i))
            continue;

        if (changing[i]) {
            set_vl_shared(dd, i, 0);
            cur_bc.vl[i].shared = 0;
        }
    }

    wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
                 "shared");

    if (change_count > 0) {
        for (i = 0; i < NUM_USABLE_VLS; i++) {
            if (!valid_vl(i))
                continue;

            if (lowering_dedicated[i]) {
                set_vl_dedicated(dd, i,
                         be16_to_cpu(new_bc->
                                 vl[i].dedicated));
                cur_bc.vl[i].dedicated =
                        new_bc->vl[i].dedicated;
            }
        }

        wait_for_vl_status_clear(dd, ld_mask, "dedicated");

        /* now raise all dedicated that are going up */
        for (i = 0; i < NUM_USABLE_VLS; i++) {
            if (!valid_vl(i))
                continue;

            if (be16_to_cpu(new_bc->vl[i].dedicated) >
                    be16_to_cpu(cur_bc.vl[i].dedicated))
                set_vl_dedicated(dd, i,
                         be16_to_cpu(new_bc->
                                 vl[i].dedicated));
        }
    }

    /* next raise all shared that are going up */
    for (i = 0; i < NUM_USABLE_VLS; i++) {
        if (!valid_vl(i))
            continue;

        if (be16_to_cpu(new_bc->vl[i].shared) >
                be16_to_cpu(cur_bc.vl[i].shared))
            set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
    }

    /* finally raise the global shared */
    if (be16_to_cpu(new_bc->overall_shared_limit) >
        be16_to_cpu(cur_bc.overall_shared_limit))
        set_global_shared(dd,
                  be16_to_cpu(new_bc->overall_shared_limit));

    /* bracket the credit change with a total adjustment */
    if (new_total < cur_total)
        set_global_limit(dd, new_total);

    /*
     * Determine the actual number of operational VLS using the number of
     * dedicated and shared credits for each VL.
     */
    if (change_count > 0) {
        for (i = 0; i < TXE_NUM_DATA_VL; i++)
            if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
                be16_to_cpu(new_bc->vl[i].shared) > 0)
                vl_count++;
        ppd->actual_vls_operational = vl_count;
        ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
                    ppd->actual_vls_operational :
                    ppd->vls_operational,
                    NULL);
        if (ret == 0)
            ret = pio_map_init(dd, ppd->port - 1, vl_count ?
                       ppd->actual_vls_operational :
                       ppd->vls_operational, NULL);
        if (ret)
            return ret;
    }
    return 0;
}
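/*
 * Walkthrough of the algorithm above with made-up credit counts:
 * suppose only VL0 changes, raising its dedicated limit 50 -> 80 with
 * all shared limits untouched.  Then changing[0] == 1 (dedicated
 * differs), lowering_dedicated[0] == 0, the global total is raised
 * first, VL0's shared limit is zeroed and polled until its
 * Return_Credit_Status bit clears, the dedicated limit is raised, the
 * shared limit is restored on the way back up, and the final global
 * total adjustment is a no-op.
 */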
/*
 * Read the given fabric manager table. Return the size of the
 * table (in bytes) on success, and a negative error code on
 * failure.
 */
int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
{
    int size;
    struct vl_arb_cache *vlc;

    switch (which) {
    case FM_TBL_VL_HIGH_ARB:
        size = 256;
        /*
         * OPA specifies 128 elements (of 2 bytes each), though
         * HFI supports only 16 elements in h/w.
         */
        vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
        vl_arb_get_cache(vlc, t);
        vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
        break;
    case FM_TBL_VL_LOW_ARB:
        size = 256;
        /*
         * OPA specifies 128 elements (of 2 bytes each), though
         * HFI supports only 16 elements in h/w.
         */
        vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
        vl_arb_get_cache(vlc, t);
        vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
        break;
    case FM_TBL_BUFFER_CONTROL:
        size = get_buffer_control(ppd->dd, t, NULL);
        break;
    case FM_TBL_SC2VLNT:
        size = get_sc2vlnt(ppd->dd, t);
        break;
    case FM_TBL_VL_PREEMPT_ELEMS:
        size = 256;
        /* OPA specifies 128 elements, of 2 bytes each */
        get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
        break;
    case FM_TBL_VL_PREEMPT_MATRIX:
        size = 256;
        /*
         * OPA specifies that this is the same size as the VL
         * arbitration tables (i.e., 256 bytes).
         */
        break;
    default:
        return -EINVAL;
    }

    return size;
}
/*
 * Write the given fabric manager table.
 */
int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
{
    int ret = 0;
    struct vl_arb_cache *vlc;

    switch (which) {
    case FM_TBL_VL_HIGH_ARB:
        vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
        if (vl_arb_match_cache(vlc, t)) {
            vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
            break;
        }
        vl_arb_set_cache(vlc, t);
        vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
        ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
                     VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
        break;
    case FM_TBL_VL_LOW_ARB:
        vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
        if (vl_arb_match_cache(vlc, t)) {
            vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
            break;
        }
        vl_arb_set_cache(vlc, t);
        vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
        ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
                     VL_ARB_LOW_PRIO_TABLE_SIZE, t);
        break;
    case FM_TBL_BUFFER_CONTROL:
        ret = set_buffer_control(ppd, t);
        break;
    case FM_TBL_SC2VLNT:
        set_sc2vlnt(ppd->dd, t);
        break;
    default:
        ret = -EINVAL;
    }

    return ret;
}
/*
 * Disable all data VLs.
 *
 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
 */
static int disable_data_vls(struct hfi1_devdata *dd)
{
    if (is_ax(dd))
        return 1;

    pio_send_control(dd, PSC_DATA_VL_DISABLE);

    return 0;
}

/*
 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
 * Just re-enables all data VLs (the "fill" part happens
 * automatically - the name was chosen for symmetry with
 * stop_drain_data_vls()).
 *
 * Return 0 if successful, non-zero if the VLs cannot be enabled.
 */
int open_fill_data_vls(struct hfi1_devdata *dd)
{
    if (is_ax(dd))
        return 1;

    pio_send_control(dd, PSC_DATA_VL_ENABLE);

    return 0;
}

/*
 * drain_data_vls() - assumes that disable_data_vls() has been called,
 * wait for occupancy (of per-VL FIFOs) for all contexts, and SDMA
 * engines to drop to 0.
 */
static void drain_data_vls(struct hfi1_devdata *dd)
{
    sc_wait(dd);
    sdma_wait(dd);
    pause_for_credit_return(dd);
}

/*
 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
 *
 * Use open_fill_data_vls() to resume using data VLs.  This pair is
 * meant to be used like this:
 *
 * stop_drain_data_vls(dd);
 * // do things with per-VL resources
 * open_fill_data_vls(dd);
 */
int stop_drain_data_vls(struct hfi1_devdata *dd)
{
    int ret;

    ret = disable_data_vls(dd);
    if (ret == 0)
        drain_data_vls(dd);

    return ret;
}
11683 * Convert a nanosecond time to a cclock count. No matter how slow
11684 * the cclock, a non-zero ns will always have a non-zero result.
11686 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11690 if (dd->icode == ICODE_FPGA_EMULATION)
11691 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11692 else /* simulation pretends to be ASIC */
11693 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11694 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11700 * Convert a cclock count to nanoseconds. No matter how slow
11701 * the cclock, a non-zero cclocks will always have a non-zero result.
11703 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11707 if (dd->icode == ICODE_FPGA_EMULATION)
11708 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11709 else /* simulation pretends to be ASIC */
11710 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11711 if (cclocks && !ns)
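/*
 * Worked example for the two conversion helpers above (illustrative;
 * assumes a hypothetical cclock period of 2000 ps, i.e. 500 MHz - see
 * FPGA_CCLOCK_PS/ASIC_CCLOCK_PS for the real values):
 *	ns_to_cclock(dd, 1)  -> (1 * 1000) / 2000 = 0, clamped up to 1
 *	cclock_to_ns(dd, 1)  -> (1 * 2000) / 1000 = 2 ns
 * The clamp is what guarantees that a non-zero input never silently
 * becomes a zero count.
 */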
11717 * Dynamically adjust the receive interrupt timeout for a context based on
11718 * incoming packet rate.
11720 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11722 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11724 struct hfi1_devdata *dd = rcd->dd;
11725 u32 timeout = rcd->rcvavail_timeout;
11728 * This algorithm doubles or halves the timeout depending on whether
11729 * the number of packets received in this interrupt was less than or
11730 * greater than or equal to the interrupt count.
11732 * The calculations below do not allow a steady state to be achieved.
11733 * Only at the endpoints is it possible to have an unchanging
11736 if (npkts < rcv_intr_count) {
11738 * Not enough packets arrived before the timeout, adjust
11739 * timeout downward.
11741 if (timeout < 2) /* already at minimum? */
11746 * More than enough packets arrived before the timeout, adjust
11749 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11751 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11754 rcd->rcvavail_timeout = timeout;
11756 * timeout cannot be larger than rcv_intr_timeout_csr which has already
11757 * been verified to be in range
11759 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11761 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
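/*
 * Example trace of the timeout adjustment above (illustrative numbers,
 * assuming rcv_intr_count = 16 and rcv_intr_timeout_csr = 840):
 *	npkts = 4,  timeout 840 -> 420  (too few packets: halve)
 *	npkts = 64, timeout 420 -> 840  (enough packets: double, capped)
 * As noted above, the value ping-pongs unless pinned at an endpoint.
 */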
11764 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11765 u32 intr_adjust, u32 npkts)
11767 struct hfi1_devdata *dd = rcd->dd;
11769 u32 ctxt = rcd->ctxt;
11772 * Need to write timeout register before updating RcvHdrHead to ensure
11773 * that a new value is used when the HW decides to restart counting.
11776 adjust_rcv_timeout(rcd, npkts);
11778 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11779 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11780 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11783 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11784 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11785 << RCV_HDR_HEAD_HEAD_SHIFT);
11786 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11790 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11794 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11795 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11797 if (rcd->rcvhdrtail_kvaddr)
11798 tail = get_rcvhdrtail(rcd);
11800 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11802 return head == tail;
11806 * Context Control and Receive Array encoding for buffer size:
11815 * 0x8 512 KB (Receive Array only)
11816 * 0x9 1 MB (Receive Array only)
11817 * 0xa 2 MB (Receive Array only)
11819 * 0xB-0xF - reserved (Receive Array only)
11822 * This routine assumes that the value has already been sanity checked.
11824 static u32 encoded_size(u32 size)
11827 case 4 * 1024: return 0x1;
11828 case 8 * 1024: return 0x2;
11829 case 16 * 1024: return 0x3;
11830 case 32 * 1024: return 0x4;
11831 case 64 * 1024: return 0x5;
11832 case 128 * 1024: return 0x6;
11833 case 256 * 1024: return 0x7;
11834 case 512 * 1024: return 0x8;
11835 case 1 * 1024 * 1024: return 0x9;
11836 case 2 * 1024 * 1024: return 0xa;
11838 return 0x1; /* if invalid, go with the minimum size */
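/*
 * Examples for encoded_size() above: a 64 KB eager buffer encodes as
 * 0x5, while an unsupported size such as 48 KB falls through to the
 * 4 KB encoding (0x1). Callers are expected to have validated the
 * size already, so the fallback should never be hit in practice.
 */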
11841 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11842 struct hfi1_ctxtdata *rcd)
11845 int did_enable = 0;
11853 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11855 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11856 /* if the context is already enabled, don't do the extra steps */
11857 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11858 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11859 /* reset the tail and hdr addresses, and sequence count */
11860 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11862 if (rcd->rcvhdrtail_kvaddr)
11863 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11864 rcd->rcvhdrqtailaddr_dma);
11867 /* reset the cached receive header queue head value */
11871 * Zero the receive header queue so we don't get false
11872 * positives when checking the sequence number. The
11873 * sequence numbers could land exactly on the same spot.
11874 * E.g. an rcd restart before the receive header queue wrapped.
11876 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11878 /* starting timeout */
11879 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11881 /* enable the context */
11882 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11884 /* clean the egr buffer size first */
11885 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11886 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11887 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11888 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11890 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11891 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11894 /* zero RcvEgrIndexHead */
11895 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11897 /* set eager count and base index */
11898 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11899 & RCV_EGR_CTRL_EGR_CNT_MASK)
11900 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11901 (((rcd->eager_base >> RCV_SHIFT)
11902 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11903 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11904 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11907 * Set TID (expected) count and base index.
11908 * rcd->expected_count is set to individual RcvArray entries,
11909 * not pairs, and the CSR takes a pair-count in groups of
11910 * four, so divide by 8.
11912 reg = (((rcd->expected_count >> RCV_SHIFT)
11913 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11914 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11915 (((rcd->expected_base >> RCV_SHIFT)
11916 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11917 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11918 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
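/*
 * Example of the divide-by-8 above (illustrative; assumes RCV_SHIFT is
 * 3, matching the comment): with expected_count = 2048 RcvArray
 * entries, 2048 >> 3 = 256 is written as the count, i.e. 1024 pairs
 * arranged in 256 groups of four pairs.
 */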
11919 if (ctxt == HFI1_CTRL_CTXT)
11920 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11922 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11923 write_csr(dd, RCV_VL15, 0);
11925 * When the receive context is being disabled, turn on tail
11926 * update with a dummy tail address and then disable
11929 if (dd->rcvhdrtail_dummy_dma) {
11930 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11931 dd->rcvhdrtail_dummy_dma);
11932 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11933 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11936 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11938 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11939 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11940 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11941 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11942 if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11943 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11944 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11945 /* See comment on RcvCtxtCtrl.TailUpd above */
11946 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11947 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11949 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11950 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11951 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11952 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11953 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11955 * In one-packet-per-eager mode, the size comes from
11956 * the RcvArray entry.
11958 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11959 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11961 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11962 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11963 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11964 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11965 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11966 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11967 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11968 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11969 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11970 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11971 rcd->rcvctrl = rcvctrl;
11972 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11973 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11975 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11977 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11978 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11980 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11982 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11983 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11984 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11985 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11986 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11987 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11988 ctxt, reg, reg == 0 ? "not" : "still");
11994 * The interrupt timeout and count must be set after
11995 * the context is enabled to take effect.
11997 /* set interrupt timeout */
11998 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11999 (u64)rcd->rcvavail_timeout <<
12000 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12002 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12003 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12004 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12007 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12009 * If the context has been disabled and the Tail Update has
12010 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
12011 * so it doesn't contain an address that is invalid.
12013 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12014 dd->rcvhdrtail_dummy_dma);
12017 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12023 ret = dd->cntrnameslen;
12024 *namep = dd->cntrnames;
12026 const struct cntr_entry *entry;
12029 ret = (dd->ndevcntrs) * sizeof(u64);
12031 /* Get the start of the block of counters */
12032 *cntrp = dd->cntrs;
12035 * Now go and fill in each counter in the block.
12037 for (i = 0; i < DEV_CNTR_LAST; i++) {
12038 entry = &dev_cntrs[i];
12039 hfi1_cdbg(CNTR, "reading %s", entry->name);
12040 if (entry->flags & CNTR_DISABLED) {
12042 hfi1_cdbg(CNTR, "\tDisabled\n");
12044 if (entry->flags & CNTR_VL) {
12045 hfi1_cdbg(CNTR, "\tPer VL\n");
12046 for (j = 0; j < C_VL_COUNT; j++) {
12047 val = entry->rw_cntr(entry,
12053 "\t\tRead 0x%llx for %d\n",
12055 dd->cntrs[entry->offset + j] =
12058 } else if (entry->flags & CNTR_SDMA) {
12060 "\t Per SDMA Engine\n");
12061 for (j = 0; j < dd->chip_sdma_engines;
12064 entry->rw_cntr(entry, dd, j,
12067 "\t\tRead 0x%llx for %d\n",
12069 dd->cntrs[entry->offset + j] =
12073 val = entry->rw_cntr(entry, dd,
12076 dd->cntrs[entry->offset] = val;
12077 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12086 * Used by sysfs to create files for hfi stats to read
12088 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12094 ret = ppd->dd->portcntrnameslen;
12095 *namep = ppd->dd->portcntrnames;
12097 const struct cntr_entry *entry;
12100 ret = ppd->dd->nportcntrs * sizeof(u64);
12101 *cntrp = ppd->cntrs;
12103 for (i = 0; i < PORT_CNTR_LAST; i++) {
12104 entry = &port_cntrs[i];
12105 hfi1_cdbg(CNTR, "reading %s", entry->name);
12106 if (entry->flags & CNTR_DISABLED) {
12108 hfi1_cdbg(CNTR, "\tDisabled\n");
12112 if (entry->flags & CNTR_VL) {
12113 hfi1_cdbg(CNTR, "\tPer VL");
12114 for (j = 0; j < C_VL_COUNT; j++) {
12115 val = entry->rw_cntr(entry, ppd, j,
12120 "\t\tRead 0x%llx for %d",
12122 ppd->cntrs[entry->offset + j] = val;
12125 val = entry->rw_cntr(entry, ppd,
12129 ppd->cntrs[entry->offset] = val;
12130 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12137 static void free_cntrs(struct hfi1_devdata *dd)
12139 struct hfi1_pportdata *ppd;
12142 if (dd->synth_stats_timer.data)
12143 del_timer_sync(&dd->synth_stats_timer);
12144 cancel_work_sync(&dd->update_cntr_work);
12145 dd->synth_stats_timer.data = 0;
12146 ppd = (struct hfi1_pportdata *)(dd + 1);
12147 for (i = 0; i < dd->num_pports; i++, ppd++) {
12149 kfree(ppd->scntrs);
12150 free_percpu(ppd->ibport_data.rvp.rc_acks);
12151 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12152 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12154 ppd->scntrs = NULL;
12155 ppd->ibport_data.rvp.rc_acks = NULL;
12156 ppd->ibport_data.rvp.rc_qacks = NULL;
12157 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12159 kfree(dd->portcntrnames);
12160 dd->portcntrnames = NULL;
12165 kfree(dd->cntrnames);
12166 dd->cntrnames = NULL;
12167 if (dd->update_cntr_wq) {
12168 destroy_workqueue(dd->update_cntr_wq);
12169 dd->update_cntr_wq = NULL;
12173 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12174 u64 *psval, void *context, int vl)
12179 if (entry->flags & CNTR_DISABLED) {
12180 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12184 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12186 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12188 /* If it's a synthetic counter there is more work we need to do */
12189 if (entry->flags & CNTR_SYNTH) {
12190 if (sval == CNTR_MAX) {
12191 /* No need to read already saturated */
12195 if (entry->flags & CNTR_32BIT) {
12196 /* 32bit counters can wrap multiple times */
12197 u64 upper = sval >> 32;
12198 u64 lower = (sval << 32) >> 32;
12200 if (lower > val) { /* hw wrapped */
12201 if (upper == CNTR_32BIT_MAX)
12207 if (val != CNTR_MAX)
12208 val = (upper << 32) | val;
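/*
 * Worked example of the wrap handling above (illustrative): with a
 * saved sval of 0x1fffffff0 (upper = 1, lower = 0xfffffff0) and a
 * fresh hardware read of val = 0x10, lower > val means the 32-bit
 * counter wrapped, so the reconstructed 64-bit value becomes
 * ((upper + 1) << 32) | 0x10 = 0x200000010.
 */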
12211 /* If we rolled we are saturated */
12212 if ((val < sval) || (val > CNTR_MAX))
12219 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12224 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12225 struct cntr_entry *entry,
12226 u64 *psval, void *context, int vl, u64 data)
12230 if (entry->flags & CNTR_DISABLED) {
12231 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12235 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12237 if (entry->flags & CNTR_SYNTH) {
12239 if (entry->flags & CNTR_32BIT) {
12240 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12241 (data << 32) >> 32);
12242 val = data; /* return the full 64bit value */
12244 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12248 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12253 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12258 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12260 struct cntr_entry *entry;
12263 entry = &dev_cntrs[index];
12264 sval = dd->scntrs + entry->offset;
12266 if (vl != CNTR_INVALID_VL)
12269 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12272 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12274 struct cntr_entry *entry;
12277 entry = &dev_cntrs[index];
12278 sval = dd->scntrs + entry->offset;
12280 if (vl != CNTR_INVALID_VL)
12283 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12286 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12288 struct cntr_entry *entry;
12291 entry = &port_cntrs[index];
12292 sval = ppd->scntrs + entry->offset;
12294 if (vl != CNTR_INVALID_VL)
12297 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12298 (index <= C_RCV_HDR_OVF_LAST)) {
12299 /* We do not want to bother for disabled contexts */
12303 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12306 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12308 struct cntr_entry *entry;
12311 entry = &port_cntrs[index];
12312 sval = ppd->scntrs + entry->offset;
12314 if (vl != CNTR_INVALID_VL)
12317 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12318 (index <= C_RCV_HDR_OVF_LAST)) {
12319 /* We do not want to bother for disabled contexts */
12323 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12326 static void do_update_synth_timer(struct work_struct *work)
12333 struct hfi1_pportdata *ppd;
12334 struct cntr_entry *entry;
12335 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12339 * Rather than keep beating on the CSRs, pick a minimal set that we can
12340 * check to watch for potential rollover. We can do this by looking at
12341 * the number of flits sent/received. If the total flits exceeds 32 bits
12342 * then we have to iterate all the counters and update.
12344 entry = &dev_cntrs[C_DC_RCV_FLITS];
12345 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12347 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12348 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12352 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12353 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12355 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12357 * May not be strictly necessary to update but it won't hurt and
12358 * simplifies the logic here.
12361 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12364 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12366 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12367 total_flits, (u64)CNTR_32BIT_MAX);
12368 if (total_flits >= CNTR_32BIT_MAX) {
12369 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12376 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12377 for (i = 0; i < DEV_CNTR_LAST; i++) {
12378 entry = &dev_cntrs[i];
12379 if (entry->flags & CNTR_VL) {
12380 for (vl = 0; vl < C_VL_COUNT; vl++)
12381 read_dev_cntr(dd, i, vl);
12383 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12386 ppd = (struct hfi1_pportdata *)(dd + 1);
12387 for (i = 0; i < dd->num_pports; i++, ppd++) {
12388 for (j = 0; j < PORT_CNTR_LAST; j++) {
12389 entry = &port_cntrs[j];
12390 if (entry->flags & CNTR_VL) {
12391 for (vl = 0; vl < C_VL_COUNT; vl++)
12392 read_port_cntr(ppd, j, vl);
12394 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12400 * We want the value in the register. The goal is to keep track
12401 * of the number of "ticks" not the counter value. In other
12402 * words if the register rolls we want to notice it and go ahead
12403 * and force an update.
12405 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12406 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12409 entry = &dev_cntrs[C_DC_RCV_FLITS];
12410 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12413 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12414 dd->unit, dd->last_tx, dd->last_rx);
12417 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12421 static void update_synth_timer(unsigned long opaque)
12423 struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
12425 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12426 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12429 #define C_MAX_NAME 16 /* 15 chars + one for \0 */
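/*
 * Layout sketch of the counter-name blocks built by init_cntrs() below
 * (illustrative names; the real ones come from dev_cntrs[] and
 * port_cntrs[]): a single newline-separated string, with per-VL and
 * per-SDMA counters expanded by index and 32-bit counters tagged with
 * a ",32" suffix, e.g.
 *
 *	RxFlitCntVL0,32
 *	RxFlitCntVL1,32
 *	TxFlitCnt
 *	...
 */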
12430 static int init_cntrs(struct hfi1_devdata *dd)
12432 int i, rcv_ctxts, j;
12435 char name[C_MAX_NAME];
12436 struct hfi1_pportdata *ppd;
12437 const char *bit_type_32 = ",32";
12438 const int bit_type_32_sz = strlen(bit_type_32);
12440 /* set up the stats timer; the add_timer is done at the end */
12441 setup_timer(&dd->synth_stats_timer, update_synth_timer,
12442 (unsigned long)dd);
12444 /***********************/
12445 /* per device counters */
12446 /***********************/
12448 /* size names and determine how many we have */
12452 for (i = 0; i < DEV_CNTR_LAST; i++) {
12453 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12454 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12458 if (dev_cntrs[i].flags & CNTR_VL) {
12459 dev_cntrs[i].offset = dd->ndevcntrs;
12460 for (j = 0; j < C_VL_COUNT; j++) {
12461 snprintf(name, C_MAX_NAME, "%s%d",
12462 dev_cntrs[i].name, vl_from_idx(j));
12463 sz += strlen(name);
12464 /* Add ",32" for 32-bit counters */
12465 if (dev_cntrs[i].flags & CNTR_32BIT)
12466 sz += bit_type_32_sz;
12470 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12471 dev_cntrs[i].offset = dd->ndevcntrs;
12472 for (j = 0; j < dd->chip_sdma_engines; j++) {
12473 snprintf(name, C_MAX_NAME, "%s%d",
12474 dev_cntrs[i].name, j);
12475 sz += strlen(name);
12476 /* Add ",32" for 32-bit counters */
12477 if (dev_cntrs[i].flags & CNTR_32BIT)
12478 sz += bit_type_32_sz;
12483 /* +1 for newline. */
12484 sz += strlen(dev_cntrs[i].name) + 1;
12485 /* Add ",32" for 32-bit counters */
12486 if (dev_cntrs[i].flags & CNTR_32BIT)
12487 sz += bit_type_32_sz;
12488 dev_cntrs[i].offset = dd->ndevcntrs;
12493 /* allocate space for the counter values */
12494 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12499 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12503 /* allocate space for the counter names */
12504 dd->cntrnameslen = sz;
12505 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12506 if (!dd->cntrnames)
12509 /* fill in the names */
12510 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12511 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12513 } else if (dev_cntrs[i].flags & CNTR_VL) {
12514 for (j = 0; j < C_VL_COUNT; j++) {
12515 snprintf(name, C_MAX_NAME, "%s%d",
12518 memcpy(p, name, strlen(name));
12521 /* Counter is 32 bits */
12522 if (dev_cntrs[i].flags & CNTR_32BIT) {
12523 memcpy(p, bit_type_32, bit_type_32_sz);
12524 p += bit_type_32_sz;
12529 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12530 for (j = 0; j < dd->chip_sdma_engines; j++) {
12531 snprintf(name, C_MAX_NAME, "%s%d",
12532 dev_cntrs[i].name, j);
12533 memcpy(p, name, strlen(name));
12536 /* Counter is 32 bits */
12537 if (dev_cntrs[i].flags & CNTR_32BIT) {
12538 memcpy(p, bit_type_32, bit_type_32_sz);
12539 p += bit_type_32_sz;
12545 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12546 p += strlen(dev_cntrs[i].name);
12548 /* Counter is 32 bits */
12549 if (dev_cntrs[i].flags & CNTR_32BIT) {
12550 memcpy(p, bit_type_32, bit_type_32_sz);
12551 p += bit_type_32_sz;
12558 /*********************/
12559 /* per port counters */
12560 /*********************/
12563 * Go through the counters for the overflows and disable the ones we
12564 * don't need. This varies based on platform so we need to do it
12565 * dynamically here.
12567 rcv_ctxts = dd->num_rcv_contexts;
12568 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12569 i <= C_RCV_HDR_OVF_LAST; i++) {
12570 port_cntrs[i].flags |= CNTR_DISABLED;
12573 /* size port counter names and determine how many we have */
12575 dd->nportcntrs = 0;
12576 for (i = 0; i < PORT_CNTR_LAST; i++) {
12577 if (port_cntrs[i].flags & CNTR_DISABLED) {
12578 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12582 if (port_cntrs[i].flags & CNTR_VL) {
12583 port_cntrs[i].offset = dd->nportcntrs;
12584 for (j = 0; j < C_VL_COUNT; j++) {
12585 snprintf(name, C_MAX_NAME, "%s%d",
12586 port_cntrs[i].name, vl_from_idx(j));
12587 sz += strlen(name);
12588 /* Add ",32" for 32-bit counters */
12589 if (port_cntrs[i].flags & CNTR_32BIT)
12590 sz += bit_type_32_sz;
12595 /* +1 for newline */
12596 sz += strlen(port_cntrs[i].name) + 1;
12597 /* Add ",32" for 32-bit counters */
12598 if (port_cntrs[i].flags & CNTR_32BIT)
12599 sz += bit_type_32_sz;
12600 port_cntrs[i].offset = dd->nportcntrs;
12605 /* allocate space for the counter names */
12606 dd->portcntrnameslen = sz;
12607 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12608 if (!dd->portcntrnames)
12611 /* fill in port cntr names */
12612 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12613 if (port_cntrs[i].flags & CNTR_DISABLED)
12616 if (port_cntrs[i].flags & CNTR_VL) {
12617 for (j = 0; j < C_VL_COUNT; j++) {
12618 snprintf(name, C_MAX_NAME, "%s%d",
12619 port_cntrs[i].name, vl_from_idx(j));
12620 memcpy(p, name, strlen(name));
12623 /* Counter is 32 bits */
12624 if (port_cntrs[i].flags & CNTR_32BIT) {
12625 memcpy(p, bit_type_32, bit_type_32_sz);
12626 p += bit_type_32_sz;
12632 memcpy(p, port_cntrs[i].name,
12633 strlen(port_cntrs[i].name));
12634 p += strlen(port_cntrs[i].name);
12636 /* Counter is 32 bits */
12637 if (port_cntrs[i].flags & CNTR_32BIT) {
12638 memcpy(p, bit_type_32, bit_type_32_sz);
12639 p += bit_type_32_sz;
12646 /* allocate per port storage for counter values */
12647 ppd = (struct hfi1_pportdata *)(dd + 1);
12648 for (i = 0; i < dd->num_pports; i++, ppd++) {
12649 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12653 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12658 /* CPU counters need to be allocated and zeroed */
12659 if (init_cpu_counters(dd))
12662 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12663 WQ_MEM_RECLAIM, dd->unit);
12664 if (!dd->update_cntr_wq)
12667 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12669 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12676 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12678 switch (chip_lstate) {
12681 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12685 return IB_PORT_DOWN;
12687 return IB_PORT_INIT;
12689 return IB_PORT_ARMED;
12690 case LSTATE_ACTIVE:
12691 return IB_PORT_ACTIVE;
12695 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12697 /* look at the HFI meta-states only */
12698 switch (chip_pstate & 0xf0) {
12700 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12704 return IB_PORTPHYSSTATE_DISABLED;
12706 return OPA_PORTPHYSSTATE_OFFLINE;
12708 return IB_PORTPHYSSTATE_POLLING;
12709 case PLS_CONFIGPHY:
12710 return IB_PORTPHYSSTATE_TRAINING;
12712 return IB_PORTPHYSSTATE_LINKUP;
12714 return IB_PORTPHYSSTATE_PHY_TEST;
12718 /* return the OPA port logical state name */
12719 const char *opa_lstate_name(u32 lstate)
12721 static const char * const port_logical_names[] = {
12727 "PORT_ACTIVE_DEFER",
12729 if (lstate < ARRAY_SIZE(port_logical_names))
12730 return port_logical_names[lstate];
12734 /* return the OPA port physical state name */
12735 const char *opa_pstate_name(u32 pstate)
12737 static const char * const port_physical_names[] = {
12744 "PHYS_LINK_ERR_RECOVER",
12751 if (pstate < ARRAY_SIZE(port_physical_names))
12752 return port_physical_names[pstate];
12756 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12759 * Set port status flags in the page mapped into userspace
12760 * memory. Do it here to ensure a reliable state - this is
12761 * the only function called by all state handling code.
12762 * Always set the flags because the cached value
12763 * might have been changed explicitly outside of this function.
12766 if (ppd->statusp) {
12770 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12771 HFI1_STATUS_IB_READY);
12773 case IB_PORT_ARMED:
12774 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12776 case IB_PORT_ACTIVE:
12777 *ppd->statusp |= HFI1_STATUS_IB_READY;
12784 * wait_logical_linkstate - wait for an IB link state change to occur
12785 * @ppd: port device
12786 * @state: the state to wait for
12787 * @msecs: the number of milliseconds to wait
12789 * Wait up to msecs milliseconds for IB link state change to occur.
12790 * For now, take the easy polling route.
12791 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12793 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12796 unsigned long timeout;
12799 timeout = jiffies + msecs_to_jiffies(msecs);
12801 new_state = chip_to_opa_lstate(ppd->dd,
12802 read_logical_state(ppd->dd));
12803 if (new_state == state)
12805 if (time_after(jiffies, timeout)) {
12806 dd_dev_err(ppd->dd,
12807 "timeout waiting for link state 0x%x\n",
12814 update_statusp(ppd, state);
12815 dd_dev_info(ppd->dd,
12816 "logical state changed to %s (0x%x)\n",
12817 opa_lstate_name(state),
12822 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12824 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12826 dd_dev_info(ppd->dd,
12827 "physical state changed to %s (0x%x), phy 0x%x\n",
12828 opa_pstate_name(ib_pstate), ib_pstate, state);
12832 * Read the physical hardware link state and check if it matches the
12833 * host driver's anticipated state.
12835 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12837 u32 read_state = read_physical_state(ppd->dd);
12839 if (read_state == state) {
12840 log_state_transition(ppd, state);
12842 dd_dev_err(ppd->dd,
12843 "anticipated phy link state 0x%x, read 0x%x\n",
12844 state, read_state);
12849 * wait_physical_linkstate - wait for a physical link state change to occur
12850 * @ppd: port device
12851 * @state: the state to wait for
12852 * @msecs: the number of milliseconds to wait
12854 * Wait up to msecs milliseconds for physical link state change to occur.
12855 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12857 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12861 unsigned long timeout;
12863 timeout = jiffies + msecs_to_jiffies(msecs);
12865 read_state = read_physical_state(ppd->dd);
12866 if (read_state == state)
12868 if (time_after(jiffies, timeout)) {
12869 dd_dev_err(ppd->dd,
12870 "timeout waiting for phy link state 0x%x\n",
12874 usleep_range(1950, 2050); /* sleep 2ms-ish */
12877 log_state_transition(ppd, state);
12882 * wait_phys_link_offline_substates - wait for any offline substate
12883 * @ppd: port device
12884 * @msecs: the number of milliseconds to wait
12886 * Wait up to msecs milliseconds for any offline physical link
12887 * state change to occur.
12888 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
12890 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12894 unsigned long timeout;
12896 timeout = jiffies + msecs_to_jiffies(msecs);
12898 read_state = read_physical_state(ppd->dd);
12899 if ((read_state & 0xF0) == PLS_OFFLINE)
12901 if (time_after(jiffies, timeout)) {
12902 dd_dev_err(ppd->dd,
12903 "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12904 read_state, msecs);
12907 usleep_range(1950, 2050); /* sleep 2ms-ish */
12910 log_state_transition(ppd, read_state);
12915 * wait_phys_link_out_of_offline - wait for any out of offline state
12916 * @ppd: port device
12917 * @msecs: the number of milliseconds to wait
12919 * Wait up to msecs milliseconds for any out of offline physical link
12920 * state change to occur.
12921 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
12923 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12927 unsigned long timeout;
12929 timeout = jiffies + msecs_to_jiffies(msecs);
12931 read_state = read_physical_state(ppd->dd);
12932 if ((read_state & 0xF0) != PLS_OFFLINE)
12934 if (time_after(jiffies, timeout)) {
12935 dd_dev_err(ppd->dd,
12936 "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12937 read_state, msecs);
12940 usleep_range(1950, 2050); /* sleep 2ms-ish */
12943 log_state_transition(ppd, read_state);
12947 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12948 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12950 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12951 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12953 void hfi1_init_ctxt(struct send_context *sc)
12956 struct hfi1_devdata *dd = sc->dd;
12958 u8 set = (sc->type == SC_USER ?
12959 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12960 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12961 reg = read_kctxt_csr(dd, sc->hw_context,
12962 SEND_CTXT_CHECK_ENABLE);
12964 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12966 SET_STATIC_RATE_CONTROL_SMASK(reg);
12967 write_kctxt_csr(dd, sc->hw_context,
12968 SEND_CTXT_CHECK_ENABLE, reg);
12972 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12977 if (dd->icode != ICODE_RTL_SILICON) {
12978 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12979 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12983 reg = read_csr(dd, ASIC_STS_THERM);
12984 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12985 ASIC_STS_THERM_CURR_TEMP_MASK);
12986 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12987 ASIC_STS_THERM_LO_TEMP_MASK);
12988 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12989 ASIC_STS_THERM_HI_TEMP_MASK);
12990 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12991 ASIC_STS_THERM_CRIT_TEMP_MASK);
12992 /* triggers is a 3-bit value - 1 bit per trigger. */
12993 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12998 /* ========================================================================= */
13001 * Enable or disable delivery of interrupts from the chip.
13003 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
13008 * In HFI, the mask needs to be 1 to allow interrupts.
13011 /* enable all interrupts */
13012 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13013 write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
13017 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13018 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13023 * Clear all interrupt sources on the chip.
13025 static void clear_all_interrupts(struct hfi1_devdata *dd)
13029 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13030 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13032 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13033 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13034 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13035 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13036 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13037 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13038 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13039 for (i = 0; i < dd->chip_send_contexts; i++)
13040 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13041 for (i = 0; i < dd->chip_sdma_engines; i++)
13042 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13044 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13045 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13046 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13049 /* Move to pcie.c? */
13050 static void disable_intx(struct pci_dev *pdev)
13056 * hfi1_clean_up_interrupts() - Free all IRQ resources
13057 * @dd: valid device data structure
13059 * Free the MSI-X or INTx IRQs and associated PCI resources,
13060 * if they have been allocated.
13062 void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
13066 /* remove irqs - must happen before disabling/turning off */
13067 if (dd->num_msix_entries) {
13069 struct hfi1_msix_entry *me = dd->msix_entries;
13071 for (i = 0; i < dd->num_msix_entries; i++, me++) {
13072 if (!me->arg) /* => no irq, no affinity */
13074 hfi1_put_irq_affinity(dd, me);
13075 free_irq(me->irq, me->arg);
13078 /* clean structures */
13079 kfree(dd->msix_entries);
13080 dd->msix_entries = NULL;
13081 dd->num_msix_entries = 0;
13084 if (dd->requested_intx_irq) {
13085 free_irq(dd->pcidev->irq, dd);
13086 dd->requested_intx_irq = 0;
13088 disable_intx(dd->pcidev);
13091 pci_free_irq_vectors(dd->pcidev);
13095 * Remap the interrupt source from the general handler to the given MSI-X
13098 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13103 /* clear from the handled mask of the general interrupt */
13106 if (likely(m < CCE_NUM_INT_CSRS)) {
13107 dd->gi_mask[m] &= ~((u64)1 << n);
13109 dd_dev_err(dd, "remap interrupt err\n");
13113 /* direct the chip source to the given MSI-X interrupt */
13116 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13117 reg &= ~((u64)0xff << (8 * n));
13118 reg |= ((u64)msix_intr & 0xff) << (8 * n);
13119 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
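/*
 * Worked example for remap_intr() above (illustrative): for chip
 * source isrc = 70 and msix_intr = 3, bit 6 of dd->gi_mask[1] is
 * cleared (70 = 64 + 6), and byte 6 of the CCE_INT_MAP CSR at index
 * 70 / 8 = 8 is set to 3, steering that source to MSI-X vector 3.
 */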
13122 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
13123 int engine, int msix_intr)
13126 * SDMA engine interrupt sources are grouped by type, rather than
13127 * by engine. Per-engine interrupts are as follows:
13132 remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
13134 remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
13136 remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
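/*
 * Example for the three calls above (illustrative, assuming
 * TXE_NUM_SDMA_ENGINES is 16): for engine = 2, chip sources
 * IS_SDMA_START + 2, IS_SDMA_START + 18 and IS_SDMA_START + 34 -
 * one source per interrupt type - are all remapped to the same
 * MSI-X vector.
 */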
13140 static int request_intx_irq(struct hfi1_devdata *dd)
13144 snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
13146 ret = request_irq(dd->pcidev->irq, general_interrupt,
13147 IRQF_SHARED, dd->intx_name, dd);
13149 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
13152 dd->requested_intx_irq = 1;
13156 static int request_msix_irqs(struct hfi1_devdata *dd)
13158 int first_general, last_general;
13159 int first_sdma, last_sdma;
13160 int first_rx, last_rx;
13163 /* calculate the ranges we are going to use */
13165 last_general = first_general + 1;
13166 first_sdma = last_general;
13167 last_sdma = first_sdma + dd->num_sdma;
13168 first_rx = last_sdma;
13169 last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
13171 /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
13172 dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
13175 * Sanity check - the code expects all SDMA chip source
13176 * interrupts to be in the same CSR, starting at bit 0. Verify
13177 * that this is true by checking the bit location of the start.
13179 BUILD_BUG_ON(IS_SDMA_START % 64);
13181 for (i = 0; i < dd->num_msix_entries; i++) {
13182 struct hfi1_msix_entry *me = &dd->msix_entries[i];
13183 const char *err_info;
13184 irq_handler_t handler;
13185 irq_handler_t thread = NULL;
13188 struct hfi1_ctxtdata *rcd = NULL;
13189 struct sdma_engine *sde = NULL;
13191 /* obtain the arguments to request_irq */
13192 if (first_general <= i && i < last_general) {
13193 idx = i - first_general;
13194 handler = general_interrupt;
13196 snprintf(me->name, sizeof(me->name),
13197 DRIVER_NAME "_%d", dd->unit);
13198 err_info = "general";
13199 me->type = IRQ_GENERAL;
13200 } else if (first_sdma <= i && i < last_sdma) {
13201 idx = i - first_sdma;
13202 sde = &dd->per_sdma[idx];
13203 handler = sdma_interrupt;
13205 snprintf(me->name, sizeof(me->name),
13206 DRIVER_NAME "_%d sdma%d", dd->unit, idx);
13208 remap_sdma_interrupts(dd, idx, i);
13209 me->type = IRQ_SDMA;
13210 } else if (first_rx <= i && i < last_rx) {
13211 idx = i - first_rx;
13212 rcd = hfi1_rcd_get_by_index(dd, idx);
13215 * Set the interrupt register and mask for this
13216 * context's interrupt.
13218 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13219 rcd->imask = ((u64)1) <<
13220 ((IS_RCVAVAIL_START + idx) % 64);
13221 handler = receive_context_interrupt;
13222 thread = receive_context_thread;
13224 snprintf(me->name, sizeof(me->name),
13225 DRIVER_NAME "_%d kctxt%d",
13227 err_info = "receive context";
13228 remap_intr(dd, IS_RCVAVAIL_START + idx, i);
13229 me->type = IRQ_RCVCTXT;
13230 rcd->msix_intr = i;
13234 /* not in our expected range - complain, then
13238 "Unexpected extra MSI-X interrupt %d\n", i);
13241 /* no argument, no interrupt */
13244 /* make sure the name is terminated */
13245 me->name[sizeof(me->name) - 1] = 0;
13246 me->irq = pci_irq_vector(dd->pcidev, i);
13248 * On err return me->irq. Don't need to clear this
13249 * because 'arg' has not been set, and cleanup will
13250 * do the right thing.
13255 ret = request_threaded_irq(me->irq, handler, thread, 0,
13259 "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
13260 err_info, me->irq, idx, ret);
13264 * assign arg after request_irq call, so it will be
13269 ret = hfi1_get_irq_affinity(dd, me);
13271 dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
13277 void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
13281 if (!dd->num_msix_entries) {
13282 synchronize_irq(dd->pcidev->irq);
13286 for (i = 0; i < dd->vnic.num_ctxt; i++) {
13287 struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
13288 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13290 synchronize_irq(me->irq);
13294 void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13296 struct hfi1_devdata *dd = rcd->dd;
13297 struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
13299 if (!me->arg) /* => no irq, no affinity */
13302 hfi1_put_irq_affinity(dd, me);
13303 free_irq(me->irq, me->arg);
13308 void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
13310 struct hfi1_devdata *dd = rcd->dd;
13311 struct hfi1_msix_entry *me;
13312 int idx = rcd->ctxt;
13316 rcd->msix_intr = dd->vnic.msix_idx++;
13317 me = &dd->msix_entries[rcd->msix_intr];
13320 * Set the interrupt register and mask for this
13321 * context's interrupt.
13323 rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
13324 rcd->imask = ((u64)1) <<
13325 ((IS_RCVAVAIL_START + idx) % 64);
13327 snprintf(me->name, sizeof(me->name),
13328 DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
13329 me->name[sizeof(me->name) - 1] = 0;
13330 me->type = IRQ_RCVCTXT;
13331 me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
13333 dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
13337 remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
13339 ret = request_threaded_irq(me->irq, receive_context_interrupt,
13340 receive_context_thread, 0, me->name, arg);
13342 dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
13343 me->irq, idx, ret);
13347 * assign arg after request_irq call, so it will be
13352 ret = hfi1_get_irq_affinity(dd, me);
13355 "unable to pin IRQ %d\n", ret);
13356 free_irq(me->irq, me->arg);
13361 * Set the general handler to accept all interrupts, remap all
13362 * chip interrupts back to MSI-X 0.
13364 static void reset_interrupts(struct hfi1_devdata *dd)
13368 /* all interrupts handled by the general handler */
13369 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13370 dd->gi_mask[i] = ~(u64)0;
13372 /* all chip interrupts map to MSI-X 0 */
13373 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13374 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13377 static int set_up_interrupts(struct hfi1_devdata *dd)
13381 int single_interrupt = 0; /* we expect to have all the interrupts */
13385 * 1 general, "slow path" interrupt (includes the SDMA engines
13386 * slow source, SDMACleanupDone)
13387 * N interrupts - one per used SDMA engine
13388 * M interrupts - one per kernel receive context
13389 * V interrupts - one for each VNIC context
13391 total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
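/*
 * Example of the count above (illustrative): with 16 SDMA engines in
 * use, 8 kernel receive queues and 1 VNIC context,
 * total = 1 + 16 + 8 + 1 = 26 MSI-X vectors are requested.
 */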
13393 /* ask for MSI-X interrupts */
13394 request = request_msix(dd, total);
13398 } else if (request == 0) {
13400 /* dd->num_msix_entries already zero */
13401 single_interrupt = 1;
13402 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
13403 } else if (request < total) {
13404 /* using MSI-X, with reduced interrupts */
13405 dd_dev_err(dd, "reduced interrupt found, wanted %u, got %u\n",
13410 dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
13412 if (!dd->msix_entries) {
13417 dd->num_msix_entries = total;
13418 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
13421 /* mask all interrupts */
13422 set_intr_state(dd, 0);
13423 /* clear all pending interrupts */
13424 clear_all_interrupts(dd);
13426 /* reset general handler mask, chip MSI-X mappings */
13427 reset_interrupts(dd);
13429 if (single_interrupt)
13430 ret = request_intx_irq(dd);
13432 ret = request_msix_irqs(dd);
13439 hfi1_clean_up_interrupts(dd);
13444 * Set up context values in dd. Sets:
13446 * num_rcv_contexts - number of contexts being used
13447 * n_krcv_queues - number of kernel contexts
13448 * first_dyn_alloc_ctxt - first dynamically allocated context
13449 * in array of contexts
13450 * freectxts - number of free user contexts
13451 * num_send_contexts - number of PIO send contexts being used
13452 * num_vnic_contexts - number of contexts reserved for VNIC
13454 static int set_up_context_variables(struct hfi1_devdata *dd)
13456 unsigned long num_kernel_contexts;
13457 u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13458 int total_contexts;
13462 int user_rmt_reduced;
13465 * Kernel receive contexts:
13466 * - Context 0 - control context (VL15/multicast/error)
13467 * - Context 1 - first kernel context
13468 * - Context 2 - second kernel context
13473 * n_krcvqs is the sum of module parameter kernel receive
13474 * contexts, krcvqs[]. It does not include the control
13475 * context, so add that.
13477 num_kernel_contexts = n_krcvqs + 1;
13479 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13481 * Every kernel receive context needs an ACK send context.
13482 * One send context is allocated for each VL{0-7} and VL15.
13484 if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
13486 "Reducing # kernel rcv contexts to: %d, from %lu\n",
13487 (int)(dd->chip_send_contexts - num_vls - 1),
13488 num_kernel_contexts);
13489 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
13492 /* Accommodate VNIC contexts if possible */
13493 if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
13494 dd_dev_err(dd, "No receive contexts available for VNIC\n");
13495 num_vnic_contexts = 0;
13497 total_contexts = num_kernel_contexts + num_vnic_contexts;
13501 * - default to 1 user context per real (non-HT) CPU core if
13502 * num_user_contexts is negative
13504 if (num_user_contexts < 0)
13505 num_user_contexts =
13506 cpumask_weight(&node_affinity.real_cpu_mask);
13509 * Adjust the counts given a global max.
13511 if (total_contexts + num_user_contexts > dd->chip_rcv_contexts) {
13513 "Reducing # user receive contexts to: %d, from %d\n",
13514 (int)(dd->chip_rcv_contexts - total_contexts),
13515 (int)num_user_contexts);
13517 num_user_contexts = dd->chip_rcv_contexts - total_contexts;
13520 /* each user context requires an entry in the RMT */
13521 qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
13522 if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
13523 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
13525 "RMT size is reducing the number of user receive contexts from %d to %d\n",
13526 (int)num_user_contexts,
13529 num_user_contexts = user_rmt_reduced;
13532 total_contexts += num_user_contexts;
13534 /* the first N are kernel contexts, the rest are user/vnic contexts */
13535 dd->num_rcv_contexts = total_contexts;
13536 dd->n_krcv_queues = num_kernel_contexts;
13537 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13538 dd->num_vnic_contexts = num_vnic_contexts;
13539 dd->num_user_contexts = num_user_contexts;
13540 dd->freectxts = num_user_contexts;
13542 "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13543 (int)dd->chip_rcv_contexts,
13544 (int)dd->num_rcv_contexts,
13545 (int)dd->n_krcv_queues,
13546 dd->num_vnic_contexts,
13547 dd->num_user_contexts);
13550 * Receive array allocation:
13551 * All RcvArray entries are divided into groups of 8. This
13552 * is required by the hardware and will speed up writes to
13553 * consecutive entries by using write-combining of the entire
13556 * The number of groups is evenly divided among all contexts.
13557 * Any left-over groups will be given to the first N user
13560 dd->rcv_entries.group_size = RCV_INCREMENT;
13561 ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
13562 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13563 dd->rcv_entries.nctxt_extra = ngroups -
13564 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13565 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13566 dd->rcv_entries.ngroups,
13567 dd->rcv_entries.nctxt_extra);
13568 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13569 MAX_EAGER_ENTRIES * 2) {
13570 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13571 dd->rcv_entries.group_size;
13573 "RcvArray group count too high, change to %u\n",
13574 dd->rcv_entries.ngroups);
13575 dd->rcv_entries.nctxt_extra = 0;
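/*
 * Worked example of the group math above (illustrative numbers): with
 * a chip_rcv_array_count of 32768 entries and group_size = 8,
 * ngroups = 4096; for 40 receive contexts that is 4096 / 40 = 102
 * groups per context, with 4096 - (40 * 102) = 16 extra groups handed
 * to the first user contexts.
 */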
13578 * PIO send contexts
13580 ret = init_sc_pools_and_sizes(dd);
13581 if (ret >= 0) { /* success */
13582 dd->num_send_contexts = ret;
13585 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13586 dd->chip_send_contexts,
13587 dd->num_send_contexts,
13588 dd->sc_sizes[SC_KERNEL].count,
13589 dd->sc_sizes[SC_ACK].count,
13590 dd->sc_sizes[SC_USER].count,
13591 dd->sc_sizes[SC_VL15].count);
13592 ret = 0; /* success */
13599 * Set the device/port partition key table. The MAD code
13600 * will ensure that, at least, the partial management
13601 * partition key is present in the table.
13603 static void set_partition_keys(struct hfi1_pportdata *ppd)
13605 struct hfi1_devdata *dd = ppd->dd;
13609 dd_dev_info(dd, "Setting partition keys\n");
13610 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13611 reg |= (ppd->pkeys[i] &
13612 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13614 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13615 /* Each register holds 4 PKey values. */
13616 if ((i % 4) == 3) {
13617 write_csr(dd, RCV_PARTITION_KEY +
13618 ((i - 3) * 2), reg);
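/*
 * Example of the packing above: pkeys[0..3] are accumulated and
 * written to RCV_PARTITION_KEY + 0 when i == 3; pkeys[4..7] go to
 * RCV_PARTITION_KEY + 8 when i == 7, and so on - four 16-bit keys
 * per 64-bit register.
 */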
13623 /* Always enable HW pkey checking when the pkey table is set */
13624 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13628 * These CSRs and memories are uninitialized on reset and must be
13629 * written before reading to set the ECC/parity bits.
13631 * NOTE: All user context CSRs that are not mmaped write-only
13632 * (e.g. the TID flows) must be initialized even if the driver never
13635 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13640 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13641 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13643 /* SendCtxtCreditReturnAddr */
13644 for (i = 0; i < dd->chip_send_contexts; i++)
13645 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13647 /* PIO Send buffers */
13648 /* SDMA Send buffers */
13650 * These are not normally read, and (presently) have no method
13651 * to be read, so are not pre-initialized
13655 /* RcvHdrTailAddr */
13656 /* RcvTidFlowTable */
13657 for (i = 0; i < dd->chip_rcv_contexts; i++) {
13658 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13659 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13660 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13661 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13665 for (i = 0; i < dd->chip_rcv_array_count; i++)
13666 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13668 /* RcvQPMapTable */
13669 for (i = 0; i < 32; i++)
13670 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13674 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13676 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13679 unsigned long timeout;
13682 /* is the condition present? */
13683 reg = read_csr(dd, CCE_STATUS);
13684 if ((reg & status_bits) == 0)
13687 /* clear the condition */
13688 write_csr(dd, CCE_CTRL, ctrl_bits);
13690 /* wait for the condition to clear */
13691 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13693 reg = read_csr(dd, CCE_STATUS);
13694 if ((reg & status_bits) == 0)
13696 if (time_after(jiffies, timeout)) {
13698 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13699 status_bits, reg & status_bits);
13706 /* set CCE CSRs to chip reset defaults */
13707 static void reset_cce_csrs(struct hfi1_devdata *dd)
13711 /* CCE_REVISION read-only */
13712 /* CCE_REVISION2 read-only */
13713 /* CCE_CTRL - bits clear automatically */
13714 /* CCE_STATUS read-only, use CceCtrl to clear */
13715 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13716 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13717 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13718 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13719 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13720 /* CCE_ERR_STATUS read-only */
13721 write_csr(dd, CCE_ERR_MASK, 0);
13722 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13723 /* CCE_ERR_FORCE leave alone */
13724 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13725 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13726 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13727 /* CCE_PCIE_CTRL leave alone */
13728 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13729 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13730 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13731 CCE_MSIX_TABLE_UPPER_RESETCSR);
13733 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13734 /* CCE_MSIX_PBA read-only */
13735 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13736 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13738 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13739 write_csr(dd, CCE_INT_MAP, 0);
13740 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13741 /* CCE_INT_STATUS read-only */
13742 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13743 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13744 /* CCE_INT_FORCE leave alone */
13745 /* CCE_INT_BLOCKED read-only */
13747 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13748 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13751 /* set MISC CSRs to chip reset defaults */
13752 static void reset_misc_csrs(struct hfi1_devdata *dd)
13756 for (i = 0; i < 32; i++) {
13757 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13758 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13759 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13762 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13763 * only be written 128-byte chunks
13765 /* init RSA engine to clear lingering errors */
13766 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13767 write_csr(dd, MISC_CFG_RSA_MU, 0);
13768 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13769 /* MISC_STS_8051_DIGEST read-only */
13770 /* MISC_STS_SBM_DIGEST read-only */
13771 /* MISC_STS_PCIE_DIGEST read-only */
13772 /* MISC_STS_FAB_DIGEST read-only */
13773 /* MISC_ERR_STATUS read-only */
13774 write_csr(dd, MISC_ERR_MASK, 0);
13775 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13776 /* MISC_ERR_FORCE leave alone */
13779 /* set TXE CSRs to chip reset defaults */
13780 static void reset_txe_csrs(struct hfi1_devdata *dd)
13787 write_csr(dd, SEND_CTRL, 0);
13788 __cm_reset(dd, 0); /* reset CM internal state */
13789 /* SEND_CONTEXTS read-only */
13790 /* SEND_DMA_ENGINES read-only */
13791 /* SEND_PIO_MEM_SIZE read-only */
13792 /* SEND_DMA_MEM_SIZE read-only */
13793 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13794 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13795 /* SEND_PIO_ERR_STATUS read-only */
13796 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13797 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13798 /* SEND_PIO_ERR_FORCE leave alone */
13799 /* SEND_DMA_ERR_STATUS read-only */
13800 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13801 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13802 /* SEND_DMA_ERR_FORCE leave alone */
13803 /* SEND_EGRESS_ERR_STATUS read-only */
13804 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13805 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13806 /* SEND_EGRESS_ERR_FORCE leave alone */
13807 write_csr(dd, SEND_BTH_QP, 0);
13808 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13809 write_csr(dd, SEND_SC2VLT0, 0);
13810 write_csr(dd, SEND_SC2VLT1, 0);
13811 write_csr(dd, SEND_SC2VLT2, 0);
13812 write_csr(dd, SEND_SC2VLT3, 0);
13813 write_csr(dd, SEND_LEN_CHECK0, 0);
13814 write_csr(dd, SEND_LEN_CHECK1, 0);
13815 /* SEND_ERR_STATUS read-only */
13816 write_csr(dd, SEND_ERR_MASK, 0);
13817 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13818 /* SEND_ERR_FORCE read-only */
13819 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13820 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13821 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13822 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13823 for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13824 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13825 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13826 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13827 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13828 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13829 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13830 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13831 /* SEND_CM_CREDIT_USED_STATUS read-only */
13832 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13833 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13834 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13835 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13836 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13837 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13838 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13839 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13840 /* SEND_CM_CREDIT_USED_VL read-only */
13841 /* SEND_CM_CREDIT_USED_VL15 read-only */
13842 /* SEND_EGRESS_CTXT_STATUS read-only */
13843 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13844 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13845 /* SEND_EGRESS_ERR_INFO read-only */
13846 /* SEND_EGRESS_ERR_SOURCE read-only */
13849 * TXE Per-Context CSRs
13851 for (i = 0; i < dd->chip_send_contexts; i++) {
13852 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13853 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13854 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13855 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13856 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13857 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13858 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13859 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13860 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13861 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13862 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13863 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13867 * TXE Per-SDMA CSRs
13869 for (i = 0; i < dd->chip_sdma_engines; i++) {
13870 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13871 /* SEND_DMA_STATUS read-only */
13872 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13873 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13874 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13875 /* SEND_DMA_HEAD read-only */
13876 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13877 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13878 /* SEND_DMA_IDLE_CNT read-only */
13879 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13880 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13881 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13882 /* SEND_DMA_ENG_ERR_STATUS read-only */
13883 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13884 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13885 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13886 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13887 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13888 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13889 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13890 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13891 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13892 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13898 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13900 static void init_rbufs(struct hfi1_devdata *dd)
13906 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13911 reg = read_csr(dd, RCV_STATUS);
13912 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13913 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13916 * Give up after 1ms - maximum wait time.
13918 * RBuf size is 136KiB. Slowest possible is PCIe Gen1 x1 at
13919 * 250MB/s bandwidth. Lower rate to 66% for overhead to get:
13920 * 136 KB / (66% * 250MB/s) = 844us
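 *
 * Sanity check of the bound used below (arithmetic only): the loop pairs
 * udelay(2) with a limit of 500 iterations, i.e. 500 * 2us = 1ms,
 * comfortably above the 844us worst case.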
13922 if (count++ > 500) {
13924 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13928 udelay(2); /* do not busy-wait the CSR */
13931 /* start the init - expect RcvCtrl to be 0 */
13932 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13935 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13936 * period after the write before RcvStatus.RxRbufInitDone is valid.
13937 * The delay in the first run through the loop below is sufficient and
13938 * required before the first read of RcvStatus.RxRbufInitDone.
13940 read_csr(dd, RCV_CTRL);
13942 /* wait for the init to finish */
13945 /* delay is required first time through - see above */
13946 udelay(2); /* do not busy-wait the CSR */
13947 reg = read_csr(dd, RCV_STATUS);
13948 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13951 /* give up after 100us - slowest possible at 33MHz is 73us */
13952 if (count++ > 50) {
13954 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13961 /* set RXE CSRs to chip reset defaults */
13962 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13969 write_csr(dd, RCV_CTRL, 0);
13971 /* RCV_STATUS read-only */
13972 /* RCV_CONTEXTS read-only */
13973 /* RCV_ARRAY_CNT read-only */
13974 /* RCV_BUF_SIZE read-only */
13975 write_csr(dd, RCV_BTH_QP, 0);
13976 write_csr(dd, RCV_MULTICAST, 0);
13977 write_csr(dd, RCV_BYPASS, 0);
13978 write_csr(dd, RCV_VL15, 0);
13979 /* this is a clear-down */
13980 write_csr(dd, RCV_ERR_INFO,
13981 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13982 /* RCV_ERR_STATUS read-only */
13983 write_csr(dd, RCV_ERR_MASK, 0);
13984 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13985 /* RCV_ERR_FORCE leave alone */
13986 for (i = 0; i < 32; i++)
13987 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13988 for (i = 0; i < 4; i++)
13989 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13990 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13991 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13992 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13993 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13994 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13995 clear_rsm_rule(dd, i);
13996 for (i = 0; i < 32; i++)
13997 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
14000 * RXE Kernel and User Per-Context CSRs
14002 for (i = 0; i < dd->chip_rcv_contexts; i++) {
14004 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
14005 /* RCV_CTXT_STATUS read-only */
14006 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
14007 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
14008 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
14009 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
14010 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
14011 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
14012 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
14013 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
14014 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
14015 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
14018 /* RCV_HDR_TAIL read-only */
14019 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
14020 /* RCV_EGR_INDEX_TAIL read-only */
14021 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
14022 /* RCV_EGR_OFFSET_TAIL read-only */
14023 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
14024 write_uctxt_csr(dd, i,
14025 RCV_TID_FLOW_TABLE + (8 * j), 0);
14031 * Set sc2vl tables.
14033 * They power on to zeros, so to avoid send context errors
14034 * they need to be set:
14036 * SC 0-7 -> VL 0-7 (respectively)
14041 static void init_sc2vl_tables(struct hfi1_devdata *dd)
14044 /* init per architecture spec, constrained by hardware capability */
14046 /* HFI maps sent packets */
14047 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
14053 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
14059 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
14065 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
14072 /* DC maps received packets */
14073 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
14075 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
14076 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
14077 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
14079 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
14080 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
14082 /* initialize the cached sc2vl values consistently with h/w */
14083 for (i = 0; i < 32; i++) {
14084 if (i < 8 || i == 15)
14085 *((u8 *)(dd->sc2vl) + i) = (u8)i;
14087 *((u8 *)(dd->sc2vl) + i) = 0;
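/*
 * Illustrative result of the cache fill above (follows directly from the
 * loop, nothing new assumed): the dd->sc2vl bytes become
 *	{ 0, 1, 2, 3, 4, 5, 6, 7, 0, 0, 0, 0, 0, 0, 0, 15, 0, ..., 0 }
 * mirroring the SC-to-VL tables written to the chip above.
 */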
14092 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
14093 * depend on the chip going through a power-on reset - a driver may be loaded
14094 * and unloaded many times.
14096 * Do not write any CSR values to the chip in this routine - there may be
14097 * a reset following the (possible) FLR in this routine.
14100 static int init_chip(struct hfi1_devdata *dd)
14106 * Put the HFI CSRs in a known state.
14107 * Combine this with a DC reset.
14109 * Stop the device from doing anything while we do a
14110 * reset. We know there are no other active users of
14111 * the device since we are now in charge. Turn off
14112 * all outbound and inbound traffic and make sure
14113 * the device does not generate any interrupts.
14116 /* disable send contexts and SDMA engines */
14117 write_csr(dd, SEND_CTRL, 0);
14118 for (i = 0; i < dd->chip_send_contexts; i++)
14119 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
14120 for (i = 0; i < dd->chip_sdma_engines; i++)
14121 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
14122 /* disable port (turn off RXE inbound traffic) and contexts */
14123 write_csr(dd, RCV_CTRL, 0);
14124 for (i = 0; i < dd->chip_rcv_contexts; i++)
14125 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0); /* per-context CSR, matching the TXE loops above */
14126 /* mask all interrupt sources */
14127 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
14128 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
14131 * DC Reset: do a full DC reset before the register clear.
14132 * A recommended length of time to hold is one CSR read,
14133 * so reread the CceDcCtrl. Then, hold the DC in reset
14134 * across the clear.
14136 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
14137 (void)read_csr(dd, CCE_DC_CTRL);
14141 * A FLR will reset the SPC core and part of the PCIe.
14142 * The parts that need to be restored have already been
14145 dd_dev_info(dd, "Resetting CSRs with FLR\n");
14147 /* do the FLR, the DC reset will remain */
14148 pcie_flr(dd->pcidev);
14150 /* restore command and BARs */
14151 ret = restore_pci_variables(dd);
14153 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14159 dd_dev_info(dd, "Resetting CSRs with FLR\n");
14160 pcie_flr(dd->pcidev);
14161 ret = restore_pci_variables(dd);
14163 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
14169 dd_dev_info(dd, "Resetting CSRs with writes\n");
14170 reset_cce_csrs(dd);
14171 reset_txe_csrs(dd);
14172 reset_rxe_csrs(dd);
14173 reset_misc_csrs(dd);
14175 /* clear the DC reset */
14176 write_csr(dd, CCE_DC_CTRL, 0);
14178 /* Set the LED off */
14182 * Clear the QSFP reset.
14183 * An FLR enforces a 0 on all out pins. The driver does not touch
14184 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
14185 * anything plugged in held constantly in reset, if it pays attention
14187 * Prime examples of this are optical cables. Set all pins high.
14188 * I2CCLK and I2CDAT will change per direction, and INT_N and
14189 * MODPRS_N are input only and their value is ignored.
14191 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
14192 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
14193 init_chip_resources(dd);
14197 static void init_early_variables(struct hfi1_devdata *dd)
14201 /* assign link credit variables */
14203 dd->link_credits = CM_GLOBAL_CREDITS;
14205 dd->link_credits--;
14206 dd->vcu = cu_to_vcu(hfi1_cu);
14207 /* enough room for 8 MAD packets plus header - 17K */
14208 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14209 if (dd->vl15_init > dd->link_credits)
14210 dd->vl15_init = dd->link_credits;
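/*
 * Worked example of the sizing above (the au = 8 * 2^vau encoding of
 * vau_to_au() is an assumption stated here, not shown in this file):
 * 8 * (2048 + 128) = 17408 bytes; with vau = 3 (64-byte AUs) that is
 * 17408 / 64 = 272 AUs, then clamped to the available link credits.
 */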
14212 write_uninitialized_csrs_and_memories(dd);
14214 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14215 for (i = 0; i < dd->num_pports; i++) {
14216 struct hfi1_pportdata *ppd = &dd->pport[i];
14218 set_partition_keys(ppd);
14220 init_sc2vl_tables(dd);
14223 static void init_kdeth_qp(struct hfi1_devdata *dd)
14225 /* user changed the KDETH_QP */
14226 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14227 /* out of range or illegal value */
14228 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14231 if (kdeth_qp == 0) /* not set, or failed range check */
14232 kdeth_qp = DEFAULT_KDETH_QP;
14234 write_csr(dd, SEND_BTH_QP,
14235 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14236 SEND_BTH_QP_KDETH_QP_SHIFT);
14238 write_csr(dd, RCV_BTH_QP,
14239 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14240 RCV_BTH_QP_KDETH_QP_SHIFT);
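/*
 * Illustrative check of what the prefix selects (a sketch, not driver
 * code; 0x80 is an example prefix, not necessarily DEFAULT_KDETH_QP):
 *
 *	u32 qpn = 0x801234;
 *	bool is_kdeth = ((qpn >> 16) & 0xff) == kdeth_qp;
 *
 * i.e. hardware compares DestQP[23:16] against the programmed prefix.
 */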
14245 * @dd - device data
14246 * @first_ctxt - first context
14247 * @last_ctxt - last context
14249 * This routine sets the qpn mapping table that
14250 * is indexed by qpn[8:1].
14252 * The routine will round robin the 256 settings
14253 * from first_ctxt to last_ctxt.
14255 * The first/last looks ahead to having specialized
14256 * receive contexts for mgmt and bypass. Normal
14257 * verbs traffic is assumed to be on a range
14258 * of receive contexts.
14260 static void init_qpmap_table(struct hfi1_devdata *dd,
14265 u64 regno = RCV_QP_MAP_TABLE;
14267 u64 ctxt = first_ctxt;
14269 for (i = 0; i < 256; i++) {
14270 reg |= ctxt << (8 * (i % 8));
14272 if (ctxt > last_ctxt)
14275 write_csr(dd, regno, reg);
14281 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14282 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
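/*
 * Sketch of the packing performed above (derived from the loop, nothing
 * new assumed): each 64-bit map register holds 8 one-byte context
 * entries indexed by QPN[8:1]. For first_ctxt = 0 and last_ctxt = 2 the
 * first register reads 0x0100020100020100, i.e. byte i = i mod 3.
 */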
14285 struct rsm_map_table {
14286 u64 map[NUM_MAP_REGS];
14290 struct rsm_rule_data {
14306 * Return an initialized RMT map table for users to fill in. OK if it
14307 * returns NULL, indicating no table.
14309 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14311 struct rsm_map_table *rmt;
14312 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is the default on A0 hardware */
14314 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
if (rmt) /* kmalloc may fail; callers tolerate a NULL table */
14316 memset(rmt->map, rxcontext, sizeof(rmt->map));
14324 * Write the final RMT map table to the chip and free the table. OK if
 * the table is NULL.
14327 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14328 struct rsm_map_table *rmt)
14333 /* write table to chip */
14334 for (i = 0; i < NUM_MAP_REGS; i++)
14335 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14338 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14343 * Add a receive side mapping rule.
14345 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14346 struct rsm_rule_data *rrd)
14348 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14349 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14350 1ull << rule_index | /* enable bit */
14351 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14352 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14353 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14354 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14355 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14356 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14357 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14358 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14359 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14360 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14361 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14362 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14363 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14367 * Clear a receive side mapping rule.
14369 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14371 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14372 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14373 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14376 /* return the number of RSM map table entries that will be used for QOS */
14377 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14384 /* is QOS active at all? */
14385 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14390 /* determine bits for qpn */
14391 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14392 if (krcvqs[i] > max_by_vl)
14393 max_by_vl = krcvqs[i];
14394 if (max_by_vl > 32)
14396 m = ilog2(__roundup_pow_of_two(max_by_vl));
14398 /* determine bits for vl */
14399 n = ilog2(__roundup_pow_of_two(num_vls));
14401 /* reject if too much is used */
14410 return 1 << (m + n);
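/*
 * Worked example (values only, following the code above): with
 * num_vls = 8 and a largest krcvqs[] entry of 4, m = ilog2(4) = 2 and
 * n = ilog2(8) = 3, so the QOS rule consumes 1 << (2 + 3) = 32 RSM map
 * table entries.
 */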
14421 * init_qos - init RX qos
14422 * @dd - device data
14423 * @rmt - RSM map table
14425 * This routine initializes Rule 0 and the RSM map table to implement
14426 * quality of service (qos).
14428 * If all of the limit tests succeed, qos is applied based on the array
14429 * interpretation of krcvqs where entry 0 is VL0.
14431 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14432 * feed both the RSM map table and the single rule.
14434 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14436 struct rsm_rule_data rrd;
14437 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14438 unsigned int rmt_entries;
14443 rmt_entries = qos_rmt_entries(dd, &m, &n);
14444 if (rmt_entries == 0)
14446 qpns_per_vl = 1 << m;
14448 /* enough room in the map table? */
14449 rmt_entries = 1 << (m + n);
14450 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14453 /* add qos entries to the RSM map table */
14454 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14457 for (qpn = 0, tctxt = ctxt;
14458 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14459 unsigned idx, regoff, regidx;
14461 /* generate the index the hardware will produce */
14462 idx = rmt->used + ((qpn << n) ^ i);
14463 regoff = (idx % 8) * 8;
14465 /* replace default with context number */
14466 reg = rmt->map[regidx];
14467 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14469 reg |= (u64)(tctxt++) << regoff;
14470 rmt->map[regidx] = reg;
14471 if (tctxt == ctxt + krcvqs[i])
14477 rrd.offset = rmt->used;
14479 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14480 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14481 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14482 rrd.index1_width = n;
14483 rrd.index2_off = QPN_SELECT_OFFSET;
14484 rrd.index2_width = m + n;
14485 rrd.mask1 = LRH_BTH_MASK;
14486 rrd.value1 = LRH_BTH_VALUE;
14487 rrd.mask2 = LRH_SC_MASK;
14488 rrd.value2 = LRH_SC_VALUE;
14491 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14493 /* mark RSM map entries as used */
14494 rmt->used += rmt_entries;
14495 /* map everything else to the mcast/err/vl15 context */
14496 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14497 dd->qos_shift = n + 1;
14501 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
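/*
 * Worked example of the RSM index computed in init_qos() above (follows
 * the idx expression, nothing new assumed): with n = 1 (2 VLs) and
 * qpns_per_vl = 4, idx = used + ((qpn << 1) ^ vl) places VL0 at
 * used + {0, 2, 4, 6} and VL1 at used + {1, 3, 5, 7}, interleaving the
 * VLs across the table.
 */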
14504 static void init_user_fecn_handling(struct hfi1_devdata *dd,
14505 struct rsm_map_table *rmt)
14507 struct rsm_rule_data rrd;
14509 int i, idx, regoff, regidx;
14512 /* there needs to be enough room in the map table */
14513 if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
14514 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
14519 * RSM will extract the destination context as an index into the
14520 * map table. The destination contexts are a sequential block
14521 * in the range first_dyn_alloc_ctxt...num_rcv_contexts-1 (inclusive).
14522 * Map entries are accessed as offset + extracted value. Adjust
14523 * the added offset so this sequence can be placed anywhere in
14524 * the table - as long as the entries themselves do not wrap.
14525 * There are only enough bits in offset for the table size, so
14526 * start with that to allow for a "negative" offset.
14528 offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
14529 (int)dd->first_dyn_alloc_ctxt);
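/*
 * Worked example of the "negative" offset above (arithmetic only, with
 * NUM_MAP_ENTRIES taken as 256): for rmt->used = 40 and
 * first_dyn_alloc_ctxt = 8, offset = (u8)(256 + 40 - 8) = 32, so an
 * extracted context of 8 lands on entry (32 + 8) mod 256 = 40, the
 * first free map entry.
 */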
14531 for (i = dd->first_dyn_alloc_ctxt, idx = rmt->used;
14532 i < dd->num_rcv_contexts; i++, idx++) {
14533 /* replace with identity mapping */
14534 regoff = (idx % 8) * 8;
14536 reg = rmt->map[regidx];
14537 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14538 reg |= (u64)i << regoff;
14539 rmt->map[regidx] = reg;
14543 * For RSM intercept of Expected FECN packets:
14544 * o packet type 0 - expected
14545 * o match on F (bit 95), using select/match 1, and
14546 * o match on SH (bit 133), using select/match 2.
14548 * Use index 1 to extract the 8-bit receive context from DestQP
14549 * (start at bit 64). Use that as the RSM map table index.
14551 rrd.offset = offset;
14553 rrd.field1_off = 95;
14554 rrd.field2_off = 133;
14555 rrd.index1_off = 64;
14556 rrd.index1_width = 8;
14557 rrd.index2_off = 0;
14558 rrd.index2_width = 0;
14565 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14567 rmt->used += dd->num_user_contexts;
14570 /* Initialize RSM for VNIC */
14571 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14577 struct rsm_rule_data rrd;
14579 if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14580 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14581 dd->vnic.rmt_start);
14585 dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14586 dd->vnic.rmt_start,
14587 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14589 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14590 regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14591 reg = read_csr(dd, regoff);
14592 for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14593 /* Update map register with vnic context */
14594 j = (dd->vnic.rmt_start + i) % 8;
14595 reg &= ~(0xffllu << (j * 8));
14596 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14597 /* Wrap up vnic ctx index */
14598 ctx_id %= dd->vnic.num_ctxt;
14599 /* Write back map register */
14600 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14601 dev_dbg(&(dd)->pcidev->dev,
14602 "Vnic rsm map reg[%d] =0x%llx\n",
14603 regoff - RCV_RSM_MAP_TABLE, reg);
14605 write_csr(dd, regoff, reg);
14607 if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14608 reg = read_csr(dd, regoff);
14612 /* Add rule for vnic */
14613 rrd.offset = dd->vnic.rmt_start;
14615 /* Match 16B packets */
14616 rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14617 rrd.mask1 = L2_TYPE_MASK;
14618 rrd.value1 = L2_16B_VALUE;
14619 /* Match ETH L4 packets */
14620 rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14621 rrd.mask2 = L4_16B_TYPE_MASK;
14622 rrd.value2 = L4_16B_ETH_VALUE;
14623 /* Calc context from veswid and entropy */
14624 rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14625 rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14626 rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14627 rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14628 add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14630 /* Enable RSM if not already enabled */
14631 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14634 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14636 clear_rsm_rule(dd, RSM_INS_VNIC);
14638 /* Disable RSM if used only by vnic */
14639 if (dd->vnic.rmt_start == 0)
14640 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14643 static int init_rxe(struct hfi1_devdata *dd)
14645 struct rsm_map_table *rmt;
14648 /* enable all receive errors */
14649 write_csr(dd, RCV_ERR_MASK, ~0ull);
14651 rmt = alloc_rsm_map_table(dd);
14655 /* set up QOS, including the QPN map table */
14657 init_user_fecn_handling(dd, rmt);
14658 complete_rsm_map_table(dd, rmt);
14659 /* record number of used rsm map entries for vnic */
14660 dd->vnic.rmt_start = rmt->used;
14664 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14665 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14666 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14667 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14668 * Max_Payload_Size set to its minimum of 128.
14670 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14671 * (64 bytes). Max_Payload_Size is possibly modified upward in
14672 * tune_pcie_caps() which is called after this routine.
14675 /* Have 16 bytes (4DW) of bypass header available in header queue */
14676 val = read_csr(dd, RCV_BYPASS);
14677 val |= (4ull << 16);
14678 write_csr(dd, RCV_BYPASS, val);
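/*
 * The OR above is val |= 0x40000: a 4 written into the dword-count
 * field at bit 16, i.e. 4 DWs = 16 bytes of bypass header room (field
 * position taken from the code above, not re-derived from the CSR
 * spec).
 */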
14682 static void init_other(struct hfi1_devdata *dd)
14684 /* enable all CCE errors */
14685 write_csr(dd, CCE_ERR_MASK, ~0ull);
14686 /* enable *some* Misc errors */
14687 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14688 /* enable all DC errors, except LCB */
14689 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14690 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14694 * Fill out the given AU table using the given CU. A CU is defined in terms
14695 * of AUs. The table is an encoding: given the index, how many AUs does
 * that index represent?
14698 * NOTE: Assumes that the register layout is the same for the
14699 * local and remote tables.
14701 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14702 u32 csr0to3, u32 csr4to7)
14704 write_csr(dd, csr0to3,
14705 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14706 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14708 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14710 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14711 write_csr(dd, csr4to7,
14713 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14715 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14717 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14719 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
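/*
 * Example of the resulting encoding (a sketch; the multipliers on the
 * elided entries are assumed to continue the power-of-two progression
 * started by entries 0 and 1): with cu = 1 the table would read
 * {0, 1, 2, 4, 8, 16, 32, 64} AUs for indices 0 through 7.
 */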
14722 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14724 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14725 SEND_CM_LOCAL_AU_TABLE4_TO7);
14728 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14730 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14731 SEND_CM_REMOTE_AU_TABLE4_TO7);
14734 static void init_txe(struct hfi1_devdata *dd)
14738 /* enable all PIO, SDMA, general, and Egress errors */
14739 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14740 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14741 write_csr(dd, SEND_ERR_MASK, ~0ull);
14742 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14744 /* enable all per-context and per-SDMA engine errors */
14745 for (i = 0; i < dd->chip_send_contexts; i++)
14746 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14747 for (i = 0; i < dd->chip_sdma_engines; i++)
14748 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14750 /* set the local CU to AU mapping */
14751 assign_local_cm_au_table(dd, dd->vcu);
14754 * Set reasonable default for Credit Return Timer
14755 * Don't set on Simulator - causes it to choke.
14757 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14758 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14761 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14767 if (!rcd || !rcd->sc)
14770 hw_ctxt = rcd->sc->hw_context;
14771 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14772 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14773 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14774 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14775 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14776 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14777 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14779 * Enable send-side J_KEY integrity check, unless this is A0 h/w
14782 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14783 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14784 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14787 /* Enable J_KEY check on receive context. */
14788 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14789 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14790 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14791 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14796 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14801 if (!rcd || !rcd->sc)
14804 hw_ctxt = rcd->sc->hw_context;
14805 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14807 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14808 * This check would not have been enabled for A0 h/w, see
14812 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14813 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14814 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14816 /* Turn off the J_KEY on the receive side */
14817 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14822 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14828 if (!rcd || !rcd->sc)
14831 hw_ctxt = rcd->sc->hw_context;
14832 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14833 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14834 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14835 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14836 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14837 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14838 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14843 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14848 if (!ctxt || !ctxt->sc)
14851 hw_ctxt = ctxt->sc->hw_context;
14852 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14853 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14854 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14855 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14861 * Start doing the clean up of the chip. Our clean up happens in multiple
14862 * stages and this is just the first.
14864 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14869 finish_chip_resources(dd);
14872 #define HFI_BASE_GUID(dev) \
14873 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14876 * Information can be shared between the two HFIs on the same ASIC
14877 * in the same OS. This function finds the peer device and sets
14878 * up a shared structure.
14880 static int init_asic_data(struct hfi1_devdata *dd)
14882 unsigned long flags;
14883 struct hfi1_devdata *tmp, *peer = NULL;
14884 struct hfi1_asic_data *asic_data;
14887 /* pre-allocate the asic structure in case we are the first device */
14888 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14892 spin_lock_irqsave(&hfi1_devs_lock, flags);
14893 /* Find our peer device */
14894 list_for_each_entry(tmp, &hfi1_dev_list, list) {
14895 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14896 dd->unit != tmp->unit) {
14903 /* use already allocated structure */
14904 dd->asic_data = peer->asic_data;
14907 dd->asic_data = asic_data;
14908 mutex_init(&dd->asic_data->asic_resource_mutex);
14910 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14911 spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14913 /* first one through - set up i2c devices */
14915 ret = set_up_i2c(dd, dd->asic_data);
14921 * Set dd->boardname. Use a generic name if a name is not returned from
14922 * EFI variable space.
14924 * Return 0 on success, -ENOMEM if space could not be allocated.
14926 static int obtain_boardname(struct hfi1_devdata *dd)
14928 /* generic board description */
14929 const char generic[] =
14930 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14931 unsigned long size;
14934 ret = read_hfi1_efi_var(dd, "description", &size,
14935 (void **)&dd->boardname);
14937 dd_dev_info(dd, "Board description not found\n");
14938 /* use generic description */
14939 dd->boardname = kstrdup(generic, GFP_KERNEL);
14940 if (!dd->boardname)
14947 * Check the interrupt registers to make sure that they are mapped correctly.
14948 * It is intended to help the user identify any mismapping by the VMM when
14949 * the driver is running in a VM. This function should only be called before
14950 * interrupts are set up properly.
14952 * Return 0 on success, -EINVAL on failure.
14954 static int check_int_registers(struct hfi1_devdata *dd)
14957 u64 all_bits = ~(u64)0;
14960 /* Clear CceIntMask[0] to avoid raising any interrupts */
14961 mask = read_csr(dd, CCE_INT_MASK);
14962 write_csr(dd, CCE_INT_MASK, 0ull);
14963 reg = read_csr(dd, CCE_INT_MASK);
14967 /* Clear all interrupt status bits */
14968 write_csr(dd, CCE_INT_CLEAR, all_bits);
14969 reg = read_csr(dd, CCE_INT_STATUS);
14973 /* Set all interrupt status bits */
14974 write_csr(dd, CCE_INT_FORCE, all_bits);
14975 reg = read_csr(dd, CCE_INT_STATUS);
14976 if (reg != all_bits)
14979 /* Restore the interrupt mask */
14980 write_csr(dd, CCE_INT_CLEAR, all_bits);
14981 write_csr(dd, CCE_INT_MASK, mask);
14985 write_csr(dd, CCE_INT_MASK, mask);
14986 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14991 * Allocate and initialize the device structure for the hfi.
14992 * @dev: the pci_dev for hfi1_ib device
14993 * @ent: pci_device_id struct for this dev
14995 * Also allocates, initializes, and returns the devdata struct for this
14998 * This is global, and is called directly at init to set up the
14999 * chip-specific function pointers for later use.
15001 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
15002 const struct pci_device_id *ent)
15004 struct hfi1_devdata *dd;
15005 struct hfi1_pportdata *ppd;
15008 static const char * const inames[] = { /* implementation names */
15010 "RTL VCS simulation",
15011 "RTL FPGA emulation",
15012 "Functional simulator"
15014 struct pci_dev *parent = pdev->bus->self;
15016 dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
15017 sizeof(struct hfi1_pportdata));
15021 for (i = 0; i < dd->num_pports; i++, ppd++) {
15023 /* init common fields */
15024 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
15025 /* DC supports 4 link widths */
15026 ppd->link_width_supported =
15027 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
15028 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
15029 ppd->link_width_downgrade_supported =
15030 ppd->link_width_supported;
15031 /* start out enabling only 4X */
15032 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
15033 ppd->link_width_downgrade_enabled =
15034 ppd->link_width_downgrade_supported;
15035 /* link width active is 0 when link is down */
15036 /* link width downgrade active is 0 when link is down */
15038 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
15039 num_vls > HFI1_MAX_VLS_SUPPORTED) {
15040 hfi1_early_err(&pdev->dev,
15041 "Invalid num_vls %u, using %u VLs\n",
15042 num_vls, HFI1_MAX_VLS_SUPPORTED);
15043 num_vls = HFI1_MAX_VLS_SUPPORTED;
15045 ppd->vls_supported = num_vls;
15046 ppd->vls_operational = ppd->vls_supported;
15047 /* Set the default MTU. */
15048 for (vl = 0; vl < num_vls; vl++)
15049 dd->vld[vl].mtu = hfi1_max_mtu;
15050 dd->vld[15].mtu = MAX_MAD_PACKET;
15052 * Set the initial values to a reasonable default; they will be set
15053 * for real when the link is up.
15055 ppd->overrun_threshold = 0x4;
15056 ppd->phy_error_threshold = 0xf;
15057 ppd->port_crc_mode_enabled = link_crc_mask;
15058 /* initialize supported LTP CRC mode */
15059 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
15060 /* initialize enabled LTP CRC mode */
15061 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
15062 /* start in offline */
15063 ppd->host_link_state = HLS_DN_OFFLINE;
15064 init_vl_arb_caches(ppd);
15067 dd->link_default = HLS_DN_POLL;
15070 * Do remaining PCIe setup and save PCIe values in dd.
15071 * Any error printing is already done by the init code.
15072 * On return, we have the chip mapped.
15074 ret = hfi1_pcie_ddinit(dd, pdev);
15078 /* Save PCI space registers to rewrite after device reset */
15079 ret = save_pci_variables(dd);
15083 /* verify that reads actually work, save revision for reset check */
15084 dd->revision = read_csr(dd, CCE_REVISION);
15085 if (dd->revision == ~(u64)0) {
15086 dd_dev_err(dd, "cannot read chip CSRs\n");
15090 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
15091 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
15092 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
15093 & CCE_REVISION_CHIP_REV_MINOR_MASK;
15096 * Check interrupt registers mapping if the driver has no access to
15097 * the upstream component. In this case, it is likely that the driver
15098 * is running in a VM.
15101 ret = check_int_registers(dd);
15107 * obtain the hardware ID - NOT related to unit, which is a
15108 * software enumeration
15110 reg = read_csr(dd, CCE_REVISION2);
15111 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
15112 & CCE_REVISION2_HFI_ID_MASK;
15113 /* the variable size will remove unwanted bits */
15114 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
15115 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
15116 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
15117 dd->icode < ARRAY_SIZE(inames) ?
15118 inames[dd->icode] : "unknown", (int)dd->irev);
15120 /* speeds the hardware can support */
15121 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
15122 /* speeds allowed to run at */
15123 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
15124 /* give a reasonable active value, will be set on link up */
15125 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
15127 dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
15128 dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
15129 dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
15130 dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
15131 dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
15132 /* fix up link widths for emulation _p */
15134 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
15135 ppd->link_width_supported =
15136 ppd->link_width_enabled =
15137 ppd->link_width_downgrade_supported =
15138 ppd->link_width_downgrade_enabled =
15141 /* ensure num_vls isn't larger than number of sdma engines */
15142 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
15143 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
15144 num_vls, dd->chip_sdma_engines);
15145 num_vls = dd->chip_sdma_engines;
15146 ppd->vls_supported = dd->chip_sdma_engines;
15147 ppd->vls_operational = ppd->vls_supported;
15151 * Convert the ns parameter to the 64 * cclocks used in the CSR.
15152 * Limit the max if larger than the field holds. If timeout is
15153 * non-zero, then the calculated field will be at least 1.
15155 * Must be after icode is set up - the cclock rate depends
15156 * on knowing the hardware being used.
15158 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
15159 if (dd->rcv_intr_timeout_csr >
15160 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
15161 dd->rcv_intr_timeout_csr =
15162 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
15163 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
15164 dd->rcv_intr_timeout_csr = 1;
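/*
 * Worked example of the conversion above (the 2ns cclock period is an
 * assumed illustration; the real rate comes from ns_to_cclock()):
 * 840ns / 2ns = 420 cclocks, giving 420 / 64 = 6 (integer division) in
 * the 64-cclock units of the CSR field.
 */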
15166 /* needs to be done before we look for the peer device */
15169 /* set up shared ASIC data with peer device */
15170 ret = init_asic_data(dd);
15174 /* obtain chip sizes, reset chip CSRs */
15175 ret = init_chip(dd);
15179 /* read in the PCIe link speed information */
15180 ret = pcie_speeds(dd);
15184 /* call before get_platform_config(), after init_chip_resources() */
15185 ret = eprom_init(dd);
15187 goto bail_free_rcverr;
15189 /* Needs to be called before hfi1_firmware_init */
15190 get_platform_config(dd);
15192 /* read in firmware */
15193 ret = hfi1_firmware_init(dd);
15198 * In general, the PCIe Gen3 transition must occur after the
15199 * chip has been idled (so it won't initiate any PCIe transactions
15200 * e.g. an interrupt) and before the driver changes any registers
15201 * (the transition will reset the registers).
15203 * In particular, place this call after:
15204 * - init_chip() - the chip will not initiate any PCIe transactions
15205 * - pcie_speeds() - reads the current link speed
15206 * - hfi1_firmware_init() - the needed firmware is ready to be
15209 ret = do_pcie_gen3_transition(dd);
15213 /* start setting dd values and adjusting CSRs */
15214 init_early_variables(dd);
15216 parse_platform_config(dd);
15218 ret = obtain_boardname(dd);
15222 snprintf(dd->boardversion, BOARD_VERS_MAX,
15223 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15224 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15227 (dd->revision >> CCE_REVISION_SW_SHIFT)
15228 & CCE_REVISION_SW_MASK);
15230 ret = set_up_context_variables(dd);
15234 /* set initial RXE CSRs */
15235 ret = init_rxe(dd);
15239 /* set initial TXE CSRs */
15241 /* set initial non-RXE, non-TXE CSRs */
15243 /* set up KDETH QP prefix in both RX and TX CSRs */
15246 ret = hfi1_dev_affinity_init(dd);
15250 /* send contexts must be set up before receive contexts */
15251 ret = init_send_contexts(dd);
15255 ret = hfi1_create_kctxts(dd);
15260 * Initialize aspm, to be done after gen3 transition and setting up
15261 * contexts and before enabling interrupts
15265 dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
15267 * rcd[0] is guaranteed to be valid by this point. Also, all
15268 * contexts are using the same value, as per the module parameter.
15270 dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);
15272 ret = init_pervl_scs(dd);
15277 for (i = 0; i < dd->num_pports; ++i) {
15278 ret = sdma_init(dd, i);
15283 /* use contexts created by hfi1_create_kctxts */
15284 ret = set_up_interrupts(dd);
15288 /* set up LCB access - must be after set_up_interrupts() */
15289 init_lcb_access(dd);
15292 * Serial number is created from the base guid:
15293 * [27:24] = base guid [38:35]
15294 * [23: 0] = base guid [23: 0]
15296 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15297 (dd->base_guid & 0xFFFFFF) |
15298 ((dd->base_guid >> 11) & 0xF000000));
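/*
 * Bit-mapping check for the serial number above (follows from the
 * masks, nothing new assumed): the >> 11 moves base_guid bits [38:35]
 * down to bits [27:24], where 0xF000000 keeps them; 0xFFFFFF keeps
 * bits [23:0] in place, matching the layout described in the comment.
 */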
15300 dd->oui1 = dd->base_guid >> 56 & 0xFF;
15301 dd->oui2 = dd->base_guid >> 48 & 0xFF;
15302 dd->oui3 = dd->base_guid >> 40 & 0xFF;
15304 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15306 goto bail_clear_intr;
15310 ret = init_cntrs(dd);
15312 goto bail_clear_intr;
15314 ret = init_rcverr(dd);
15316 goto bail_free_cntrs;
15318 init_completion(&dd->user_comp);
15320 /* The user refcount starts with one to indicate an active device */
15321 atomic_set(&dd->user_refcount, 1);
15330 hfi1_clean_up_interrupts(dd);
15332 hfi1_pcie_ddcleanup(dd);
15334 hfi1_free_devdata(dd);
15340 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15344 u32 current_egress_rate = ppd->current_egress_rate;
15345 /* rates here are in units of 10^6 bits/sec */
15347 if (desired_egress_rate == -1)
15348 return 0; /* shouldn't happen */
15350 if (desired_egress_rate >= current_egress_rate)
15351 return 0; /* we can't help go faster, only slower */
15353 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15354 egress_cycles(dw_len * 4, current_egress_rate);
15356 return (u16)delta_cycles;
15360 * create_pbc - build a pbc for transmission
15361 * @flags: special case flags or-ed in built pbc
15362 * @srate: static rate
15364 * @dwlen: dword length (header words + data words + pbc words)
15366 * Create a PBC with the given flags, rate, VL, and length.
15368 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15369 * for verbs, which does not use this PSM feature. The lone other caller
15370 * is for the diagnostic interface which calls this if the user does not
15371 * supply their own PBC.
15373 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15376 u64 pbc, delay = 0;
15378 if (unlikely(srate_mbs))
15379 delay = delay_cycles(ppd, srate_mbs, dw_len);
15382 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15383 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15384 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15385 | (dw_len & PBC_LENGTH_DWS_MASK)
15386 << PBC_LENGTH_DWS_SHIFT;
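/*
 * Example call (illustrative values only): build a PBC for a 32-dword
 * VL0 send with no special flags and no static rate limiting:
 *
 *	u64 pbc = create_pbc(ppd, 0, 0, 0, 32);
 */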
15391 #define SBUS_THERMAL 0x4f
15392 #define SBUS_THERM_MONITOR_MODE 0x1
15394 #define THERM_FAILURE(dev, ret, reason) \
15396 "Thermal sensor initialization failed: %s (%d)\n", \
15400 * Initialize the thermal sensor.
15402 * After initialization, enable polling of thermal sensor through
15403 * SBus interface. In order for this to work, the SBus Master
15404 * firmware has to be loaded, since the HW polling
15405 * logic uses SBus interrupts, which are not supported with
15406 * default firmware. Otherwise, no data will be returned through
15407 * the ASIC_STS_THERM CSR.
15409 static int thermal_init(struct hfi1_devdata *dd)
15413 if (dd->icode != ICODE_RTL_SILICON ||
15414 check_chip_resource(dd, CR_THERM_INIT, NULL))
15417 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15419 THERM_FAILURE(dd, ret, "Acquire SBus");
15423 dd_dev_info(dd, "Initializing thermal sensor\n");
15424 /* Disable polling of thermal readings */
15425 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15427 /* Thermal Sensor Initialization */
15428 /* Step 1: Reset the Thermal SBus Receiver */
15429 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15430 RESET_SBUS_RECEIVER, 0);
15432 THERM_FAILURE(dd, ret, "Bus Reset");
15435 /* Step 2: Set Reset bit in Thermal block */
15436 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15437 WRITE_SBUS_RECEIVER, 0x1);
15439 THERM_FAILURE(dd, ret, "Therm Block Reset");
15442 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
15443 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15444 WRITE_SBUS_RECEIVER, 0x32);
15446 THERM_FAILURE(dd, ret, "Write Clock Div");
15449 /* Step 4: Select temperature mode */
15450 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15451 WRITE_SBUS_RECEIVER,
15452 SBUS_THERM_MONITOR_MODE);
15454 THERM_FAILURE(dd, ret, "Write Mode Sel");
15457 /* Step 5: De-assert block reset and start conversion */
15458 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15459 WRITE_SBUS_RECEIVER, 0x2);
15461 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15464 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
15467 /* Enable polling of thermal readings */
15468 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15470 /* Set initialized flag */
15471 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15473 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15476 release_chip_resource(dd, CR_SBUS);
15480 static void handle_temp_err(struct hfi1_devdata *dd)
15482 struct hfi1_pportdata *ppd = &dd->pport[0];
15484 * Thermal Critical Interrupt
15485 * Put the device into forced freeze mode, take link down to
15486 * offline, and put DC into reset.
15489 "Critical temperature reached! Forcing device into freeze mode!\n");
15490 dd->flags |= HFI1_FORCED_FREEZE;
15491 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15493 * Shut DC down as much and as quickly as possible.
15495 * Step 1: Take the link down to OFFLINE. This will cause the
15496 * 8051 to put the Serdes in reset. However, we don't want to
15497 * go through the entire link state machine since we want to
15498 * shutdown ASAP. Furthermore, this is not a graceful shutdown
15499 * but rather an attempt to save the chip.
15500 * Code below is almost the same as quiet_serdes() but avoids
15501 * all the extra work and the sleeps.
15503 ppd->driver_link_ready = 0;
15504 ppd->link_enabled = 0;
15505 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15508 * Step 2: Shutdown LCB and 8051
15509 * After shutdown, do not restore DC_CFG_RESET value.