1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4 #include <linux/slab.h>
8 /* Read a NIC register from the alternate function. */
9 static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
/* Proxies the read through the MPI register interface: the register address
 * is rebased into the MPI NIC register block of qdev->alt_func.
 * NOTE(review): interior lines of this function are elided in this listing
 * (remaining parameters, local declarations, error handling not visible).
 */
14 unsigned int status = 0;
16 register_to_read = MPI_NIC_REG_BLOCK
18 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
20 status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
27 /* Write a NIC register from the alternate function. */
28 static int ql_write_other_func_reg(struct ql_adapter *qdev,
/* The target register address is rebased into the MPI NIC register block of
 * the alternate function (qdev->alt_func) and the write is proxied via MPI.
 * NOTE(review): interior lines elided in this listing.
 */
33 register_to_read = MPI_NIC_REG_BLOCK
35 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
38 return ql_write_mpi_reg(qdev, register_to_read, reg_val);
41 static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
/* Polls a register of the alternate function, up to 10 iterations, waiting
 * for a ready condition.  The error-bit check and the delay/timeout return
 * paths are elided in this listing.
 */
47 for (count = 10; count; count--) {
48 temp = ql_read_other_func_reg(qdev, reg);
50 /* check for errors */
60 static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
/* SERDES register read on the alternate function: wait for ready, issue the
 * read (PROC_ADDR_R), wait for ready again, then fetch the data word.
 * NOTE(review): the early-return error paths after each wait are elided.
 */
65 /* wait for reg to come ready */
66 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
67 XG_SERDES_ADDR_RDY, 0);
71 /* set up for reg read */
72 ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R);
74 /* wait for reg to come ready */
75 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
76 XG_SERDES_ADDR_RDY, 0);
81 *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
86 /* Read out the SERDES registers */
87 static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
/* Direct-function variant of the SERDES read: wait-ready, issue the read,
 * wait-ready, then fetch XG_SERDES_DATA into *data.
 * NOTE(review): error-return lines after each wait are elided here.
 */
91 /* wait for reg to come ready */
92 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
96 /* set up for reg read */
97 ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
99 /* wait for reg to come ready */
100 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
105 *data = ql_read32(qdev, XG_SERDES_DATA);
110 static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
111 u32 *direct_ptr, u32 *indirect_ptr,
112 bool direct_valid, bool indirect_valid)
/* Reads one SERDES address for both this function (direct) and the
 * alternate function (indirect).  A failed or invalid read is dead-filled
 * with the sentinel 0xDEADBEEF so the dump layout stays fixed.
 */
118 status = ql_read_serdes_reg(qdev, addr, direct_ptr);
119 /* Dead fill any failures or invalids. */
121 *direct_ptr = 0xDEADBEEF;
125 status = ql_read_other_func_serdes_reg(
126 qdev, addr, indirect_ptr);
127 /* Dead fill any failures or invalids. */
129 *indirect_ptr = 0xDEADBEEF;
132 static int ql_get_serdes_regs(struct ql_adapter *qdev,
133 struct ql_mpi_coredump *mpi_coredump)
/* Dumps all SERDES register blocks (XAUI AN/HSS_PCS and the XFI AN, TRAIN,
 * HSS_PCS, HSS_TX, HSS_RX, HSS_PLL blocks) for both NIC functions into the
 * coredump.  Validity of each block is determined first from the power
 * state bits; direct vs. indirect destination arrays are selected by
 * qdev->func & 1 (odd function dumps itself into the "serdes2" arrays).
 * NOTE(review): many interior lines (if/else bodies, break/return paths)
 * are elided in this listing.
 */
136 bool xfi_direct_valid = false, xfi_indirect_valid = false;
137 bool xaui_direct_valid = true, xaui_indirect_valid = true;
139 u32 *direct_ptr, temp;
142 /* The XAUI needs to be read out per port */
143 status = ql_read_other_func_serdes_reg(qdev,
144 XG_SERDES_XAUI_HSS_PCS_START,
147 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
149 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
150 XG_SERDES_ADDR_XAUI_PWR_DOWN)
151 xaui_indirect_valid = false;
153 status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);
156 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
158 if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
159 XG_SERDES_ADDR_XAUI_PWR_DOWN)
160 xaui_direct_valid = false;
163 * XFI register is shared so only need to read one
164 * functions and then check the bits.
166 status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
170 if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
171 XG_SERDES_ADDR_XFI1_PWR_UP) {
172 /* now see if i'm NIC 1 or NIC 2 */
174 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
175 xfi_indirect_valid = true;
177 xfi_direct_valid = true;
179 if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
180 XG_SERDES_ADDR_XFI2_PWR_UP) {
181 /* now see if i'm NIC 1 or NIC 2 */
183 /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
184 xfi_direct_valid = true;
186 xfi_indirect_valid = true;
189 /* Get XAUI_AN register block. */
190 if (qdev->func & 1) {
191 /* Function 2 is direct */
192 direct_ptr = mpi_coredump->serdes2_xaui_an;
193 indirect_ptr = mpi_coredump->serdes_xaui_an;
195 /* Function 1 is direct */
196 direct_ptr = mpi_coredump->serdes_xaui_an;
197 indirect_ptr = mpi_coredump->serdes2_xaui_an;
200 for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
201 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
202 xaui_direct_valid, xaui_indirect_valid);
204 /* Get XAUI_HSS_PCS register block. */
205 if (qdev->func & 1) {
207 mpi_coredump->serdes2_xaui_hss_pcs;
209 mpi_coredump->serdes_xaui_hss_pcs;
212 mpi_coredump->serdes_xaui_hss_pcs;
214 mpi_coredump->serdes2_xaui_hss_pcs;
217 for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
218 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
219 xaui_direct_valid, xaui_indirect_valid);
221 /* Get XAUI_XFI_AN register block. */
222 if (qdev->func & 1) {
223 direct_ptr = mpi_coredump->serdes2_xfi_an;
224 indirect_ptr = mpi_coredump->serdes_xfi_an;
226 direct_ptr = mpi_coredump->serdes_xfi_an;
227 indirect_ptr = mpi_coredump->serdes2_xfi_an;
230 for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
231 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
232 xfi_direct_valid, xfi_indirect_valid);
234 /* Get XAUI_XFI_TRAIN register block. */
235 if (qdev->func & 1) {
236 direct_ptr = mpi_coredump->serdes2_xfi_train;
238 mpi_coredump->serdes_xfi_train;
240 direct_ptr = mpi_coredump->serdes_xfi_train;
242 mpi_coredump->serdes2_xfi_train;
245 for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
246 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
247 xfi_direct_valid, xfi_indirect_valid);
249 /* Get XAUI_XFI_HSS_PCS register block. */
250 if (qdev->func & 1) {
252 mpi_coredump->serdes2_xfi_hss_pcs;
254 mpi_coredump->serdes_xfi_hss_pcs;
257 mpi_coredump->serdes_xfi_hss_pcs;
259 mpi_coredump->serdes2_xfi_hss_pcs;
262 for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
263 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
264 xfi_direct_valid, xfi_indirect_valid);
266 /* Get XAUI_XFI_HSS_TX register block. */
267 if (qdev->func & 1) {
269 mpi_coredump->serdes2_xfi_hss_tx;
271 mpi_coredump->serdes_xfi_hss_tx;
273 direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
275 mpi_coredump->serdes2_xfi_hss_tx;
/* NOTE(review): the HSS_TX/RX/PLL loops below step by 1 (i++), unlike the
 * i += 4 word-stepping above — presumably these are word indices rather
 * than byte offsets; confirm against the SERDES register map.
 */
277 for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
278 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
279 xfi_direct_valid, xfi_indirect_valid);
281 /* Get XAUI_XFI_HSS_RX register block. */
282 if (qdev->func & 1) {
284 mpi_coredump->serdes2_xfi_hss_rx;
286 mpi_coredump->serdes_xfi_hss_rx;
288 direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
290 mpi_coredump->serdes2_xfi_hss_rx;
293 for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
294 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
295 xfi_direct_valid, xfi_indirect_valid);
297 /* Get XAUI_XFI_HSS_PLL register block. */
298 if (qdev->func & 1) {
300 mpi_coredump->serdes2_xfi_hss_pll;
302 mpi_coredump->serdes_xfi_hss_pll;
305 mpi_coredump->serdes_xfi_hss_pll;
307 mpi_coredump->serdes2_xfi_hss_pll;
309 for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
310 ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
311 xfi_direct_valid, xfi_indirect_valid);
315 static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
/* XGMAC register read on the alternate function: wait for ready (rejecting
 * the XME error bit), issue the read, wait for ready again, fetch the data.
 * NOTE(review): the error-return lines after each wait are elided here.
 */
320 /* wait for reg to come ready */
321 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
322 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
326 /* set up for reg read */
327 ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
329 /* wait for reg to come ready */
330 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
331 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
336 *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
341 /* Read the 400 xgmac control/statistics registers
342 * skipping unused locations.
/* When other_function is non-zero, the reads are proxied to the alternate
 * NIC function; otherwise this function's own XGMAC space is read.  The
 * long condition below identifies address holes that are non-responsive
 * to reads (the skip-branch body, which presumably dead-fills the buffer,
 * is elided in this listing).
 */
344 static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
345 unsigned int other_function)
350 for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
351 /* We're reading 400 xgmac registers, but we filter out
352 * several locations that are non-responsive to reads.
354 if ((i == 0x00000114) ||
358 (i > 0x00000150 && i < 0x000001fc) ||
359 (i > 0x00000278 && i < 0x000002a0) ||
360 (i > 0x000002c0 && i < 0x000002cf) ||
361 (i > 0x000002dc && i < 0x000002f0) ||
362 (i > 0x000003c8 && i < 0x00000400) ||
363 (i > 0x00000400 && i < 0x00000410) ||
364 (i > 0x00000410 && i < 0x00000420) ||
365 (i > 0x00000420 && i < 0x00000430) ||
366 (i > 0x00000430 && i < 0x00000440) ||
367 (i > 0x00000440 && i < 0x00000450) ||
368 (i > 0x00000450 && i < 0x00000500) ||
369 (i > 0x0000054c && i < 0x00000568) ||
370 (i > 0x000005c8 && i < 0x00000600)) {
373 ql_read_other_func_xgmac_reg(qdev, i, buf);
375 status = ql_read_xgmac_reg(qdev, i, buf);
385 static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
/* Dumps the 8 NIC ETS entries and 2 CNA ETS entries via indexed read-back:
 * write the index (bits 31:29) with 0x08000000 set — presumably a read
 * strobe, confirm against the register spec — then read the value back.
 */
389 for (i = 0; i < 8; i++, buf++) {
390 ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
391 *buf = ql_read32(qdev, NIC_ETS);
394 for (i = 0; i < 2; i++, buf++) {
395 ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
396 *buf = ql_read32(qdev, CNA_ETS);
402 static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
/* Captures the interrupt-enable state for every rx ring: write each
 * context's read mask to INTR_EN, then read the resulting state back.
 */
406 for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
407 ql_write32(qdev, INTR_EN,
408 qdev->intr_context[i].intr_read_mask);
409 *buf = ql_read32(qdev, INTR_EN);
413 static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
/* Dumps the 16 CAM MAC entries (3 words each) and 32 multicast entries
 * (2 words each) while holding the MAC-address semaphore.
 * NOTE(review): error-path lines between the netif_err() calls and the
 * buffer stores are elided in this listing.
 */
418 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
422 for (i = 0; i < 16; i++) {
423 status = ql_get_mac_addr_reg(qdev,
424 MAC_ADDR_TYPE_CAM_MAC, i, value);
426 netif_err(qdev, drv, qdev->ndev,
427 "Failed read of mac index register\n");
430 *buf++ = value[0]; /* lower MAC address */
431 *buf++ = value[1]; /* upper MAC address */
432 *buf++ = value[2]; /* output */
434 for (i = 0; i < 32; i++) {
435 status = ql_get_mac_addr_reg(qdev,
436 MAC_ADDR_TYPE_MULTI_MAC, i, value);
438 netif_err(qdev, drv, qdev->ndev,
439 "Failed read of mac index register\n");
442 *buf++ = value[0]; /* lower Mcast address */
443 *buf++ = value[1]; /* upper Mcast address */
446 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
450 static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
/* Dumps the 16 routing registers while holding the routing-index semaphore.
 * NOTE(review): the per-entry buffer store between the error report and the
 * unlock is elided in this listing.
 */
455 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
459 for (i = 0; i < 16; i++) {
460 status = ql_get_routing_reg(qdev, i, &value);
462 netif_err(qdev, drv, qdev->ndev,
463 "Failed read of routing index register\n");
470 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
474 /* Read the MPI Processor shadow registers */
475 static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
/* For each of MPI_CORE_SH_REGS_CNT entries: select the shadow register via
 * an MPI write (SHADOW_OFFSET | index), then read it back through RISC_127.
 * NOTE(review): error checks between the two MPI operations are elided.
 */
480 for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
481 status = ql_write_mpi_reg(qdev,
483 (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
486 status = ql_read_mpi_reg(qdev, RISC_127, buf);
494 /* Read the MPI Processor core registers */
495 static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
496 u32 offset, u32 count)
/* Reads 'count' consecutive MPI registers starting at 'offset' into buf.
 * NOTE(review): the per-iteration error break/return is elided here.
 */
500 for (i = 0; i < count; i++, buf++) {
501 status = ql_read_mpi_reg(qdev, offset + i, buf);
508 /* Read the ASIC probe dump */
509 static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
/* Walks every (module, mux) probe point whose bit is set in 'valid',
 * reading the low half and then (with PRB_MX_ADDR_UP) the high half of the
 * probe data.  Returns the advanced output pointer so callers can chain
 * clock domains.  NOTE(review): the stores of lo_val/hi_val into the output
 * buffer are elided in this listing.
 */
512 u32 module, mux_sel, probe, lo_val, hi_val;
514 for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
515 if (!((valid >> module) & 1))
517 for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
521 | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
522 ql_write32(qdev, PRB_MX_ADDR, probe);
523 lo_val = ql_read32(qdev, PRB_MX_DATA);
528 probe |= PRB_MX_ADDR_UP;
529 ql_write32(qdev, PRB_MX_ADDR, probe);
530 hi_val = ql_read32(qdev, PRB_MX_DATA);
540 static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
/* Enables the probe mux, then chains ql_get_probe() across the four clock
 * domains (SYS, PCI, XGM, FC), each advancing the output pointer.
 */
542 /* First we have to enable the probe mux */
543 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
544 buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
545 PRB_MX_ADDR_VALID_SYS_MOD, buf);
546 buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
547 PRB_MX_ADDR_VALID_PCI_MOD, buf);
548 buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
549 PRB_MX_ADDR_VALID_XGM_MOD, buf);
550 buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
551 PRB_MX_ADDR_VALID_FC_MOD, buf);
555 /* Read out the routing index registers */
556 static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
/* Under the routing-index semaphore, walks all 4 routing types and their
 * index ranges, issuing an indexed read via RT_IDX and collecting RT_DATA.
 * NOTE(review): the poll on RT_IDX_MR below has no iteration bound or
 * timeout — if the hardware never sets the bit this spins forever.  The
 * index_max setup per type and the buffer stores are elided.
 */
559 u32 type, index, index_max;
564 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
568 for (type = 0; type < 4; type++) {
573 for (index = 0; index < index_max; index++) {
575 | (type << RT_IDX_TYPE_SHIFT)
576 | (index << RT_IDX_IDX_SHIFT);
577 ql_write32(qdev, RT_IDX, val);
579 while ((result_index & RT_IDX_MR) == 0)
580 result_index = ql_read32(qdev, RT_IDX);
581 result_data = ql_read32(qdev, RT_DATA);
592 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
596 /* Read out the MAC protocol registers */
597 static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
/* Walks every MAC-address/protocol table type, choosing per-type entry and
 * word counts, then performs an indexed read via MAC_ADDR_IDX / MAC_ADDR_DATA
 * for each (index, offset) pair.
 * NOTE(review): the poll on MAC_ADDR_MR below has no bound or timeout.
 * Also note case 1 uses MAC_ADDR_MAX_CAM_WCOUNT for max_index — presumably
 * intentional but worth confirming against the multicast table size.
 * Switch break statements and buffer stores are elided in this listing.
 */
599 u32 result_index, result_data;
604 u32 initial_val = MAC_ADDR_RS;
608 for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
611 initial_val |= MAC_ADDR_ADR;
612 max_index = MAC_ADDR_MAX_CAM_ENTRIES;
613 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
615 case 1: /* Multicast MAC Address */
616 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
617 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
619 case 2: /* VLAN filter mask */
620 case 3: /* MC filter mask */
621 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
622 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
624 case 4: /* FC MAC addresses */
625 max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
626 max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
628 case 5: /* Mgmt MAC addresses */
629 max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
630 max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
632 case 6: /* Mgmt VLAN addresses */
633 max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
634 max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
636 case 7: /* Mgmt IPv4 address */
637 max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
638 max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
640 case 8: /* Mgmt IPv6 address */
641 max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
642 max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
644 case 9: /* Mgmt TCP/UDP Dest port */
645 max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
646 max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
649 netdev_err(qdev->ndev, "Bad type!!! 0x%08x\n", type);
654 for (index = 0; index < max_index; index++) {
655 for (offset = 0; offset < max_offset; offset++) {
657 | (type << MAC_ADDR_TYPE_SHIFT)
658 | (index << MAC_ADDR_IDX_SHIFT)
660 ql_write32(qdev, MAC_ADDR_IDX, val);
662 while ((result_index & MAC_ADDR_MR) == 0) {
663 result_index = ql_read32(qdev,
666 result_data = ql_read32(qdev, MAC_ADDR_DATA);
676 static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
/* Reads the semaphore register of each of the MAX_SEMAPHORE_FUNCTIONS
 * functions by proxying through the MPI NIC register block.
 * NOTE(review): the register offset OR'd into 'reg' and the dead-fill on
 * read failure are elided in this listing.
 */
678 u32 func_num, reg, reg_val;
681 for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
682 reg = MPI_NIC_REG_BLOCK
683 | (func_num << MPI_NIC_FUNCTION_SHIFT)
685 status = ql_read_mpi_reg(qdev, reg, &reg_val);
687 /* if the read failed then dead fill the element. */
694 /* Create a coredump segment header */
695 static void ql_build_coredump_seg_header(
696 struct mpi_coredump_segment_header *seg_hdr,
697 u32 seg_number, u32 seg_size, u8 *desc)
/* Zeroes the header, stamps the coredump cookie, records the segment number
 * and size, and copies a bounded description string.
 * NOTE(review): strncpy() does not NUL-terminate on truncation by itself;
 * termination is guaranteed here only because the preceding memset() zeroed
 * the whole header and the copy is bounded to size - 1.
 */
699 memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
700 seg_hdr->cookie = MPI_COREDUMP_COOKIE;
701 seg_hdr->seg_num = seg_number;
702 seg_hdr->seg_size = seg_size;
703 strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
707 * This function should be called when a coredump / probedump
708 * is to be extracted from the HBA. It is assumed there is a
709 * qdev structure that contains the base address of the register
710 * space for this function as well as a coredump structure that
711 * will contain the dump.
/* Master coredump routine: pauses the MPI RISC, fills the global header,
 * then builds every segment header and collects each register block (NIC
 * regs for both functions, XGMAC, SERDES, MPI core/shadow, peripheral
 * blocks, indexed tables, probe dump, semaphores), forces the firmware to
 * stay halted, unpauses, hard-resets the RISC, and finally dumps CODE RAM
 * and MEMC RAM before releasing the processor-register semaphore.
 * NOTE(review): most early-return error checks, segment-number arguments,
 * and description-string arguments are elided in this listing; unlike the
 * segment headers (whose strncpy is size-1 bounded), the id_string copy
 * below uses the full field size and so is not NUL-terminated if the field
 * is exactly 12 bytes — confirm the field width.
 */
713 int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
719 netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
723 /* Try to get the spinlock, but dont worry if
724 * it isn't available. If the firmware died it
725 * might be holding the sem.
727 ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
729 status = ql_pause_mpi_risc(qdev);
731 netif_err(qdev, drv, qdev->ndev,
732 "Failed RISC pause. Status = 0x%.08x\n", status);
736 /* Insert the global header */
737 memset(&(mpi_coredump->mpi_global_header), 0,
738 sizeof(struct mpi_coredump_global_header));
739 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
740 mpi_coredump->mpi_global_header.header_size =
741 sizeof(struct mpi_coredump_global_header);
742 mpi_coredump->mpi_global_header.image_size =
743 sizeof(struct ql_mpi_coredump);
744 strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
745 sizeof(mpi_coredump->mpi_global_header.id_string));
747 /* Get generic NIC reg dump */
748 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
749 NIC1_CONTROL_SEG_NUM,
750 sizeof(struct mpi_coredump_segment_header) +
751 sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
753 ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
754 NIC2_CONTROL_SEG_NUM,
755 sizeof(struct mpi_coredump_segment_header) +
756 sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
758 /* Get XGMac registers. (Segment 18, Rev C. step 21) */
759 ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
761 sizeof(struct mpi_coredump_segment_header) +
762 sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
764 ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
766 sizeof(struct mpi_coredump_segment_header) +
767 sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
769 if (qdev->func & 1) {
770 /* Odd means our function is NIC 2 */
771 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
772 mpi_coredump->nic2_regs[i] =
773 ql_read32(qdev, i * sizeof(u32));
775 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
776 mpi_coredump->nic_regs[i] =
777 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
779 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
780 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
782 /* Even means our function is NIC 1 */
783 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
784 mpi_coredump->nic_regs[i] =
785 ql_read32(qdev, i * sizeof(u32));
786 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
787 mpi_coredump->nic2_regs[i] =
788 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
790 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
791 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
794 /* Rev C. Step 20a */
795 ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
797 sizeof(struct mpi_coredump_segment_header) +
798 sizeof(mpi_coredump->serdes_xaui_an),
799 "XAUI AN Registers");
801 /* Rev C. Step 20b */
802 ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
803 XAUI_HSS_PCS_SEG_NUM,
804 sizeof(struct mpi_coredump_segment_header) +
805 sizeof(mpi_coredump->serdes_xaui_hss_pcs),
806 "XAUI HSS PCS Registers");
808 ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
809 sizeof(struct mpi_coredump_segment_header) +
810 sizeof(mpi_coredump->serdes_xfi_an),
813 ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
815 sizeof(struct mpi_coredump_segment_header) +
816 sizeof(mpi_coredump->serdes_xfi_train),
817 "XFI TRAIN Registers");
819 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
821 sizeof(struct mpi_coredump_segment_header) +
822 sizeof(mpi_coredump->serdes_xfi_hss_pcs),
823 "XFI HSS PCS Registers");
825 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
827 sizeof(struct mpi_coredump_segment_header) +
828 sizeof(mpi_coredump->serdes_xfi_hss_tx),
829 "XFI HSS TX Registers");
831 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
833 sizeof(struct mpi_coredump_segment_header) +
834 sizeof(mpi_coredump->serdes_xfi_hss_rx),
835 "XFI HSS RX Registers");
837 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
839 sizeof(struct mpi_coredump_segment_header) +
840 sizeof(mpi_coredump->serdes_xfi_hss_pll),
841 "XFI HSS PLL Registers");
843 ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
845 sizeof(struct mpi_coredump_segment_header) +
846 sizeof(mpi_coredump->serdes2_xaui_an),
847 "XAUI2 AN Registers");
849 ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
850 XAUI2_HSS_PCS_SEG_NUM,
851 sizeof(struct mpi_coredump_segment_header) +
852 sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
853 "XAUI2 HSS PCS Registers");
855 ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
857 sizeof(struct mpi_coredump_segment_header) +
858 sizeof(mpi_coredump->serdes2_xfi_an),
859 "XFI2 AN Registers");
861 ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
863 sizeof(struct mpi_coredump_segment_header) +
864 sizeof(mpi_coredump->serdes2_xfi_train),
865 "XFI2 TRAIN Registers");
867 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
868 XFI2_HSS_PCS_SEG_NUM,
869 sizeof(struct mpi_coredump_segment_header) +
870 sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
871 "XFI2 HSS PCS Registers");
873 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
875 sizeof(struct mpi_coredump_segment_header) +
876 sizeof(mpi_coredump->serdes2_xfi_hss_tx),
877 "XFI2 HSS TX Registers");
879 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
881 sizeof(struct mpi_coredump_segment_header) +
882 sizeof(mpi_coredump->serdes2_xfi_hss_rx),
883 "XFI2 HSS RX Registers");
885 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
886 XFI2_HSS_PLL_SEG_NUM,
887 sizeof(struct mpi_coredump_segment_header) +
888 sizeof(mpi_coredump->serdes2_xfi_hss_pll),
889 "XFI2 HSS PLL Registers");
891 status = ql_get_serdes_regs(qdev, mpi_coredump);
893 netif_err(qdev, drv, qdev->ndev,
894 "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
899 ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
901 sizeof(mpi_coredump->core_regs_seg_hdr) +
902 sizeof(mpi_coredump->mpi_core_regs) +
903 sizeof(mpi_coredump->mpi_core_sh_regs),
906 /* Get the MPI Core Registers */
907 status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
908 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
911 /* Get the 16 MPI shadow registers */
912 status = ql_get_mpi_shadow_regs(qdev,
913 &mpi_coredump->mpi_core_sh_regs[0]);
917 /* Get the Test Logic Registers */
918 ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
920 sizeof(struct mpi_coredump_segment_header)
921 + sizeof(mpi_coredump->test_logic_regs),
923 status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
924 TEST_REGS_ADDR, TEST_REGS_CNT);
928 /* Get the RMII Registers */
929 ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
931 sizeof(struct mpi_coredump_segment_header)
932 + sizeof(mpi_coredump->rmii_regs),
934 status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
935 RMII_REGS_ADDR, RMII_REGS_CNT);
939 /* Get the FCMAC1 Registers */
940 ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
942 sizeof(struct mpi_coredump_segment_header)
943 + sizeof(mpi_coredump->fcmac1_regs),
945 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
946 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
950 /* Get the FCMAC2 Registers */
952 ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
954 sizeof(struct mpi_coredump_segment_header)
955 + sizeof(mpi_coredump->fcmac2_regs),
958 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
959 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
963 /* Get the FC1 MBX Registers */
964 ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
966 sizeof(struct mpi_coredump_segment_header)
967 + sizeof(mpi_coredump->fc1_mbx_regs),
969 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
970 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
974 /* Get the IDE Registers */
975 ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
977 sizeof(struct mpi_coredump_segment_header)
978 + sizeof(mpi_coredump->ide_regs),
980 status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
981 IDE_REGS_ADDR, IDE_REGS_CNT);
985 /* Get the NIC1 MBX Registers */
986 ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
988 sizeof(struct mpi_coredump_segment_header)
989 + sizeof(mpi_coredump->nic1_mbx_regs),
991 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
992 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
996 /* Get the SMBus Registers */
997 ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
999 sizeof(struct mpi_coredump_segment_header)
1000 + sizeof(mpi_coredump->smbus_regs),
1002 status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
1003 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
1007 /* Get the FC2 MBX Registers */
1008 ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
1010 sizeof(struct mpi_coredump_segment_header)
1011 + sizeof(mpi_coredump->fc2_mbx_regs),
1013 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
1014 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
1018 /* Get the NIC2 MBX Registers */
1019 ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
1021 sizeof(struct mpi_coredump_segment_header)
1022 + sizeof(mpi_coredump->nic2_mbx_regs),
1024 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
1025 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1029 /* Get the I2C Registers */
1030 ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
1032 sizeof(struct mpi_coredump_segment_header)
1033 + sizeof(mpi_coredump->i2c_regs),
1035 status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
1036 I2C_REGS_ADDR, I2C_REGS_CNT);
1040 /* Get the MEMC Registers */
1041 ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
1043 sizeof(struct mpi_coredump_segment_header)
1044 + sizeof(mpi_coredump->memc_regs),
1046 status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
1047 MEMC_REGS_ADDR, MEMC_REGS_CNT);
1051 /* Get the PBus Registers */
1052 ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
1054 sizeof(struct mpi_coredump_segment_header)
1055 + sizeof(mpi_coredump->pbus_regs),
1057 status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
1058 PBUS_REGS_ADDR, PBUS_REGS_CNT);
1062 /* Get the MDE Registers */
1063 ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
1065 sizeof(struct mpi_coredump_segment_header)
1066 + sizeof(mpi_coredump->mde_regs),
1068 status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
1069 MDE_REGS_ADDR, MDE_REGS_CNT);
1073 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1074 MISC_NIC_INFO_SEG_NUM,
1075 sizeof(struct mpi_coredump_segment_header)
1076 + sizeof(mpi_coredump->misc_nic_info),
1078 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1079 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1080 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1081 mpi_coredump->misc_nic_info.function = qdev->func;
1084 /* Get indexed register values. */
1085 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1086 INTR_STATES_SEG_NUM,
1087 sizeof(struct mpi_coredump_segment_header)
1088 + sizeof(mpi_coredump->intr_states),
1090 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1092 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1093 CAM_ENTRIES_SEG_NUM,
1094 sizeof(struct mpi_coredump_segment_header)
1095 + sizeof(mpi_coredump->cam_entries),
1097 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1101 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1102 ROUTING_WORDS_SEG_NUM,
1103 sizeof(struct mpi_coredump_segment_header)
1104 + sizeof(mpi_coredump->nic_routing_words),
1106 status = ql_get_routing_entries(qdev,
1107 &mpi_coredump->nic_routing_words[0]);
1111 /* Segment 34 (Rev C. step 23) */
1112 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1114 sizeof(struct mpi_coredump_segment_header)
1115 + sizeof(mpi_coredump->ets),
1117 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1121 ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
1123 sizeof(struct mpi_coredump_segment_header)
1124 + sizeof(mpi_coredump->probe_dump),
1126 ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
1128 ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
1129 ROUTING_INDEX_SEG_NUM,
1130 sizeof(struct mpi_coredump_segment_header)
1131 + sizeof(mpi_coredump->routing_regs),
1133 status = ql_get_routing_index_registers(qdev,
1134 &mpi_coredump->routing_regs[0]);
1138 ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
1139 MAC_PROTOCOL_SEG_NUM,
1140 sizeof(struct mpi_coredump_segment_header)
1141 + sizeof(mpi_coredump->mac_prot_regs),
1143 ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
1145 /* Get the semaphore registers for all 5 functions */
1146 ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
1148 sizeof(struct mpi_coredump_segment_header) +
1149 sizeof(mpi_coredump->sem_regs), "Sem Registers");
1151 ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
1153 /* Prevent the mpi restarting while we dump the memory.*/
1154 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
1156 /* clear the pause */
1157 status = ql_unpause_mpi_risc(qdev);
1159 netif_err(qdev, drv, qdev->ndev,
1160 "Failed RISC unpause. Status = 0x%.08x\n", status);
1164 /* Reset the RISC so we can dump RAM */
1165 status = ql_hard_reset_mpi_risc(qdev);
1167 netif_err(qdev, drv, qdev->ndev,
1168 "Failed RISC reset. Status = 0x%.08x\n", status);
1172 ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
1174 sizeof(struct mpi_coredump_segment_header)
1175 + sizeof(mpi_coredump->code_ram),
1177 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
1178 CODE_RAM_ADDR, CODE_RAM_CNT);
1180 netif_err(qdev, drv, qdev->ndev,
1181 "Failed Dump of CODE RAM. Status = 0x%.08x\n",
1186 /* Insert the segment header */
1187 ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
1189 sizeof(struct mpi_coredump_segment_header)
1190 + sizeof(mpi_coredump->memc_ram),
1192 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
1193 MEMC_RAM_ADDR, MEMC_RAM_CNT);
1195 netif_err(qdev, drv, qdev->ndev,
1196 "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
1201 ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
1205 static void ql_get_core_dump(struct ql_adapter *qdev)
/* Forces a firmware coredump.  Refused unless this function owns the
 * firmware and the interface is up; the actual dump work is queued via
 * the firmware-error handler.
 */
1207 if (!ql_own_firmware(qdev)) {
1208 netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
1212 if (!netif_running(qdev->ndev)) {
1213 netif_err(qdev, ifup, qdev->ndev,
1214 "Force Coredump can only be done from interface that is up\n");
1217 ql_queue_fw_error(qdev);
1220 static void ql_gen_reg_dump(struct ql_adapter *qdev,
1221 struct ql_reg_dump *mpi_coredump)
/* Lightweight register snapshot (no RISC pause/reset): fills the global
 * header, misc NIC info, the first 64 NIC registers, interrupt states,
 * CAM entries, routing words, and ETS registers into a ql_reg_dump.
 * NOTE(review): as in ql_core_dump(), the id_string strncpy uses the full
 * field size and may not be NUL-terminated — confirm the field width.
 * Several segment-description arguments and error checks are elided.
 */
1225 memset(&(mpi_coredump->mpi_global_header), 0,
1226 sizeof(struct mpi_coredump_global_header));
1227 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
1228 mpi_coredump->mpi_global_header.header_size =
1229 sizeof(struct mpi_coredump_global_header);
1230 mpi_coredump->mpi_global_header.image_size =
1231 sizeof(struct ql_reg_dump);
1232 strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump",
1233 sizeof(mpi_coredump->mpi_global_header.id_string));
1236 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1237 MISC_NIC_INFO_SEG_NUM,
1238 sizeof(struct mpi_coredump_segment_header)
1239 + sizeof(mpi_coredump->misc_nic_info),
1241 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1242 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1243 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1244 mpi_coredump->misc_nic_info.function = qdev->func;
1246 /* Segment 16, Rev C. Step 18 */
1247 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
1248 NIC1_CONTROL_SEG_NUM,
1249 sizeof(struct mpi_coredump_segment_header)
1250 + sizeof(mpi_coredump->nic_regs),
1252 /* Get generic reg dump */
1253 for (i = 0; i < 64; i++)
1254 mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
1257 /* Get indexed register values. */
1258 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1259 INTR_STATES_SEG_NUM,
1260 sizeof(struct mpi_coredump_segment_header)
1261 + sizeof(mpi_coredump->intr_states),
1263 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1265 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1266 CAM_ENTRIES_SEG_NUM,
1267 sizeof(struct mpi_coredump_segment_header)
1268 + sizeof(mpi_coredump->cam_entries),
1270 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1274 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1275 ROUTING_WORDS_SEG_NUM,
1276 sizeof(struct mpi_coredump_segment_header)
1277 + sizeof(mpi_coredump->nic_routing_words),
1279 status = ql_get_routing_entries(qdev,
1280 &mpi_coredump->nic_routing_words[0]);
1284 /* Segment 34 (Rev C. step 23) */
1285 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1287 sizeof(struct mpi_coredump_segment_header)
1288 + sizeof(mpi_coredump->ets),
1290 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
/*
 * ql_get_dump() - Copy a coredump into the caller's buffer @buff.
 *
 * When the forced-coredump flag is clear, take a full MPI core dump
 * into @buff and soft-reset the MPI RISC on success (log an error on
 * failure).  Otherwise only snapshot the general registers into @buff
 * and kick the firmware core-dump path separately.
 */
1295 void ql_get_dump(struct ql_adapter *qdev, void *buff)
1298 * If the dump has already been taken and is stored
1299 * in our internal buffer and if force dump is set then
1300 * just start the spool to dump it to the log file
1301 * and also, take a snapshot of the general regs
1302 * to the user's buffer or else take complete dump
1303 * to the user's buffer if force is not set.
1306 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
1307 if (!ql_core_dump(qdev, buff))
1308 ql_soft_reset_mpi_risc(qdev);
1310 netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
1312 ql_gen_reg_dump(qdev, buff);
1313 ql_get_core_dump(qdev);
1317 /* Coredump to messages log file using separate worker thread */
/*
 * ql_mpi_core_to_log() - Deferred-work handler that hex-dumps the
 * adapter's stored MPI coredump buffer to the kernel log at
 * KERN_DEBUG, 32 bytes per line in 4-byte groups.
 *
 * NOTE(review): the prefix string passed to print_hex_dump() contains
 * a '\n'; print_hex_dump() prepends the prefix to every output line,
 * so the newline splits each dumped line -- confirm this is intended.
 */
1318 void ql_mpi_core_to_log(struct work_struct *work)
1320 struct ql_adapter *qdev =
1321 container_of(work, struct ql_adapter, mpi_core_to_log.work);
1323 print_hex_dump(KERN_DEBUG, "Core is dumping to log file!\n",
1324 DUMP_PREFIX_OFFSET, 32, 4, qdev->mpi_coredump,
1325 sizeof(*qdev->mpi_coredump), false);
/*
 * ql_dump_intr_states() - Log enabled/disabled state of each interrupt.
 *
 * For every vector, write that vector's read-mask to INTR_EN (which
 * presumably selects the vector for readback -- confirm against the
 * register spec), then read INTR_EN and test the INTR_EN_EN bit.
 */
1329 static void ql_dump_intr_states(struct ql_adapter *qdev)
1334 for (i = 0; i < qdev->intr_count; i++) {
1335 ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
1336 value = ql_read32(qdev, INTR_EN);
1337 netdev_err(qdev->ndev, "Interrupt %d is %s\n", i,
1338 (value & INTR_EN_EN ? "enabled" : "disabled"));
/*
 * DUMP_XGMAC() - Read one XGMAC register and log its name and value.
 * The macro's brace/declaration lines are elided in this listing --
 * presumably a do { u32 data; ... } while (0) wrapper; verify in the
 * full source.
 */
1342 #define DUMP_XGMAC(qdev, reg) \
1345 ql_read_xgmac_reg(qdev, reg, &data); \
1346 netdev_err(qdev->ndev, "%s = 0x%.08x\n", #reg, data); \
/*
 * ql_dump_xgmac_control_regs() - Log the XGMAC control register set.
 * Takes the per-port XGMAC semaphore first and releases it when done;
 * logs an error and returns if the semaphore cannot be acquired.
 */
1349 void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
1351 if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
1352 netdev_err(qdev->ndev, "%s: Couldn't get xgmac sem\n",
1356 DUMP_XGMAC(qdev, PAUSE_SRC_LO);
1357 DUMP_XGMAC(qdev, PAUSE_SRC_HI);
1358 DUMP_XGMAC(qdev, GLOBAL_CFG);
1359 DUMP_XGMAC(qdev, TX_CFG);
1360 DUMP_XGMAC(qdev, RX_CFG);
1361 DUMP_XGMAC(qdev, FLOW_CTL);
1362 DUMP_XGMAC(qdev, PAUSE_OPCODE);
1363 DUMP_XGMAC(qdev, PAUSE_TIMER);
1364 DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
1365 DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
1366 DUMP_XGMAC(qdev, MAC_TX_PARAMS);
1367 DUMP_XGMAC(qdev, MAC_RX_PARAMS);
1368 DUMP_XGMAC(qdev, MAC_SYS_INT);
1369 DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
1370 DUMP_XGMAC(qdev, MAC_MGMT_INT);
1371 DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
1372 DUMP_XGMAC(qdev, EXT_ARB_MODE);
1373 ql_sem_unlock(qdev, qdev->xg_sem_mask);
/* ql_dump_ets_regs() - ETS register dump; body elided in this listing. */
1376 static void ql_dump_ets_regs(struct ql_adapter *qdev)
/*
 * ql_dump_cam_entries() - Log the MAC CAM and multicast CAM contents.
 *
 * Holds SEM_MAC_ADDR_MASK for the duration.  Reads 4 unicast CAM
 * slots (three words: lookup low/high plus output) and 32 multicast
 * slots (two words each); bails out of a loop on a failed register
 * read (the error-path lines are elided here).
 */
1380 static void ql_dump_cam_entries(struct ql_adapter *qdev)
/*
 * NOTE(review): 'i' first holds the semaphore status and is then
 * reused as the loop counter -- the elided line after this presumably
 * checks it before proceeding; verify in the full source.
 */
1385 i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1388 for (i = 0; i < 4; i++) {
1389 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
1390 netdev_err(qdev->ndev,
1391 "%s: Failed read of mac index register\n",
1396 netdev_err(qdev->ndev,
1397 "CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
1398 i, value[1], value[0], value[2]);
1400 for (i = 0; i < 32; i++) {
1401 if (ql_get_mac_addr_reg
1402 (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
1403 netdev_err(qdev->ndev,
1404 "%s: Failed read of mac index register\n",
1409 netdev_err(qdev->ndev,
1410 "MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
1411 i, value[1], value[0]);
1413 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
/*
 * ql_dump_routing_entries() - Log the 16 routing-mask registers.
 *
 * Holds SEM_RT_IDX_MASK while reading; 'i' first carries the
 * semaphore status (presumably checked on an elided line) and is
 * then reused as the loop index.
 */
1416 void ql_dump_routing_entries(struct ql_adapter *qdev)
1421 i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
1424 for (i = 0; i < 16; i++) {
1426 if (ql_get_routing_reg(qdev, i, &value)) {
1427 netdev_err(qdev->ndev,
1428 "%s: Failed read of routing index register\n",
1433 netdev_err(qdev->ndev,
1434 "Routing Mask %d = 0x%.08x\n",
1437 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
/* DUMP_REG() - Log one 32-bit NIC register by name and value. */
1440 #define DUMP_REG(qdev, reg) \
1441 netdev_err(qdev->ndev, "%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))
/*
 * ql_dump_regs() - Log every directly-addressable NIC control/status
 * register for this PCI function, then the indexed/indirect state:
 * interrupt states, XGMAC registers, ETS, CAM and routing entries.
 */
1443 void ql_dump_regs(struct ql_adapter *qdev)
1445 netdev_err(qdev->ndev, "reg dump for function #%d\n", qdev->func);
1446 DUMP_REG(qdev, SYS);
1447 DUMP_REG(qdev, RST_FO);
1448 DUMP_REG(qdev, FSC);
1449 DUMP_REG(qdev, CSR);
1450 DUMP_REG(qdev, ICB_RID);
1451 DUMP_REG(qdev, ICB_L);
1452 DUMP_REG(qdev, ICB_H);
1453 DUMP_REG(qdev, CFG);
1454 DUMP_REG(qdev, BIOS_ADDR);
1455 DUMP_REG(qdev, STS);
1456 DUMP_REG(qdev, INTR_EN);
1457 DUMP_REG(qdev, INTR_MASK);
1458 DUMP_REG(qdev, ISR1);
1459 DUMP_REG(qdev, ISR2);
1460 DUMP_REG(qdev, ISR3);
1461 DUMP_REG(qdev, ISR4);
1462 DUMP_REG(qdev, REV_ID);
1463 DUMP_REG(qdev, FRC_ECC_ERR);
1464 DUMP_REG(qdev, ERR_STS);
1465 DUMP_REG(qdev, RAM_DBG_ADDR);
1466 DUMP_REG(qdev, RAM_DBG_DATA);
1467 DUMP_REG(qdev, ECC_ERR_CNT);
1468 DUMP_REG(qdev, SEM);
1469 DUMP_REG(qdev, GPIO_1);
1470 DUMP_REG(qdev, GPIO_2);
1471 DUMP_REG(qdev, GPIO_3);
1472 DUMP_REG(qdev, XGMAC_ADDR);
1473 DUMP_REG(qdev, XGMAC_DATA);
1474 DUMP_REG(qdev, NIC_ETS);
1475 DUMP_REG(qdev, CNA_ETS);
1476 DUMP_REG(qdev, FLASH_ADDR);
1477 DUMP_REG(qdev, FLASH_DATA);
1478 DUMP_REG(qdev, CQ_STOP);
1479 DUMP_REG(qdev, PAGE_TBL_RID);
1480 DUMP_REG(qdev, WQ_PAGE_TBL_LO);
1481 DUMP_REG(qdev, WQ_PAGE_TBL_HI);
1482 DUMP_REG(qdev, CQ_PAGE_TBL_LO);
1483 DUMP_REG(qdev, CQ_PAGE_TBL_HI);
1484 DUMP_REG(qdev, COS_DFLT_CQ1);
1485 DUMP_REG(qdev, COS_DFLT_CQ2);
1486 DUMP_REG(qdev, SPLT_HDR);
1487 DUMP_REG(qdev, FC_PAUSE_THRES);
1488 DUMP_REG(qdev, NIC_PAUSE_THRES);
1489 DUMP_REG(qdev, FC_ETHERTYPE);
1490 DUMP_REG(qdev, FC_RCV_CFG);
1491 DUMP_REG(qdev, NIC_RCV_CFG);
1492 DUMP_REG(qdev, FC_COS_TAGS);
1493 DUMP_REG(qdev, NIC_COS_TAGS);
1494 DUMP_REG(qdev, MGMT_RCV_CFG);
1495 DUMP_REG(qdev, XG_SERDES_ADDR);
1496 DUMP_REG(qdev, XG_SERDES_DATA);
1497 DUMP_REG(qdev, PRB_MX_ADDR);
1498 DUMP_REG(qdev, PRB_MX_DATA);
/* Follow up with the indexed/indirect register groups. */
1499 ql_dump_intr_states(qdev);
1500 ql_dump_xgmac_control_regs(qdev);
1501 ql_dump_ets_regs(qdev);
1502 ql_dump_cam_entries(qdev);
1503 ql_dump_routing_entries(qdev);
/* DUMP_STAT() - Log one accumulated NIC statistic by name and value. */
1509 #define DUMP_STAT(qdev, stat) \
1510 netdev_err(qdev->ndev, "%s = %ld\n", #stat, \
1511 (unsigned long)(qdev)->nic_stats.stat)
/*
 * ql_dump_stat() - Log every TX and RX MAC statistics counter held in
 * qdev->nic_stats.
 */
1513 void ql_dump_stat(struct ql_adapter *qdev)
1515 netdev_err(qdev->ndev, "%s: Enter\n", __func__);
1516 DUMP_STAT(qdev, tx_pkts);
1517 DUMP_STAT(qdev, tx_bytes);
1518 DUMP_STAT(qdev, tx_mcast_pkts);
1519 DUMP_STAT(qdev, tx_bcast_pkts);
1520 DUMP_STAT(qdev, tx_ucast_pkts);
1521 DUMP_STAT(qdev, tx_ctl_pkts);
1522 DUMP_STAT(qdev, tx_pause_pkts);
1523 DUMP_STAT(qdev, tx_64_pkt);
1524 DUMP_STAT(qdev, tx_65_to_127_pkt);
1525 DUMP_STAT(qdev, tx_128_to_255_pkt);
1526 DUMP_STAT(qdev, tx_256_511_pkt);
1527 DUMP_STAT(qdev, tx_512_to_1023_pkt);
1528 DUMP_STAT(qdev, tx_1024_to_1518_pkt);
1529 DUMP_STAT(qdev, tx_1519_to_max_pkt);
1530 DUMP_STAT(qdev, tx_undersize_pkt);
1531 DUMP_STAT(qdev, tx_oversize_pkt);
1532 DUMP_STAT(qdev, rx_bytes);
1533 DUMP_STAT(qdev, rx_bytes_ok);
1534 DUMP_STAT(qdev, rx_pkts);
1535 DUMP_STAT(qdev, rx_pkts_ok);
1536 DUMP_STAT(qdev, rx_bcast_pkts);
1537 DUMP_STAT(qdev, rx_mcast_pkts);
1538 DUMP_STAT(qdev, rx_ucast_pkts);
1539 DUMP_STAT(qdev, rx_undersize_pkts);
1540 DUMP_STAT(qdev, rx_oversize_pkts);
1541 DUMP_STAT(qdev, rx_jabber_pkts);
1542 DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
1543 DUMP_STAT(qdev, rx_drop_events);
1544 DUMP_STAT(qdev, rx_fcerr_pkts);
1545 DUMP_STAT(qdev, rx_align_err);
1546 DUMP_STAT(qdev, rx_symbol_err);
1547 DUMP_STAT(qdev, rx_mac_err);
1548 DUMP_STAT(qdev, rx_ctl_pkts);
1549 DUMP_STAT(qdev, rx_pause_pkts);
1550 DUMP_STAT(qdev, rx_64_pkts);
1551 DUMP_STAT(qdev, rx_65_to_127_pkts);
1552 DUMP_STAT(qdev, rx_128_255_pkts);
1553 DUMP_STAT(qdev, rx_256_511_pkts);
1554 DUMP_STAT(qdev, rx_512_to_1023_pkts);
1555 DUMP_STAT(qdev, rx_1024_to_1518_pkts);
1556 DUMP_STAT(qdev, rx_1519_to_max_pkts);
1557 DUMP_STAT(qdev, rx_len_err_pkts);
/* DUMP_QDEV_FIELD() - Log one qdev member with a caller-given format. */
1563 #define DUMP_QDEV_FIELD(qdev, type, field) \
1564 netdev_err(qdev->ndev, "qdev->%-24s = " type "\n", #field, (qdev)->field)
/* DUMP_QDEV_DMA_FIELD() - Log a DMA address member as %llx. */
1565 #define DUMP_QDEV_DMA_FIELD(qdev, field) \
1566 netdev_err(qdev->ndev, "qdev->%-24s = %llx\n", #field, \
1567 (unsigned long long)qdev->field)
/* DUMP_QDEV_ARRAY() - Log element @index of a qdev-embedded array. */
1568 #define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
1569 netdev_err(qdev->ndev, "%s[%d].%s = " type "\n", \
1570 #array, index, #field, (qdev)->array[index].field)
/*
 * ql_dump_qdev() - Log the adapter's bookkeeping state: flags,
 * device pointers, shadow-register areas, MSI-X vectors (when
 * present), per-vector interrupt contexts and ring configuration.
 */
1571 void ql_dump_qdev(struct ql_adapter *qdev)
1575 DUMP_QDEV_FIELD(qdev, "%lx", flags);
1576 DUMP_QDEV_FIELD(qdev, "%p", pdev);
1577 DUMP_QDEV_FIELD(qdev, "%p", ndev);
1578 DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
1579 DUMP_QDEV_FIELD(qdev, "%p", reg_base);
1580 DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
1581 DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
1582 DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
1583 DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
1584 DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
1585 DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
1586 DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
1587 DUMP_QDEV_FIELD(qdev, "%d", intr_count);
/* MSI-X table is only dumped when the entry array was allocated. */
1588 if (qdev->msi_x_entry)
1589 for (i = 0; i < qdev->intr_count; i++) {
1590 DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
1591 DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
1593 for (i = 0; i < qdev->intr_count; i++) {
1594 DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
1595 DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
1596 DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
1597 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
1598 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
1599 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
1601 DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
1602 DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
1603 DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
1604 DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
1605 DUMP_QDEV_FIELD(qdev, "%d", intr_count);
1606 DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
1607 DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
1608 DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
1609 DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
1610 DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
1611 DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
1612 DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
1613 DUMP_QDEV_FIELD(qdev, "%u", lbq_buf_size);
/*
 * ql_dump_wqicb() - Log a work-queue init control block (WQICB).
 *
 * @wqicb must be the instance embedded in a struct tx_ring: the
 * owning tx_ring (and thus qdev for logging) is recovered via
 * container_of().  Multi-byte fields are little-endian on the wire
 * and converted before printing.
 */
1618 void ql_dump_wqicb(struct wqicb *wqicb)
1620 struct tx_ring *tx_ring = container_of(wqicb, struct tx_ring, wqicb);
1621 struct ql_adapter *qdev = tx_ring->qdev;
1623 netdev_err(qdev->ndev, "Dumping wqicb stuff...\n");
1624 netdev_err(qdev->ndev, "wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
1625 netdev_err(qdev->ndev, "wqicb->flags = %x\n",
1626 le16_to_cpu(wqicb->flags));
1627 netdev_err(qdev->ndev, "wqicb->cq_id_rss = %d\n",
1628 le16_to_cpu(wqicb->cq_id_rss));
1629 netdev_err(qdev->ndev, "wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
1630 netdev_err(qdev->ndev, "wqicb->wq_addr = 0x%llx\n",
1631 (unsigned long long)le64_to_cpu(wqicb->addr));
1632 netdev_err(qdev->ndev, "wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
1633 (unsigned long long)le64_to_cpu(wqicb->cnsmr_idx_addr));
/*
 * ql_dump_tx_ring() - Log a TX ring's addresses, sizes, doorbell
 * registers and indices.  The shadow consumer index is only read when
 * its shadow-register pointer is non-NULL.
 */
1636 void ql_dump_tx_ring(struct tx_ring *tx_ring)
1638 struct ql_adapter *qdev = tx_ring->qdev;
1640 netdev_err(qdev->ndev, "===================== Dumping tx_ring %d ===============\n",
1642 netdev_err(qdev->ndev, "tx_ring->base = %p\n", tx_ring->wq_base);
1643 netdev_err(qdev->ndev, "tx_ring->base_dma = 0x%llx\n",
1644 (unsigned long long)tx_ring->wq_base_dma);
1645 netdev_err(qdev->ndev, "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
1646 tx_ring->cnsmr_idx_sh_reg,
1647 tx_ring->cnsmr_idx_sh_reg
1648 ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
1649 netdev_err(qdev->ndev, "tx_ring->size = %d\n", tx_ring->wq_size);
1650 netdev_err(qdev->ndev, "tx_ring->len = %d\n", tx_ring->wq_len);
1651 netdev_err(qdev->ndev, "tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
1652 netdev_err(qdev->ndev, "tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
1653 netdev_err(qdev->ndev, "tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
1654 netdev_err(qdev->ndev, "tx_ring->cq_id = %d\n", tx_ring->cq_id);
1655 netdev_err(qdev->ndev, "tx_ring->wq_id = %d\n", tx_ring->wq_id);
1656 netdev_err(qdev->ndev, "tx_ring->q = %p\n", tx_ring->q);
1657 netdev_err(qdev->ndev, "tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
/*
 * ql_dump_ricb() - Log the RSS init control block (RICB).
 *
 * @ricb must be the instance embedded in struct ql_adapter (qdev is
 * recovered via container_of()).  Dumps the base CQ, flag bits, mask
 * and all hash CQ ids plus IPv6/IPv4 hash keys.
 */
1660 void ql_dump_ricb(struct ricb *ricb)
1663 struct ql_adapter *qdev =
1664 container_of(ricb, struct ql_adapter, ricb);
1666 netdev_err(qdev->ndev, "===================== Dumping ricb ===============\n");
1667 netdev_err(qdev->ndev, "Dumping ricb stuff...\n");
1669 netdev_err(qdev->ndev, "ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
/*
 * NOTE(review): RSS_L4K is tested against base_cq while every other
 * RSS_* bit is tested against flags -- presumably matching the RICB
 * field layout; confirm against the hardware spec.
 */
1670 netdev_err(qdev->ndev, "ricb->flags = %s%s%s%s%s%s%s%s%s\n",
1671 ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
1672 ricb->flags & RSS_L6K ? "RSS_L6K " : "",
1673 ricb->flags & RSS_LI ? "RSS_LI " : "",
1674 ricb->flags & RSS_LB ? "RSS_LB " : "",
1675 ricb->flags & RSS_LM ? "RSS_LM " : "",
1676 ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
1677 ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
1678 ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
1679 ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
1680 netdev_err(qdev->ndev, "ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
1681 for (i = 0; i < 16; i++)
1682 netdev_err(qdev->ndev, "ricb->hash_cq_id[%d] = 0x%.08x\n", i,
1683 le32_to_cpu(ricb->hash_cq_id[i]));
1684 for (i = 0; i < 10; i++)
1685 netdev_err(qdev->ndev, "ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
1686 le32_to_cpu(ricb->ipv6_hash_key[i]));
1687 for (i = 0; i < 4; i++)
1688 netdev_err(qdev->ndev, "ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
1689 le32_to_cpu(ricb->ipv4_hash_key[i]));
/*
 * ql_dump_cqicb() - Log a completion-queue init control block (CQICB).
 *
 * @cqicb must be the instance embedded in a struct rx_ring (the ring
 * and its qdev are recovered via container_of()).  Little-endian
 * fields are converted before printing.
 */
1692 void ql_dump_cqicb(struct cqicb *cqicb)
1694 struct rx_ring *rx_ring = container_of(cqicb, struct rx_ring, cqicb);
1695 struct ql_adapter *qdev = rx_ring->qdev;
1697 netdev_err(qdev->ndev, "Dumping cqicb stuff...\n");
1699 netdev_err(qdev->ndev, "cqicb->msix_vect = %d\n", cqicb->msix_vect);
1700 netdev_err(qdev->ndev, "cqicb->flags = %x\n", cqicb->flags);
1701 netdev_err(qdev->ndev, "cqicb->len = %d\n", le16_to_cpu(cqicb->len));
1702 netdev_err(qdev->ndev, "cqicb->addr = 0x%llx\n",
1703 (unsigned long long)le64_to_cpu(cqicb->addr));
1704 netdev_err(qdev->ndev, "cqicb->prod_idx_addr = 0x%llx\n",
1705 (unsigned long long)le64_to_cpu(cqicb->prod_idx_addr));
1706 netdev_err(qdev->ndev, "cqicb->pkt_delay = 0x%.04x\n",
1707 le16_to_cpu(cqicb->pkt_delay));
1708 netdev_err(qdev->ndev, "cqicb->irq_delay = 0x%.04x\n",
1709 le16_to_cpu(cqicb->irq_delay));
1710 netdev_err(qdev->ndev, "cqicb->lbq_addr = 0x%llx\n",
1711 (unsigned long long)le64_to_cpu(cqicb->lbq_addr));
1712 netdev_err(qdev->ndev, "cqicb->lbq_buf_size = 0x%.04x\n",
1713 le16_to_cpu(cqicb->lbq_buf_size));
1714 netdev_err(qdev->ndev, "cqicb->lbq_len = 0x%.04x\n",
1715 le16_to_cpu(cqicb->lbq_len));
1716 netdev_err(qdev->ndev, "cqicb->sbq_addr = 0x%llx\n",
1717 (unsigned long long)le64_to_cpu(cqicb->sbq_addr));
1718 netdev_err(qdev->ndev, "cqicb->sbq_buf_size = 0x%.04x\n",
1719 le16_to_cpu(cqicb->sbq_buf_size));
1720 netdev_err(qdev->ndev, "cqicb->sbq_len = 0x%.04x\n",
1721 le16_to_cpu(cqicb->sbq_len));
1724 static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring)
1726 struct ql_adapter *qdev = rx_ring->qdev;
1728 if (rx_ring->cq_id < qdev->rss_ring_count)
1729 return "RX COMPLETION";
1731 return "TX COMPLETION";
/*
 * ql_dump_rx_ring() - Log an RX ring: completion-queue addresses and
 * indices, then the large-buffer queue (lbq) and small-buffer queue
 * (sbq) state.  The shadow producer index is only read when its
 * shadow-register pointer is non-NULL.
 */
1734 void ql_dump_rx_ring(struct rx_ring *rx_ring)
1736 struct ql_adapter *qdev = rx_ring->qdev;
1738 netdev_err(qdev->ndev,
1739 "===================== Dumping rx_ring %d ===============\n",
1741 netdev_err(qdev->ndev,
1742 "Dumping rx_ring %d, type = %s\n", rx_ring->cq_id,
1743 qlge_rx_ring_type_name(rx_ring));
1744 netdev_err(qdev->ndev, "rx_ring->cqicb = %p\n", &rx_ring->cqicb);
1745 netdev_err(qdev->ndev, "rx_ring->cq_base = %p\n", rx_ring->cq_base);
1746 netdev_err(qdev->ndev, "rx_ring->cq_base_dma = %llx\n",
1747 (unsigned long long)rx_ring->cq_base_dma);
1748 netdev_err(qdev->ndev, "rx_ring->cq_size = %d\n", rx_ring->cq_size);
1749 netdev_err(qdev->ndev, "rx_ring->cq_len = %d\n", rx_ring->cq_len);
1750 netdev_err(qdev->ndev,
1751 "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
1752 rx_ring->prod_idx_sh_reg,
1753 rx_ring->prod_idx_sh_reg ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
1754 netdev_err(qdev->ndev, "rx_ring->prod_idx_sh_reg_dma = %llx\n",
1755 (unsigned long long)rx_ring->prod_idx_sh_reg_dma);
1756 netdev_err(qdev->ndev, "rx_ring->cnsmr_idx_db_reg = %p\n",
1757 rx_ring->cnsmr_idx_db_reg);
1758 netdev_err(qdev->ndev, "rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
1759 netdev_err(qdev->ndev, "rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
1760 netdev_err(qdev->ndev, "rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);
/* Large-buffer queue state. */
1762 netdev_err(qdev->ndev, "rx_ring->lbq.base = %p\n", rx_ring->lbq.base);
1763 netdev_err(qdev->ndev, "rx_ring->lbq.base_dma = %llx\n",
1764 (unsigned long long)rx_ring->lbq.base_dma);
1765 netdev_err(qdev->ndev, "rx_ring->lbq.base_indirect = %p\n",
1766 rx_ring->lbq.base_indirect);
1767 netdev_err(qdev->ndev, "rx_ring->lbq.base_indirect_dma = %llx\n",
1768 (unsigned long long)rx_ring->lbq.base_indirect_dma);
1769 netdev_err(qdev->ndev, "rx_ring->lbq = %p\n", rx_ring->lbq.queue);
1770 netdev_err(qdev->ndev, "rx_ring->lbq.prod_idx_db_reg = %p\n",
1771 rx_ring->lbq.prod_idx_db_reg);
1772 netdev_err(qdev->ndev, "rx_ring->lbq.next_to_use = %d\n", rx_ring->lbq.next_to_use);
1773 netdev_err(qdev->ndev, "rx_ring->lbq.next_to_clean = %d\n", rx_ring->lbq.next_to_clean);
/* Small-buffer queue state. */
1775 netdev_err(qdev->ndev, "rx_ring->sbq.base = %p\n", rx_ring->sbq.base);
1776 netdev_err(qdev->ndev, "rx_ring->sbq.base_dma = %llx\n",
1777 (unsigned long long)rx_ring->sbq.base_dma);
1778 netdev_err(qdev->ndev, "rx_ring->sbq.base_indirect = %p\n",
1779 rx_ring->sbq.base_indirect);
1780 netdev_err(qdev->ndev, "rx_ring->sbq.base_indirect_dma = %llx\n",
1781 (unsigned long long)rx_ring->sbq.base_indirect_dma);
1782 netdev_err(qdev->ndev, "rx_ring->sbq = %p\n", rx_ring->sbq.queue);
1783 netdev_err(qdev->ndev, "rx_ring->sbq.prod_idx_db_reg addr = %p\n",
1784 rx_ring->sbq.prod_idx_db_reg);
1785 netdev_err(qdev->ndev, "rx_ring->sbq.next_to_use = %d\n", rx_ring->sbq.next_to_use);
1786 netdev_err(qdev->ndev, "rx_ring->sbq.next_to_clean = %d\n", rx_ring->sbq.next_to_clean);
1787 netdev_err(qdev->ndev, "rx_ring->cq_id = %d\n", rx_ring->cq_id);
1788 netdev_err(qdev->ndev, "rx_ring->irq = %d\n", rx_ring->irq);
1789 netdev_err(qdev->ndev, "rx_ring->cpu = %d\n", rx_ring->cpu);
1790 netdev_err(qdev->ndev, "rx_ring->qdev = %p\n", rx_ring->qdev);
/*
 * ql_dump_hw_cb() - Read a hardware control block of @size bytes
 * (selected by @bit and queue id @q_id) into a temporary buffer and
 * dump it as a WQICB, CQICB or RICB depending on @bit.
 *
 * Uses GFP_ATOMIC since this is a debug path.  The allocation-failure
 * check, the switch/case labels selecting the dump helper and the
 * kfree() are on elided lines -- verify them in the full source.
 */
1793 void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
1797 netdev_err(qdev->ndev, "%s: Enter\n", __func__);
1799 ptr = kmalloc(size, GFP_ATOMIC);
/* Upload the control block contents from the chip into ptr. */
1803 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
1804 netdev_err(qdev->ndev, "%s: Failed to upload control block!\n", __func__);
1809 ql_dump_wqicb((struct wqicb *)ptr);
1812 ql_dump_cqicb((struct cqicb *)ptr);
1815 ql_dump_ricb((struct ricb *)ptr);
1818 netdev_err(qdev->ndev, "%s: Invalid bit value = %x\n", __func__, bit);
/*
 * ql_dump_tx_desc() - Log three consecutive TX buffer descriptors
 * starting at @tbd.  The lines advancing the descriptor pointer
 * between the repeated blocks are elided in this listing --
 * presumably tbd++; verify in the full source.
 *
 * NOTE(review): tbd->len is masked with TX_DESC_LEN_MASK *before*
 * le32_to_cpu(), and TX_DESC_C/TX_DESC_E are tested on the raw
 * little-endian value -- on big-endian hosts these operate on the
 * wrong bits.  Conventional form is to convert first, then mask.
 */
1827 void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd)
1829 netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
1830 le64_to_cpu((u64)tbd->addr));
1831 netdev_err(qdev->ndev, "tbd->len = %d\n",
1832 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1833 netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
1834 tbd->len & TX_DESC_C ? "C" : ".",
1835 tbd->len & TX_DESC_E ? "E" : ".");
1837 netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
1838 le64_to_cpu((u64)tbd->addr));
1839 netdev_err(qdev->ndev, "tbd->len = %d\n",
1840 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1841 netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
1842 tbd->len & TX_DESC_C ? "C" : ".",
1843 tbd->len & TX_DESC_E ? "E" : ".");
1845 netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n",
1846 le64_to_cpu((u64)tbd->addr));
1847 netdev_err(qdev->ndev, "tbd->len = %d\n",
1848 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1849 netdev_err(qdev->ndev, "tbd->flags = %s %s\n",
1850 tbd->len & TX_DESC_C ? "C" : ".",
1851 tbd->len & TX_DESC_E ? "E" : ".");
/*
 * ql_dump_ob_mac_iocb() - Log an outbound MAC IOCB request.
 *
 * The request is also viewed as a TSO IOCB (same layout prefix) so
 * the TSO flag fields can be decoded regardless of opcode.  TSO
 * requests additionally get frame_len/mss/header-length fields
 * (32-bit frame_len); plain MAC requests use the 16-bit frame_len.
 * Finishes by dumping the first buffer descriptor.
 */
1854 void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb)
1856 struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
1857 (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
1858 struct tx_buf_desc *tbd;
1861 netdev_err(qdev->ndev, "%s\n", __func__);
1862 netdev_err(qdev->ndev, "opcode = %s\n",
1863 (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
1864 netdev_err(qdev->ndev, "flags1 = %s %s %s %s %s\n",
1865 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
1866 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
1867 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
1868 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
1869 ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
1870 netdev_err(qdev->ndev, "flags2 = %s %s %s\n",
1871 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
1872 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
1873 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
1874 netdev_err(qdev->ndev, "flags3 = %s %s %s\n",
1875 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
1876 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
1877 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
1878 netdev_err(qdev->ndev, "tid = %x\n", ob_mac_iocb->tid);
1879 netdev_err(qdev->ndev, "txq_idx = %d\n", ob_mac_iocb->txq_idx);
1880 netdev_err(qdev->ndev, "vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
1881 if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
1882 netdev_err(qdev->ndev, "frame_len = %d\n",
1883 le32_to_cpu(ob_mac_tso_iocb->frame_len));
1884 netdev_err(qdev->ndev, "mss = %d\n",
1885 le16_to_cpu(ob_mac_tso_iocb->mss));
1886 netdev_err(qdev->ndev, "prot_hdr_len = %d\n",
1887 le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
1888 netdev_err(qdev->ndev, "hdr_offset = 0x%.04x\n",
1889 le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
/* frame_len is captured here; its consumer is on elided lines. */
1890 frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
1892 netdev_err(qdev->ndev, "frame_len = %d\n",
1893 le16_to_cpu(ob_mac_iocb->frame_len));
1894 frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
1896 tbd = &ob_mac_iocb->tbd[0];
1897 ql_dump_tx_desc(qdev, tbd);
/*
 * ql_dump_ob_mac_rsp() - Log an outbound MAC IOCB response: opcode,
 * the flags1/flags2 status bits (one letter per set bit, '.' when
 * clear) and the transaction id.
 */
1900 void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp)
1902 netdev_err(qdev->ndev, "%s\n", __func__);
1903 netdev_err(qdev->ndev, "opcode = %d\n", ob_mac_rsp->opcode);
1904 netdev_err(qdev->ndev, "flags = %s %s %s %s %s %s %s\n",
1905 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ?
1906 "OI" : ".", ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
1907 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
1908 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
1909 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
1910 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
1911 ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
1912 netdev_err(qdev->ndev, "tid = %x\n", ob_mac_rsp->tid);
/*
 * ql_dump_ib_mac_rsp() - Log an inbound MAC IOCB response in full:
 * flag groups 1-4 (multicast type, error type and RSS type are
 * multi-bit fields decoded by comparing against each defined value),
 * data length/address, and the optional RSS hash, VLAN id and split
 * header fields when their qualifying flag bits are set.
 */
1917 void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp)
1919 netdev_err(qdev->ndev, "%s\n", __func__);
1920 netdev_err(qdev->ndev, "opcode = 0x%x\n", ib_mac_rsp->opcode);
1921 netdev_err(qdev->ndev, "flags1 = %s%s%s%s%s%s\n",
1922 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
1923 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
1924 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
1925 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
1926 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
1927 ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");
/* Multicast type: multi-bit field, exactly one of the three matches. */
1929 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
1930 netdev_err(qdev->ndev, "%s%s%s Multicast\n",
1931 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1932 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1933 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1934 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1935 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1936 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1938 netdev_err(qdev->ndev, "flags2 = %s%s%s%s%s\n",
1939 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
1940 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
1941 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
1942 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
1943 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");
/* Error type: multi-bit field decoded the same way. */
1945 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
1946 netdev_err(qdev->ndev, "%s%s%s%s%s error\n",
1947 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
1948 IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
1949 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
1950 IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
1951 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
1952 IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
1953 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
1954 IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
1955 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
1956 IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");
1958 netdev_err(qdev->ndev, "flags3 = %s%s\n",
1959 ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
1960 ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");
/* RSS hash type: multi-bit field. */
1962 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
1963 netdev_err(qdev->ndev, "RSS flags = %s%s%s%s\n",
1964 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
1965 IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
1966 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
1967 IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
1968 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
1969 IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
1970 ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
1971 IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");
1973 netdev_err(qdev->ndev, "data_len = %d\n",
1974 le32_to_cpu(ib_mac_rsp->data_len));
1975 netdev_err(qdev->ndev, "data_addr = 0x%llx\n",
1976 (unsigned long long)le64_to_cpu(ib_mac_rsp->data_addr));
1977 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
1978 netdev_err(qdev->ndev, "rss = %x\n",
1979 le32_to_cpu(ib_mac_rsp->rss));
1980 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
1981 netdev_err(qdev->ndev, "vlan_id = %x\n",
1982 le16_to_cpu(ib_mac_rsp->vlan_id));
1984 netdev_err(qdev->ndev, "flags4 = %s%s%s\n",
1985 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
1986 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
1987 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");
/* Split-header info is only valid when the HV bit is set. */
1989 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
1990 netdev_err(qdev->ndev, "hdr length = %d\n",
1991 le32_to_cpu(ib_mac_rsp->hdr_len));
1992 netdev_err(qdev->ndev, "hdr addr = 0x%llx\n",
1993 (unsigned long long)le64_to_cpu(ib_mac_rsp->hdr_addr));
1999 void ql_dump_all(struct ql_adapter *qdev)
2005 for (i = 0; i < qdev->tx_ring_count; i++) {
2006 QL_DUMP_TX_RING(&qdev->tx_ring[i]);
2007 QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
2009 for (i = 0; i < qdev->rx_ring_count; i++) {
2010 QL_DUMP_RX_RING(&qdev->rx_ring[i]);
2011 QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);