1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3 * Copyright (c) 2015 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
7 #include <linux/module.h>
8 #include <linux/vmalloc.h>
9 #include <linux/crc32.h>
15 #include "qed_reg_addr.h"
17 /* Memory groups enum */
35 MEM_GROUP_CONN_CFC_MEM,
38 MEM_GROUP_CAU_MEM_EXT,
48 MEM_GROUP_TASK_CFC_MEM,
52 /* Memory groups names */
53 static const char * const s_mem_group_names[] = {
86 /* Idle check conditions */
/* True (1) only if both masked register values differ from their
 * expected immediate values.
 */
static u32 cond5(const u32 *r, const u32 *imm)
{
	u32 first_differs = ((r[0] & imm[0]) != imm[1]);
	u32 second_differs = ((r[1] & imm[2]) != imm[3]);

	return (first_differs && second_differs) ? 1 : 0;
}
/* True (1) if the extracted bit field differs from the expected value */
static u32 cond7(const u32 *r, const u32 *imm)
{
	u32 field = (r[0] >> imm[0]) & imm[1];

	return (field == imm[2]) ? 0 : 1;
}
/* True (1) if the masked register value differs from the expected value */
static u32 cond6(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return (masked == imm[1]) ? 0 : 1;
}
/* True (1) if a field of r[0] differs from a value assembled out of
 * two other fields (low part from r[0], high part from r[1]).
 */
static u32 cond9(const u32 *r, const u32 *imm)
{
	u32 lhs, rhs;

	lhs = (r[0] & imm[0]) >> imm[1];
	rhs = ((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]);

	return (lhs != rhs) ? 1 : 0;
}
/* True (1) if a shifted field of r[0] differs from another mask of r[0] */
static u32 cond10(const u32 *r, const u32 *imm)
{
	u32 shifted_field = (r[0] & imm[0]) >> imm[1];
	u32 masked_value = r[0] & imm[2];

	return (shifted_field != masked_value) ? 1 : 0;
}
/* True (1) if the bits of r[0] OUTSIDE the imm[0] mask differ from imm[1] */
static u32 cond4(const u32 *r, const u32 *imm)
{
	u32 unmasked_bits = r[0] & ~imm[0];

	return (unmasked_bits != imm[1]) ? 1 : 0;
}
/* True (1) if the bits of r[0] outside the r[1] mask differ from imm[0] */
static u32 cond0(const u32 *r, const u32 *imm)
{
	u32 outside_bits = r[0] & ~r[1];

	return (outside_bits != imm[0]) ? 1 : 0;
}
/* True (1) if the register value differs from the expected value */
static u32 cond1(const u32 *r, const u32 *imm)
{
	return (r[0] == imm[0]) ? 0 : 1;
}
/* True (1) if r[0] and r[1] disagree while r[2] equals the expected value */
static u32 cond11(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return (r[2] == imm[0]) ? 1 : 0;
}
/* True (1) if r[0] and r[1] disagree while r[2] exceeds the threshold */
static u32 cond12(const u32 *r, const u32 *imm)
{
	if (r[0] == r[1])
		return 0;

	return (r[2] > imm[0]) ? 1 : 0;
}
139 static u32 cond3(const u32 *r, const u32 *imm)
/* Returns the masked value itself (non-zero iff any masked bit is set) */
static u32 cond13(const u32 *r, const u32 *imm)
{
	u32 masked = r[0] & imm[0];

	return masked;
}
/* True (1) if r[0] is below r[1] minus a margin.
 * Unsigned arithmetic: r[1] - imm[0] intentionally wraps modulo 2^32.
 */
static u32 cond8(const u32 *r, const u32 *imm)
{
	u32 threshold = r[1] - imm[0];

	return (r[0] < threshold) ? 1 : 0;
}
/* True (1) if the register value exceeds the threshold */
static u32 cond2(const u32 *r, const u32 *imm)
{
	return (r[0] > imm[0]) ? 1 : 0;
}
159 /* Array of Idle Check conditions */
160 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
177 #define NUM_PHYS_BLOCKS 84
179 #define NUM_DBG_RESET_REGS 8
181 /******************************* Data Types **********************************/
192 /* CM context types */
201 /* Debug bus frame modes */
202 enum dbg_bus_frame_modes {
203 DBG_BUS_FRAME_MODE_4ST = 0, /* 4 Storm dwords (no HW) */
204 DBG_BUS_FRAME_MODE_2ST_2HW = 1, /* 2 Storm dwords, 2 HW dwords */
205 DBG_BUS_FRAME_MODE_1ST_3HW = 2, /* 1 Storm dwords, 3 HW dwords */
206 DBG_BUS_FRAME_MODE_4HW = 3, /* 4 HW dwords (no Storms) */
207 DBG_BUS_FRAME_MODE_8HW = 4, /* 8 HW dwords (no Storms) */
208 DBG_BUS_NUM_FRAME_MODES
211 /* Chip constant definitions */
217 /* HW type constant definitions */
218 struct hw_type_defs {
225 /* RBC reset definitions */
226 struct rbc_reset_defs {
228 u32 reset_val[MAX_CHIP_IDS];
231 /* Storm constant definitions.
232 * Addresses are in bytes, sizes are in quad-regs.
236 enum block_id sem_block_id;
237 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
239 u32 sem_fast_mem_addr;
240 u32 sem_frame_mode_addr;
241 u32 sem_slow_enable_addr;
242 u32 sem_slow_mode_addr;
243 u32 sem_slow_mode1_conf_addr;
244 u32 sem_sync_dbg_empty_addr;
245 u32 sem_gpre_vect_addr;
247 u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
248 u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
251 /* Debug Bus Constraint operation constant definitions */
252 struct dbg_bus_constraint_op_defs {
257 /* Storm Mode definitions */
258 struct storm_mode_defs {
262 u32 src_disable_reg_addr;
264 bool exists[MAX_CHIP_IDS];
267 struct grc_param_defs {
268 u32 default_val[MAX_CHIP_IDS];
273 u32 exclude_all_preset_val;
274 u32 crash_preset_val[MAX_CHIP_IDS];
277 /* Address is in 128b units. Width is in bits. */
278 struct rss_mem_defs {
279 const char *mem_name;
280 const char *type_name;
283 u32 num_entries[MAX_CHIP_IDS];
286 struct vfc_ram_defs {
287 const char *mem_name;
288 const char *type_name;
293 struct big_ram_defs {
294 const char *instance_name;
295 enum mem_groups mem_group_id;
296 enum mem_groups ram_mem_group_id;
297 enum dbg_grc_params grc_param;
300 u32 is_256b_reg_addr;
301 u32 is_256b_bit_offset[MAX_CHIP_IDS];
302 u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
306 const char *phy_name;
308 /* PHY base GRC address */
311 /* Relative address of indirect TBUS address register (bits 0..7) */
312 u32 tbus_addr_lo_addr;
314 /* Relative address of indirect TBUS address register (bits 8..10) */
315 u32 tbus_addr_hi_addr;
317 /* Relative address of indirect TBUS data register (bits 0..7) */
318 u32 tbus_data_lo_addr;
320 /* Relative address of indirect TBUS data register (bits 8..11) */
321 u32 tbus_data_hi_addr;
324 /* Split type definitions */
325 struct split_type_defs {
329 /******************************** Constants **********************************/
331 #define BYTES_IN_DWORD sizeof(u32)
332 /* In the macros below, size and offset are specified in bits */
333 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
334 #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
335 #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
336 #define FIELD_DWORD_OFFSET(type, field) \
337 (int)(FIELD_BIT_OFFSET(type, field) / 32)
338 #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
339 #define FIELD_BIT_MASK(type, field) \
340 (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
341 FIELD_DWORD_SHIFT(type, field))
343 #define SET_VAR_FIELD(var, type, field, val) \
345 var[FIELD_DWORD_OFFSET(type, field)] &= \
346 (~FIELD_BIT_MASK(type, field)); \
347 var[FIELD_DWORD_OFFSET(type, field)] |= \
348 (val) << FIELD_DWORD_SHIFT(type, field); \
351 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
353 for (i = 0; i < (arr_size); i++) \
354 qed_wr(dev, ptt, addr, (arr)[i]); \
357 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
358 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
360 /* extra lines include a signature line + optional latency events line */
361 #define NUM_EXTRA_DBG_LINES(block) \
362 (GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
363 #define NUM_DBG_LINES(block) \
364 ((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))
366 #define USE_DMAE true
367 #define PROTECT_WIDE_BUS true
369 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
370 #define RAM_LINES_TO_BYTES(lines) \
371 DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
373 #define REG_DUMP_LEN_SHIFT 24
374 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
375 BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
377 #define IDLE_CHK_RULE_SIZE_DWORDS \
378 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
380 #define IDLE_CHK_RESULT_HDR_DWORDS \
381 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
383 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
384 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
386 #define PAGE_MEM_DESC_SIZE_DWORDS \
387 BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
389 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
391 /* The sizes and offsets below are specified in bits */
392 #define VFC_CAM_CMD_STRUCT_SIZE 64
393 #define VFC_CAM_CMD_ROW_OFFSET 48
394 #define VFC_CAM_CMD_ROW_SIZE 9
395 #define VFC_CAM_ADDR_STRUCT_SIZE 16
396 #define VFC_CAM_ADDR_OP_OFFSET 0
397 #define VFC_CAM_ADDR_OP_SIZE 4
398 #define VFC_CAM_RESP_STRUCT_SIZE 256
399 #define VFC_RAM_ADDR_STRUCT_SIZE 16
400 #define VFC_RAM_ADDR_OP_OFFSET 0
401 #define VFC_RAM_ADDR_OP_SIZE 2
402 #define VFC_RAM_ADDR_ROW_OFFSET 2
403 #define VFC_RAM_ADDR_ROW_SIZE 10
404 #define VFC_RAM_RESP_STRUCT_SIZE 256
406 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
407 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
408 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
409 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
410 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
411 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
413 #define NUM_VFC_RAM_TYPES 4
415 #define VFC_CAM_NUM_ROWS 512
417 #define VFC_OPCODE_CAM_RD 14
418 #define VFC_OPCODE_RAM_RD 0
420 #define NUM_RSS_MEM_TYPES 5
422 #define NUM_BIG_RAM_TYPES 3
423 #define BIG_RAM_NAME_LEN 3
425 #define NUM_PHY_TBUS_ADDRESSES 2048
426 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
428 #define RESET_REG_UNRESET_OFFSET 4
430 #define STALL_DELAY_MS 500
432 #define STATIC_DEBUG_LINE_DWORDS 9
434 #define NUM_COMMON_GLOBAL_PARAMS 9
436 #define MAX_RECURSION_DEPTH 10
438 #define FW_IMG_MAIN 1
440 #define REG_FIFO_ELEMENT_DWORDS 2
441 #define REG_FIFO_DEPTH_ELEMENTS 32
442 #define REG_FIFO_DEPTH_DWORDS \
443 (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
445 #define IGU_FIFO_ELEMENT_DWORDS 4
446 #define IGU_FIFO_DEPTH_ELEMENTS 64
447 #define IGU_FIFO_DEPTH_DWORDS \
448 (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
450 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
451 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
452 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
453 (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
454 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
456 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
458 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
460 #define MAX_SW_PLTAFORM_STR_SIZE 64
462 #define EMPTY_FW_VERSION_STR "???_???_???_???"
463 #define EMPTY_FW_IMAGE_STR "???????????????"
465 /***************************** Constant Arrays *******************************/
467 /* Chip constant definitions array */
468 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
469 {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
470 {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
473 /* Storm constant definitions array */
474 static struct storm_defs s_storm_defs[] = {
477 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
479 TSEM_REG_FAST_MEMORY,
480 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
481 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
482 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
483 TCM_REG_CTX_RBC_ACCS,
484 {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
485 TCM_REG_SM_TASK_CTX},
486 {{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
491 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
493 MSEM_REG_FAST_MEMORY,
494 MSEM_REG_DBG_FRAME_MODE_BB_K2,
495 MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
496 MSEM_REG_SLOW_DBG_MODE_BB_K2,
497 MSEM_REG_DBG_MODE1_CFG_BB_K2,
498 MSEM_REG_SYNC_DBG_EMPTY,
499 MSEM_REG_DBG_GPRE_VECT,
500 MCM_REG_CTX_RBC_ACCS,
501 {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
502 MCM_REG_SM_TASK_CTX },
503 {{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2}*/
508 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
510 USEM_REG_FAST_MEMORY,
511 USEM_REG_DBG_FRAME_MODE_BB_K2,
512 USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
513 USEM_REG_SLOW_DBG_MODE_BB_K2,
514 USEM_REG_DBG_MODE1_CFG_BB_K2,
515 USEM_REG_SYNC_DBG_EMPTY,
516 USEM_REG_DBG_GPRE_VECT,
517 UCM_REG_CTX_RBC_ACCS,
518 {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
519 UCM_REG_SM_TASK_CTX},
520 {{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
525 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
527 XSEM_REG_FAST_MEMORY,
528 XSEM_REG_DBG_FRAME_MODE_BB_K2,
529 XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
530 XSEM_REG_SLOW_DBG_MODE_BB_K2,
531 XSEM_REG_DBG_MODE1_CFG_BB_K2,
532 XSEM_REG_SYNC_DBG_EMPTY,
533 XSEM_REG_DBG_GPRE_VECT,
534 XCM_REG_CTX_RBC_ACCS,
535 {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
536 {{9, 15, 0, 0}, {9, 15, 0, 0}} /* {bb} {k2} */
541 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
543 YSEM_REG_FAST_MEMORY,
544 YSEM_REG_DBG_FRAME_MODE_BB_K2,
545 YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
546 YSEM_REG_SLOW_DBG_MODE_BB_K2,
547 YSEM_REG_DBG_MODE1_CFG_BB_K2,
548 YSEM_REG_SYNC_DBG_EMPTY,
549 YSEM_REG_DBG_GPRE_VECT,
550 YCM_REG_CTX_RBC_ACCS,
551 {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
552 YCM_REG_SM_TASK_CTX},
553 {{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
558 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
560 PSEM_REG_FAST_MEMORY,
561 PSEM_REG_DBG_FRAME_MODE_BB_K2,
562 PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
563 PSEM_REG_SLOW_DBG_MODE_BB_K2,
564 PSEM_REG_DBG_MODE1_CFG_BB_K2,
565 PSEM_REG_SYNC_DBG_EMPTY,
566 PSEM_REG_DBG_GPRE_VECT,
567 PCM_REG_CTX_RBC_ACCS,
568 {0, PCM_REG_SM_CON_CTX, 0, 0},
569 {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
573 static struct hw_type_defs s_hw_type_defs[] = {
575 {"asic", 1, 256, 32768},
576 {"reserved", 0, 0, 0},
577 {"reserved2", 0, 0, 0},
578 {"reserved3", 0, 0, 0}
581 static struct grc_param_defs s_grc_param_defs[] = {
582 /* DBG_GRC_PARAM_DUMP_TSTORM */
583 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
585 /* DBG_GRC_PARAM_DUMP_MSTORM */
586 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
588 /* DBG_GRC_PARAM_DUMP_USTORM */
589 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
591 /* DBG_GRC_PARAM_DUMP_XSTORM */
592 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
594 /* DBG_GRC_PARAM_DUMP_YSTORM */
595 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
597 /* DBG_GRC_PARAM_DUMP_PSTORM */
598 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
600 /* DBG_GRC_PARAM_DUMP_REGS */
601 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
603 /* DBG_GRC_PARAM_DUMP_RAM */
604 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
606 /* DBG_GRC_PARAM_DUMP_PBUF */
607 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
609 /* DBG_GRC_PARAM_DUMP_IOR */
610 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
612 /* DBG_GRC_PARAM_DUMP_VFC */
613 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
615 /* DBG_GRC_PARAM_DUMP_CM_CTX */
616 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
618 /* DBG_GRC_PARAM_DUMP_ILT */
619 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
621 /* DBG_GRC_PARAM_DUMP_RSS */
622 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
624 /* DBG_GRC_PARAM_DUMP_CAU */
625 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
627 /* DBG_GRC_PARAM_DUMP_QM */
628 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
630 /* DBG_GRC_PARAM_DUMP_MCP */
631 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
633 /* DBG_GRC_PARAM_DUMP_DORQ */
634 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
636 /* DBG_GRC_PARAM_DUMP_CFC */
637 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
639 /* DBG_GRC_PARAM_DUMP_IGU */
640 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
642 /* DBG_GRC_PARAM_DUMP_BRB */
643 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
645 /* DBG_GRC_PARAM_DUMP_BTB */
646 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
648 /* DBG_GRC_PARAM_DUMP_BMB */
649 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
651 /* DBG_GRC_PARAM_RESERVED1 */
652 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
654 /* DBG_GRC_PARAM_DUMP_MULD */
655 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
657 /* DBG_GRC_PARAM_DUMP_PRS */
658 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
660 /* DBG_GRC_PARAM_DUMP_DMAE */
661 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
663 /* DBG_GRC_PARAM_DUMP_TM */
664 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
666 /* DBG_GRC_PARAM_DUMP_SDM */
667 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
669 /* DBG_GRC_PARAM_DUMP_DIF */
670 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
672 /* DBG_GRC_PARAM_DUMP_STATIC */
673 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
675 /* DBG_GRC_PARAM_UNSTALL */
676 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
678 /* DBG_GRC_PARAM_RESERVED2 */
679 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
681 /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
682 {{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},
684 /* DBG_GRC_PARAM_EXCLUDE_ALL */
685 {{0, 0}, 0, 1, true, false, 0, {0, 0}},
687 /* DBG_GRC_PARAM_CRASH */
688 {{0, 0}, 0, 1, true, false, 0, {0, 0}},
690 /* DBG_GRC_PARAM_PARITY_SAFE */
691 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
693 /* DBG_GRC_PARAM_DUMP_CM */
694 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
696 /* DBG_GRC_PARAM_DUMP_PHY */
697 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
699 /* DBG_GRC_PARAM_NO_MCP */
700 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
702 /* DBG_GRC_PARAM_NO_FW_VER */
703 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
705 /* DBG_GRC_PARAM_RESERVED3 */
706 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
708 /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
709 {{0, 1}, 0, 1, false, false, 0, {0, 1}},
711 /* DBG_GRC_PARAM_DUMP_ILT_CDUC */
712 {{1, 1}, 0, 1, false, false, 0, {0, 0}},
714 /* DBG_GRC_PARAM_DUMP_ILT_CDUT */
715 {{1, 1}, 0, 1, false, false, 0, {0, 0}},
717 /* DBG_GRC_PARAM_DUMP_CAU_EXT */
718 {{0, 0}, 0, 1, false, false, 0, {1, 1}}
721 static struct rss_mem_defs s_rss_mem_defs[] = {
722 {"rss_mem_cid", "rss_cid", 0, 32,
725 {"rss_mem_key_msb", "rss_key", 1024, 256,
728 {"rss_mem_key_lsb", "rss_key", 2048, 64,
731 {"rss_mem_info", "rss_info", 3072, 16,
734 {"rss_mem_ind", "rss_ind", 4096, 16,
738 static struct vfc_ram_defs s_vfc_ram_defs[] = {
739 {"vfc_ram_tt1", "vfc_ram", 0, 512},
740 {"vfc_ram_mtt2", "vfc_ram", 512, 128},
741 {"vfc_ram_stt2", "vfc_ram", 640, 32},
742 {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
745 static struct big_ram_defs s_big_ram_defs[] = {
746 {"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
747 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
748 MISC_REG_BLOCK_256B_EN, {0, 0},
751 {"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
752 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
753 MISC_REG_BLOCK_256B_EN, {0, 1},
756 {"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
757 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
758 MISCS_REG_BLOCK_256B_EN, {0, 0},
762 static struct rbc_reset_defs s_rbc_reset_defs[] = {
763 {MISCS_REG_RESET_PL_HV,
765 {MISC_REG_RESET_PL_PDA_VMAIN_1,
766 {0x4404040, 0x4404040}},
767 {MISC_REG_RESET_PL_PDA_VMAIN_2,
769 {MISC_REG_RESET_PL_PDA_VAUX,
773 static struct phy_defs s_phy_defs[] = {
774 {"nw_phy", NWS_REG_NWS_CMU_K2,
775 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
776 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
777 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
778 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
779 {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
780 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
781 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
782 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
783 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
784 {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
785 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
786 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
787 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
788 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
789 {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
790 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
791 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
792 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
793 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
796 static struct split_type_defs s_split_type_defs[] = {
797 /* SPLIT_TYPE_NONE */
800 /* SPLIT_TYPE_PORT */
806 /* SPLIT_TYPE_PORT_PF */
813 /**************************** Private Functions ******************************/
815 /* Reads and returns a single dword from the specified unaligned buffer */
816 static u32 qed_read_unaligned_dword(u8 *buf)
/* memcpy into a local dword performs a byte-wise copy, so reads from
 * buffers that are not dword-aligned avoid unaligned-access faults.
 * NOTE(review): extraction dropped interior lines here (the local
 * 'dword' declaration and the return) - restore from upstream.
 */
820 memcpy((u8 *)&dword, buf, sizeof(dword));
824 /* Sets the value of the specified GRC param */
825 static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
826 enum dbg_grc_params grc_param, u32 val)
828 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
830 dev_data->grc.param_val[grc_param] = val;
833 /* Returns the value of the specified GRC param */
834 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
835 enum dbg_grc_params grc_param)
837 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
839 return dev_data->grc.param_val[grc_param];
842 /* Initializes the GRC parameters */
843 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
845 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
847 if (!dev_data->grc.params_initialized) {
848 qed_dbg_grc_set_params_default(p_hwfn);
849 dev_data->grc.params_initialized = 1;
853 /* Sets pointer and size for the specified binary buffer type */
854 static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
855 enum bin_dbg_buffer_type buf_type,
856 const u32 *ptr, u32 size)
858 struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
/* Cast drops const because virt_mem_desc stores a plain void pointer.
 * NOTE(review): a following line (presumably storing 'size' into the
 * descriptor) was dropped by extraction - restore from upstream.
 */
860 buf->ptr = (void *)ptr;
864 /* Initializes debug data for the specified device */
/* Initializes debug data for the specified device: detects the chip,
 * enables the matching mode flags, derives port/PF counts, and
 * initializes the GRC parameters. Idempotent via dev_data->initialized.
 * NOTE(review): extraction dropped interior lines (braces, case labels,
 * a min_t() argument) - restore from upstream before building.
 */
865 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
867 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
868 u8 num_pfs = 0, max_pfs_per_port = 0;
870 if (dev_data->initialized)
871 return DBG_STATUS_OK;
874 if (QED_IS_K2(p_hwfn->cdev)) {
875 dev_data->chip_id = CHIP_K2;
876 dev_data->mode_enable[MODE_K2] = 1;
877 dev_data->num_vfs = MAX_NUM_VFS_K2;
878 num_pfs = MAX_NUM_PFS_K2;
/* K2 has twice the ports per engine of BB, hence half the PFs per port */
879 max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
880 } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
881 dev_data->chip_id = CHIP_BB;
882 dev_data->mode_enable[MODE_BB] = 1;
883 dev_data->num_vfs = MAX_NUM_VFS_BB;
884 num_pfs = MAX_NUM_PFS_BB;
885 max_pfs_per_port = MAX_NUM_PFS_BB;
887 return DBG_STATUS_UNKNOWN_CHIP;
891 dev_data->hw_type = HW_TYPE_ASIC;
892 dev_data->mode_enable[MODE_ASIC] = 1;
895 switch (p_hwfn->cdev->num_ports_in_engine) {
897 dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
900 dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
903 dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
908 if (QED_IS_CMT(p_hwfn->cdev))
909 dev_data->mode_enable[MODE_100G] = 1;
911 /* Set number of ports */
912 if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
913 dev_data->mode_enable[MODE_100G])
914 dev_data->num_ports = 1;
915 else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
916 dev_data->num_ports = 2;
917 else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
918 dev_data->num_ports = 4;
920 /* Set number of PFs per port */
921 dev_data->num_pfs_per_port = min_t(u32,
922 num_pfs / dev_data->num_ports,
925 /* Initializes the GRC parameters */
926 qed_dbg_grc_init_params(p_hwfn);
928 dev_data->use_dmae = true;
929 dev_data->initialized = 1;
931 return DBG_STATUS_OK;
934 static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
935 enum block_id block_id)
937 const struct dbg_block *dbg_block;
939 dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
940 return dbg_block + block_id;
/* Returns the per-chip debug block entry: the chip-data array is laid
 * out as [block_id][chip_id], hence block_id * MAX_CHIP_IDS + chip_id.
 * NOTE(review): extraction dropped the remaining parameter lines of the
 * signature - restore from upstream.
 */
943 static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
948 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
950 return (const struct dbg_block_chip *)
951 p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
952 block_id * MAX_CHIP_IDS + dev_data->chip_id;
/* Returns the per-chip reset register entry: the reset-regs array is
 * laid out as [reset_reg_id][chip_id], same indexing scheme as the
 * per-chip block array.
 * NOTE(review): extraction dropped the remaining parameter lines of the
 * signature - restore from upstream.
 */
955 static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
959 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
961 return (const struct dbg_reset_reg *)
962 p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
963 reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
966 /* Reads the FW info structure for the specified Storm from the chip,
967 * and writes it to the specified fw_info pointer.
969 static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
970 struct qed_ptt *p_ptt,
971 u8 storm_id, struct fw_info *fw_info)
973 struct storm_defs *storm = &s_storm_defs[storm_id];
974 struct fw_info_location fw_info_location;
975 u32 addr, i, size, *dest;
977 memset(&fw_info_location, 0, sizeof(fw_info_location));
978 memset(fw_info, 0, sizeof(*fw_info));
980 /* Read first the address that points to fw_info location.
981 * The address is located in the last line of the Storm RAM.
/* The location record sits at the very end of the Storm's fast-memory
 * internal RAM, hence base + RAM size - sizeof(record).
 */
983 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
984 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
985 sizeof(fw_info_location);
987 dest = (u32 *)&fw_info_location;
988 size = BYTES_TO_DWORDS(sizeof(fw_info_location));
990 for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
991 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
993 /* qed_rq() fetches data in CPU byteorder. Swap it back to
994 * the device's to get right structure layout.
996 cpu_to_le32_array(dest, size);
998 /* Read FW version info from Storm RAM */
/* Guard against a garbage/uninitialized location record: size must be
 * non-zero and must not overflow the caller's fw_info buffer.
 */
999 size = le32_to_cpu(fw_info_location.size);
1000 if (!size || size > sizeof(*fw_info))
1003 addr = le32_to_cpu(fw_info_location.grc_addr);
1004 dest = (u32 *)fw_info;
1005 size = BYTES_TO_DWORDS(size);
1007 for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
1008 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1010 cpu_to_le32_array(dest, size);
1013 /* Dumps the specified string to the specified buffer.
1014 * Returns the dumped size in bytes.
1016 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
/* NOTE(review): extraction dropped a line before the strcpy - given the
 * 'dump' parameter, presumably an 'if (dump)' guard so the size can be
 * computed without writing. Confirm against upstream.
 */
1019 strcpy(dump_buf, str);
/* Size includes the NUL terminator */
1021 return (u32)strlen(str) + 1;
1024 /* Dumps zeros to align the specified buffer to dwords.
1025 * Returns the dumped size in bytes.
1027 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1029 u8 offset_in_dword, align_size;
/* Pad with zeros up to the next 4-byte boundary; no padding needed when
 * already aligned.
 */
1031 offset_in_dword = (u8)(byte_offset & 0x3);
1032 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1034 if (dump && align_size)
1035 memset(dump_buf, 0, align_size);
/* NOTE(review): the trailing 'return align_size;' line was dropped by
 * extraction - restore from upstream.
 */
1040 /* Writes the specified string param to the specified buffer.
1041 * Returns the dumped size in dwords.
/* Layout: param name string, a 1-byte "string value" tag, the value
 * string, then zero padding to the next dword boundary.
 * NOTE(review): extraction dropped interior lines (the 'offset' local
 * initialization, 'if (dump)' guard, offset increment) - restore from
 * upstream before building.
 */
1043 static u32 qed_dump_str_param(u32 *dump_buf,
1045 const char *param_name, const char *param_val)
1047 char *char_buf = (char *)dump_buf;
1050 /* Dump param name */
1051 offset += qed_dump_str(char_buf + offset, dump, param_name);
1053 /* Indicate a string param value */
1055 *(char_buf + offset) = 1;
1058 /* Dump param value */
1059 offset += qed_dump_str(char_buf + offset, dump, param_val);
1061 /* Align buffer to next dword */
1062 offset += qed_dump_align(char_buf + offset, dump, offset);
1064 return BYTES_TO_DWORDS(offset);
1067 /* Writes the specified numeric param to the specified buffer.
1068 * Returns the dumped size in dwords.
/* Layout: param name string, a 0-byte "numeric value" tag, zero padding
 * to the next dword boundary, then the 32-bit value in its own dword.
 * NOTE(review): extraction dropped interior lines (the 'offset' local
 * initialization, 'if (dump)' guards, the final return) - restore from
 * upstream before building.
 */
1070 static u32 qed_dump_num_param(u32 *dump_buf,
1071 bool dump, const char *param_name, u32 param_val)
1073 char *char_buf = (char *)dump_buf;
1076 /* Dump param name */
1077 offset += qed_dump_str(char_buf + offset, dump, param_name);
1079 /* Indicate a numeric param value */
1081 *(char_buf + offset) = 0;
1084 /* Align buffer to next dword */
1085 offset += qed_dump_align(char_buf + offset, dump, offset);
1087 /* Dump param value (and change offset from bytes to dwords) */
1088 offset = BYTES_TO_DWORDS(offset);
1090 *(dump_buf + offset) = param_val;
1096 /* Reads the FW version and writes it as a param to the specified buffer.
1097 * Returns the dumped size in dwords.
1099 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1100 struct qed_ptt *p_ptt,
1101 u32 *dump_buf, bool dump)
1103 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1104 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1105 struct fw_info fw_info = { {0}, {0} };
/* The placeholder "???" strings are dumped as-is when not dumping or
 * when DBG_GRC_PARAM_NO_FW_VER suppresses the chip read.
 */
1108 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1109 /* Read FW info from chip */
1110 qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
1112 /* Create FW version/image strings */
1113 if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1114 "%d_%d_%d_%d", fw_info.ver.num.major,
1115 fw_info.ver.num.minor, fw_info.ver.num.rev,
1116 fw_info.ver.num.eng) < 0)
1118 "Unexpected debug error: invalid FW version string\n");
1119 switch (fw_info.ver.image_id) {
1121 strcpy(fw_img_str, "main");
1124 strcpy(fw_img_str, "unknown");
/* NOTE(review): extraction dropped interior lines in this function
 * (offset local, DP_NOTICE call line, case labels, braces, final
 * return) - restore from upstream before building.
 */
1129 /* Dump FW version, image and timestamp */
1130 offset += qed_dump_str_param(dump_buf + offset,
1131 dump, "fw-version", fw_ver_str);
1132 offset += qed_dump_str_param(dump_buf + offset,
1133 dump, "fw-image", fw_img_str);
1134 offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
1135 le32_to_cpu(fw_info.ver.timestamp));
1140 /* Reads the MFW version and writes it as a param to the specified buffer.
1141 * Returns the dumped size in dwords.
1143 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1144 struct qed_ptt *p_ptt,
1145 u32 *dump_buf, bool dump)
1147 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1150 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1151 u32 global_section_offsize, global_section_addr, mfw_ver;
1152 u32 public_data_addr, global_section_offsize_addr;
1154 /* Find MCP public data GRC address. Needs to be ORed with
1155 * MCP_REG_SCRATCH due to a HW bug.
1157 public_data_addr = qed_rd(p_hwfn,
1159 MISC_REG_SHARED_MEM_ADDR) |
1162 /* Find MCP public global section offset */
1163 global_section_offsize_addr = public_data_addr +
1164 offsetof(struct mcp_public_data,
1166 sizeof(offsize_t) * PUBLIC_GLOBAL;
1167 global_section_offsize = qed_rd(p_hwfn, p_ptt,
1168 global_section_offsize_addr);
1169 global_section_addr =
1171 (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
1173 /* Read MFW version from MCP public global section */
1174 mfw_ver = qed_rd(p_hwfn, p_ptt,
1175 global_section_addr +
1176 offsetof(struct public_global, mfw_ver));
1178 /* Dump MFW version param */
/* Version dword is packed big-endian-style: major in the top byte */
1179 if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
1180 (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
1181 (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
1183 "Unexpected debug error: invalid MFW version string\n");
/* NOTE(review): extraction dropped interior lines in this function
 * (the opening 'if (dump &&' line, MCP_REG_SCRATCH operand, DP_NOTICE
 * call line, braces) - restore from upstream before building.
 */
1186 return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
1189 /* Reads the chip revision from the chip and writes it as a param to the
1190 * specified buffer. Returns the dumped size in dwords.
1192 static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
1193 struct qed_ptt *p_ptt,
1194 u32 *dump_buf, bool dump)
1196 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* "??" placeholder is dumped when the revision cannot be read
 * (non-ASIC hw types).
 */
1197 char param_str[3] = "??";
1199 if (dev_data->hw_type == HW_TYPE_ASIC) {
1200 u32 chip_rev, chip_metal;
1202 chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
1203 chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
/* Encode as e.g. "b0": revision as a letter, metal as a digit */
1205 param_str[0] = 'a' + (u8)chip_rev;
1206 param_str[1] = '0' + (u8)chip_metal;
1209 return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
1212 /* Writes a section header to the specified buffer.
1213 * Returns the dumped size in dwords.
1215 static u32 qed_dump_section_hdr(u32 *dump_buf,
1216 bool dump, const char *name, u32 num_params)
1218 return qed_dump_num_param(dump_buf, dump, name, num_params);
1221 /* Writes the common global params to the specified buffer.
1222 * Returns the dumped size in dwords.
1224 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1225 struct qed_ptt *p_ptt,
1228 u8 num_specific_global_params)
1230 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1234 /* Dump global params section header */
/* BB additionally dumps a "path" param, hence the +1 on that chip */
1235 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
1236 (dev_data->chip_id == CHIP_BB ? 1 : 0);
1237 offset += qed_dump_section_hdr(dump_buf + offset,
1238 dump, "global_params", num_params);
/* NOTE(review): extraction dropped interior lines in this function
 * (remaining signature params, 'offset'/'num_params' locals, some param
 * name string arguments, final return) - restore from upstream.
 */
1241 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1242 offset += qed_dump_mfw_ver_param(p_hwfn,
1243 p_ptt, dump_buf + offset, dump);
1244 offset += qed_dump_chip_revision_param(p_hwfn,
1245 p_ptt, dump_buf + offset, dump);
1246 offset += qed_dump_num_param(dump_buf + offset,
1247 dump, "tools-version", TOOLS_VERSION);
1248 offset += qed_dump_str_param(dump_buf + offset,
1251 s_chip_defs[dev_data->chip_id].name);
1252 offset += qed_dump_str_param(dump_buf + offset,
1255 s_hw_type_defs[dev_data->hw_type].name);
1256 offset += qed_dump_num_param(dump_buf + offset,
1257 dump, "pci-func", p_hwfn->abs_pf_id);
1258 if (dev_data->chip_id == CHIP_BB)
1259 offset += qed_dump_num_param(dump_buf + offset,
1260 dump, "path", QED_PATH_ID(p_hwfn));
1265 /* Writes the "last" section (including CRC) to the specified buffer at the
1266 * given offset. Returns the dumped size in dwords.
1268 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1270 u32 start_offset = offset;
1272 /* Dump CRC section header */
1273 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1275 /* Calculate CRC32 and add it to the dword after the "last" section */
/* Inverted CRC32 over the whole dump so far, seeded with all-ones */
1277 *(dump_buf + offset) = ~crc32(0xffffffff,
1279 DWORDS_TO_BYTES(offset));
/* NOTE(review): extraction dropped interior lines (the 'if (dump)'
 * guard, the crc32 buffer argument, the offset increment) - restore
 * from upstream before building.
 */
1283 return offset - start_offset;
1286 /* Update blocks reset state */
1287 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
1288 struct qed_ptt *p_ptt)
1290 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1291 u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
1295 /* Read reset registers */
1296 for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
1297 const struct dbg_reset_reg *rst_reg;
1298 bool rst_reg_removed;
1301 rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
1302 rst_reg_removed = GET_FIELD(rst_reg->data,
1303 DBG_RESET_REG_IS_REMOVED);
/* Register addresses in the binary data are dword-based */
1304 rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
1305 DBG_RESET_REG_ADDR));
1307 if (!rst_reg_removed)
1308 reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
1312 /* Check if blocks are in reset */
1313 for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
1314 const struct dbg_block_chip *blk;
1318 blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
1319 is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
1320 has_rst_reg = GET_FIELD(blk->flags,
1321 DBG_BLOCK_CHIP_HAS_RESET_REG);
/* A cleared reset-register bit means the block is held in reset */
1323 if (!is_removed && has_rst_reg)
1324 dev_data->block_in_reset[blk_id] =
1325 !(reg_val[blk->reset_reg_id] &
1326 BIT(blk->reset_reg_bit_offset));
/* NOTE(review): extraction dropped interior lines in this function
 * (loop-index locals, some declarations, qed_rd address argument,
 * braces) - restore from upstream before building.
 */
1330 /* is_mode_match recursive function */
/* Recursively evaluates the prefix-encoded modes expression tree stored in
 * the BIN_BUF_DBG_MODE_TREE array. *modes_buf_offset is advanced past every
 * consumed tree element. Returns whether the mode expression is enabled.
 */
1331 static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
1332 u16 *modes_buf_offset, u8 rec_depth)
1334 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* Guard against a corrupt tree causing unbounded recursion */
1339 if (rec_depth > MAX_RECURSION_DEPTH) {
1341 "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
1345 /* Get next element from modes tree buffer */
1346 dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
1347 tree_val = dbg_array[(*modes_buf_offset)++];
1350 case INIT_MODE_OP_NOT:
1351 return !qed_is_mode_match_rec(p_hwfn,
1352 modes_buf_offset, rec_depth + 1);
1353 case INIT_MODE_OP_OR:
1354 case INIT_MODE_OP_AND:
/* Both operands are evaluated unconditionally so the offset always
 * advances past the full subtree, regardless of short-circuit value.
 */
1355 arg1 = qed_is_mode_match_rec(p_hwfn,
1356 modes_buf_offset, rec_depth + 1);
1357 arg2 = qed_is_mode_match_rec(p_hwfn,
1358 modes_buf_offset, rec_depth + 1);
1359 return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
1360 arg2) : (arg1 && arg2);
/* Leaf node: tree_val encodes a mode index, biased by MAX_INIT_MODE_OPS */
1362 return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1366 /* Returns true if the mode (specified using modes_buf_offset) is enabled */
/* Thin wrapper that starts the modes-tree evaluation at recursion depth 0.
 * *modes_buf_offset is advanced past the consumed tree elements.
 */
1367 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
1369 return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
1372 /* Enable / disable the Debug block */
/* Turns the Debug block on (enable=true) or off via DBG_REG_DBG_BLOCK_ON. */
1373 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
1374 struct qed_ptt *p_ptt, bool enable)
1376 qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1379 /* Resets the Debug block */
/* Pulses the DBG block through reset: clears its bit in the reset register,
 * then restores the original register value.
 */
1380 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
1381 struct qed_ptt *p_ptt)
1383 u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
1384 const struct dbg_reset_reg *reset_reg;
1385 const struct dbg_block_chip *block;
1387 block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
1388 reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
1390 DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
1392 old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
/* Clearing the block's bit asserts reset (0 = in reset) */
1394 old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
1396 qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
1397 qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
1400 /* Enable / disable Debug Bus clients according to the specified mask
1401 * (1 = enable, 0 = disable).
/* Writes the per-client enable bitmask to DBG_REG_CLIENT_ENABLE. */
1403 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
1404 struct qed_ptt *p_ptt, u32 client_mask)
1406 qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
/* Programs one block's debug-bus line: select, dword-enable, shift,
 * force-valid and force-frame registers (register addresses are stored in
 * dwords and converted to byte addresses here).
 */
1409 static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
1410 struct qed_ptt *p_ptt,
1411 enum block_id block_id,
1415 u8 force_valid_mask, u8 force_frame_mask)
1417 const struct dbg_block_chip *block =
1418 qed_get_dbg_block_per_chip(p_hwfn, block_id);
1420 qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
1422 qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
1424 qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
1426 qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
1428 qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
1432 /* Disable debug bus in all blocks */
/* Disables the debug bus in every block that is present, out of reset, has
 * a debug bus, and whose mode expression (if any) matches.
 */
1433 static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
1434 struct qed_ptt *p_ptt)
1436 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1439 /* Disable all blocks */
1440 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
1441 const struct dbg_block_chip *block_per_chip =
1442 qed_get_dbg_block_per_chip(p_hwfn,
1443 (enum block_id)block_id);
/* Skip removed blocks and blocks currently held in reset */
1445 if (GET_FIELD(block_per_chip->flags,
1446 DBG_BLOCK_CHIP_IS_REMOVED) ||
1447 dev_data->block_in_reset[block_id])
1450 /* Disable debug bus */
1451 if (GET_FIELD(block_per_chip->flags,
1452 DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
1454 block_per_chip->dbg_dword_enable_reg_addr;
1455 u16 modes_buf_offset =
1456 GET_FIELD(block_per_chip->dbg_bus_mode.data,
1457 DBG_MODE_HDR_MODES_BUF_OFFSET);
1459 GET_FIELD(block_per_chip->dbg_bus_mode.data,
1460 DBG_MODE_HDR_EVAL_MODE) > 0;
/* Only write when the block's mode expression matches */
1463 qed_is_mode_match(p_hwfn, &modes_buf_offset))
1464 qed_wr(p_hwfn, p_ptt,
1465 DWORDS_TO_BYTES(dbg_en_addr),
1471 /* Returns true if the specified entity (indicated by GRC param) should be
1472 * included in the dump, false otherwise.
/* True if the GRC param's configured value is > 0 (entity included in dump) */
1474 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
1475 enum dbg_grc_params grc_param)
1477 return qed_grc_get_param(p_hwfn, grc_param) > 0;
1480 /* Returns the storm_id that matches the specified Storm letter,
1481 * or MAX_DBG_STORMS if invalid storm letter.
/* Linear search of s_storm_defs by Storm letter; MAX_DBG_STORMS if no match */
1483 static enum dbg_storms qed_get_id_from_letter(char storm_letter)
1487 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
1488 if (s_storm_defs[storm_id].letter == storm_letter)
1489 return (enum dbg_storms)storm_id;
1491 return MAX_DBG_STORMS;
1494 /* Returns true of the specified Storm should be included in the dump, false
/* Storm GRC params share the dbg_storms numbering, so the storm id is used
 * directly as a GRC param index here.
 */
1497 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
1498 enum dbg_storms storm)
1500 return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
1503 /* Returns true if the specified memory should be included in the dump, false
/* Decides whether a memory belongs in the dump: first checks the owning
 * Storm (if any), then Big-RAM groups, then maps the memory group to its
 * controlling GRC dump parameter.
 */
1506 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
1507 enum block_id block_id, u8 mem_group_id)
1509 const struct dbg_block *block;
1512 block = get_dbg_block(p_hwfn, block_id);
1514 /* If the block is associated with a Storm, check Storm match */
1515 if (block->associated_storm_letter) {
1516 enum dbg_storms associated_storm_id =
1517 qed_get_id_from_letter(block->associated_storm_letter);
1519 if (associated_storm_id == MAX_DBG_STORMS ||
1520 !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
/* Big-RAM memories are controlled by their own per-RAM GRC params */
1524 for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
1525 struct big_ram_defs *big_ram = &s_big_ram_defs[i];
1527 if (mem_group_id == big_ram->mem_group_id ||
1528 mem_group_id == big_ram->ram_mem_group_id)
1529 return qed_grc_is_included(p_hwfn, big_ram->grc_param);
/* Map remaining memory groups to their GRC dump parameter */
1532 switch (mem_group_id) {
1533 case MEM_GROUP_PXP_ILT:
1534 case MEM_GROUP_PXP_MEM:
1535 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
1537 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
1538 case MEM_GROUP_PBUF:
1539 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
1540 case MEM_GROUP_CAU_MEM:
1541 case MEM_GROUP_CAU_SB:
1542 case MEM_GROUP_CAU_PI:
1543 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
1544 case MEM_GROUP_CAU_MEM_EXT:
1545 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
1546 case MEM_GROUP_QM_MEM:
1547 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
1548 case MEM_GROUP_CFC_MEM:
1549 case MEM_GROUP_CONN_CFC_MEM:
1550 case MEM_GROUP_TASK_CFC_MEM:
/* CFC memories are dumped if either CFC or CM context dump is on */
1551 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
1552 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
1553 case MEM_GROUP_DORQ_MEM:
1554 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
1555 case MEM_GROUP_IGU_MEM:
1556 case MEM_GROUP_IGU_MSIX:
1557 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
1558 case MEM_GROUP_MULD_MEM:
1559 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
1560 case MEM_GROUP_PRS_MEM:
1561 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
1562 case MEM_GROUP_DMAE_MEM:
1563 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
1564 case MEM_GROUP_TM_MEM:
1565 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
1566 case MEM_GROUP_SDM_MEM:
1567 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
1568 case MEM_GROUP_TDIF_CTX:
1569 case MEM_GROUP_RDIF_CTX:
1570 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
1571 case MEM_GROUP_CM_MEM:
1572 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
1574 return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
1580 /* Stalls all Storms */
/* Asserts (stall=true) or releases the stall bit in every included Storm's
 * SEM fast memory, then waits STALL_DELAY_MS for the stall to take effect.
 */
1581 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
1582 struct qed_ptt *p_ptt, bool stall)
1587 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
1588 if (!qed_grc_is_storm_included(p_hwfn,
1589 (enum dbg_storms)storm_id))
1592 reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
1593 SEM_FAST_REG_STALL_0_BB_K2;
1594 qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
/* Give the Storms time to actually stall before returning */
1597 msleep(STALL_DELAY_MS);
1600 /* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
1601 * taken out of reset.
/* Takes blocks out of reset. RBC clients are always released; when
 * rbc_only is false, all blocks flagged UNRESET_BEFORE_DUMP are released
 * too, one write per reset register.
 */
1603 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
1604 struct qed_ptt *p_ptt, bool rbc_only)
1606 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1607 u8 chip_id = dev_data->chip_id;
1610 /* Take RBCs out of reset */
1611 for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
1612 if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
1615 s_rbc_reset_defs[i].reset_reg_addr +
1616 RESET_REG_UNRESET_OFFSET,
1617 s_rbc_reset_defs[i].reset_val[chip_id])
1620 u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
1624 /* Fill reset regs values */
1625 for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
1626 bool is_removed, has_reset_reg, unreset_before_dump;
1627 const struct dbg_block_chip *block;
1629 block = qed_get_dbg_block_per_chip(p_hwfn,
1633 GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
1635 GET_FIELD(block->flags,
1636 DBG_BLOCK_CHIP_HAS_RESET_REG);
1637 unreset_before_dump =
1638 GET_FIELD(block->flags,
1639 DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);
/* Accumulate the bits to set per reset register */
1641 if (!is_removed && has_reset_reg && unreset_before_dump)
1642 reg_val[block->reset_reg_id] |=
1643 BIT(block->reset_reg_bit_offset);
1646 /* Write reset registers */
1647 for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
1649 const struct dbg_reset_reg *reset_reg;
1652 reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
/* Skip registers not present on this chip */
1655 (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
1658 if (reg_val[reset_reg_id]) {
1660 GET_FIELD(reset_reg->data,
1661 DBG_RESET_REG_ADDR);
/* Writing to the UNRESET offset releases the blocks */
1664 DWORDS_TO_BYTES(reset_reg_addr) +
1665 RESET_REG_UNRESET_OFFSET,
1666 reg_val[reset_reg_id]);
1672 /* Returns the attention block data of the specified block */
/* Returns the per-type attention data of a block, looked up in the
 * BIN_BUF_DBG_ATTN_BLOCKS array.
 */
1673 static const struct dbg_attn_block_type_data *
1674 qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
1675 enum block_id block_id, enum dbg_attn_type attn_type)
1677 const struct dbg_attn_block *base_attn_block_arr =
1678 (const struct dbg_attn_block *)
1679 p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
1681 return &base_attn_block_arr[block_id].per_type_data[attn_type];
1684 /* Returns the attention registers of the specified block */
/* Returns a pointer into BIN_BUF_DBG_ATTN_REGS for the block's attention
 * registers of the given type; *num_attn_regs receives the register count.
 */
1685 static const struct dbg_attn_reg *
1686 qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
1687 enum block_id block_id, enum dbg_attn_type attn_type,
1690 const struct dbg_attn_block_type_data *block_type_data =
1691 qed_get_block_attn_data(p_hwfn, block_id, attn_type);
1693 *num_attn_regs = block_type_data->num_regs;
1695 return (const struct dbg_attn_reg *)
1696 p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
1697 block_type_data->regs_offset;
1700 /* For each block, clear the status of all parities */
/* Clears parity status in all blocks that are out of reset, by reading each
 * mode-matching attention register (parity status is clear-on-read).
 */
1701 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
1702 struct qed_ptt *p_ptt)
1704 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1705 const struct dbg_attn_reg *attn_reg_arr;
1706 u8 reg_idx, num_attn_regs;
1709 for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
1710 if (dev_data->block_in_reset[block_id])
1713 attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
1714 (enum block_id)block_id,
1718 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
1719 const struct dbg_attn_reg *reg_data =
1720 &attn_reg_arr[reg_idx];
1721 u16 modes_buf_offset;
1725 eval_mode = GET_FIELD(reg_data->mode.data,
1726 DBG_MODE_HDR_EVAL_MODE) > 0;
1728 GET_FIELD(reg_data->mode.data,
1729 DBG_MODE_HDR_MODES_BUF_OFFSET);
1731 /* If Mode match: clear parity status (read clears it;
 * the read value is intentionally discarded).
 */
1733 qed_is_mode_match(p_hwfn, &modes_buf_offset))
1734 qed_rd(p_hwfn, p_ptt,
1735 DWORDS_TO_BYTES(reg_data->
1741 /* Dumps GRC registers section header. Returns the dumped size in dwords.
1742 * the following parameters are dumped:
1743 * - count: no. of dumped entries
1744 * - split_type: split type
1745 * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
1746 * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
/* Writes a "grc_regs" section header with count/split/id/type params.
 * Returns the dumped size in dwords.
 */
1748 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
1750 u32 num_reg_entries,
1751 enum init_split_types split_type,
1752 u8 split_id, const char *reg_type_name)
/* Param count: "id" only for real splits, "type" only when a name is given */
1755 (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
1758 offset += qed_dump_section_hdr(dump_buf + offset,
1759 dump, "grc_regs", num_params);
1760 offset += qed_dump_num_param(dump_buf + offset,
1761 dump, "count", num_reg_entries);
1762 offset += qed_dump_str_param(dump_buf + offset,
1764 s_split_type_defs[split_type].name);
1765 if (split_type != SPLIT_TYPE_NONE)
1766 offset += qed_dump_num_param(dump_buf + offset,
1767 dump, "id", split_id);
1769 offset += qed_dump_str_param(dump_buf + offset,
1770 dump, "type", reg_type_name);
1775 /* Reads the specified registers into the specified buffer.
1776 * The addr and len arguments are specified in dwords.
/* Reads 'len' consecutive dwords starting at dword address 'addr' into buf */
1778 void qed_read_regs(struct qed_hwfn *p_hwfn,
1779 struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
1783 for (i = 0; i < len; i++)
1784 buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
1787 /* Dumps the GRC registers in the specified address range.
1788 * Returns the dumped size in dwords.
1789 * The addr and len arguments are specified in dwords.
/* Dumps a GRC address range into dump_buf. Tries DMAE first (for long or
 * wide-bus reads); on DMAE failure falls back to GRC reads, applying the
 * required port/PF/VF pretend for split dumps. Returns dwords dumped.
 */
1791 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
1792 struct qed_ptt *p_ptt,
1794 bool dump, u32 addr, u32 len, bool wide_bus,
1795 enum init_split_types split_type,
1798 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1799 u8 port_id = 0, pf_id = 0, vf_id = 0;
1800 bool read_using_dmae = false;
/* Derive port/pf ids from the split id according to the split type */
1807 switch (split_type) {
1808 case SPLIT_TYPE_PORT:
1814 case SPLIT_TYPE_PORT_PF:
1815 port_id = split_id / dev_data->num_pfs_per_port;
1816 pf_id = port_id + dev_data->num_ports *
1817 (split_id % dev_data->num_pfs_per_port);
1826 /* Try reading using DMAE */
1827 if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
1828 (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
1829 (PROTECT_WIDE_BUS && wide_bus))) {
1830 struct qed_dmae_params dmae_params;
1832 /* Set DMAE params */
1833 memset(&dmae_params, 0, sizeof(dmae_params));
1834 SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
1835 switch (split_type) {
1836 case SPLIT_TYPE_PORT:
1837 SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
1839 dmae_params.port_id = port_id;
1842 SET_FIELD(dmae_params.flags,
1843 QED_DMAE_PARAMS_SRC_PF_VALID, 1);
1844 dmae_params.src_pfid = pf_id;
1846 case SPLIT_TYPE_PORT_PF:
1847 SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
1849 SET_FIELD(dmae_params.flags,
1850 QED_DMAE_PARAMS_SRC_PF_VALID, 1);
1851 dmae_params.port_id = port_id;
1852 dmae_params.src_pfid = pf_id;
1858 /* Execute DMAE command */
1859 read_using_dmae = !qed_dmae_grc2host(p_hwfn,
1861 DWORDS_TO_BYTES(addr),
1862 (u64)(uintptr_t)(dump_buf),
/* On the first DMAE failure, disable DMAE for the rest of the dump */
1864 if (!read_using_dmae) {
1865 dev_data->use_dmae = 0;
1868 "Failed reading from chip using DMAE, using GRC instead\n");
1872 if (read_using_dmae)
1875 /* If not read using DMAE, read using GRC */
/* Only switch pretend context when it differs from the cached one */
1878 if (split_type != dev_data->pretend.split_type ||
1879 split_id != dev_data->pretend.split_id) {
1880 switch (split_type) {
1881 case SPLIT_TYPE_PORT:
1882 qed_port_pretend(p_hwfn, p_ptt, port_id);
1885 fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
1887 qed_fid_pretend(p_hwfn, p_ptt, fid);
1889 case SPLIT_TYPE_PORT_PF:
1890 fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
1892 qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
1895 fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
1896 | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
1898 qed_fid_pretend(p_hwfn, p_ptt, fid);
1904 dev_data->pretend.split_type = (u8)split_type;
1905 dev_data->pretend.split_id = split_id;
1908 /* Read registers using GRC */
1909 qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
/* Log progress every 'log_thresh' registers read */
1913 dev_data->num_regs_read += len;
1914 thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
1915 if ((dev_data->num_regs_read / thresh) >
1916 ((dev_data->num_regs_read - len) / thresh))
1919 "Dumped %d registers...\n", dev_data->num_regs_read);
1924 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
1925 * The addr and len arguments are specified in dwords.
/* Writes a one-dword register-sequence header: address in the low bits,
 * length shifted by REG_DUMP_LEN_SHIFT.
 */
1927 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
1928 bool dump, u32 addr, u32 len)
1931 *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
1936 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
1937 * The addr and len arguments are specified in dwords.
/* Dumps one register sequence: a one-dword header followed by the register
 * values. Returns the dumped size in dwords.
 */
1939 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
1940 struct qed_ptt *p_ptt,
1942 bool dump, u32 addr, u32 len, bool wide_bus,
1943 enum init_split_types split_type, u8 split_id)
1947 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
1948 offset += qed_grc_dump_addr_range(p_hwfn,
1951 dump, addr, len, wide_bus,
1952 split_type, split_id);
1957 /* Dumps GRC registers sequence with skip cycle.
1958 * Returns the dumped size in dwords.
1959 * - addr: start GRC address in dwords
1960 * - total_len: total no. of dwords to dump
1961 * - read_len: no. consecutive dwords to read
1962 * - skip_len: no. of dwords to skip (and fill with zeros)
/* Dumps a register sequence in a read/skip pattern: read 'read_len' dwords,
 * then emit 'skip_len' zero dwords, repeating until total_len is covered.
 */
1964 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
1965 struct qed_ptt *p_ptt,
1970 u32 read_len, u32 skip_len)
1972 u32 offset = 0, reg_offset = 0;
1974 offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
/* When not dumping, only the total size needs to be accounted for */
1977 return offset + total_len;
1979 while (reg_offset < total_len) {
1980 u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
1982 offset += qed_grc_dump_addr_range(p_hwfn,
1985 dump, addr, curr_len, false,
1986 SPLIT_TYPE_NONE, 0);
1987 reg_offset += curr_len;
/* Fill the skipped span with zeros.
 * NOTE(review): the clamp uses total_len - skip_len rather than
 * total_len - reg_offset; confirm this is intended.
 */
1990 if (reg_offset < total_len) {
1991 curr_len = min_t(u32, skip_len, total_len - skip_len);
1992 memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
1994 reg_offset += curr_len;
2002 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
/* Walks the conditional-header-prefixed register array and dumps every
 * entry whose mode matches and whose block is enabled. Returns dwords
 * dumped; *num_dumped_reg_entries receives the entry count.
 */
2003 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2004 struct qed_ptt *p_ptt,
2005 struct virt_mem_desc input_regs_arr,
2008 enum init_split_types split_type,
2010 bool block_enable[MAX_BLOCK_ID],
2011 u32 *num_dumped_reg_entries)
2013 u32 i, offset = 0, input_offset = 0;
2014 bool mode_match = true;
2016 *num_dumped_reg_entries = 0;
2018 while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
2019 const struct dbg_dump_cond_hdr *cond_hdr =
2020 (const struct dbg_dump_cond_hdr *)
2021 input_regs_arr.ptr + input_offset++;
2022 u16 modes_buf_offset;
2025 /* Check mode/block */
2026 eval_mode = GET_FIELD(cond_hdr->mode.data,
2027 DBG_MODE_HDR_EVAL_MODE) > 0;
2030 GET_FIELD(cond_hdr->mode.data,
2031 DBG_MODE_HDR_MODES_BUF_OFFSET);
2032 mode_match = qed_is_mode_match(p_hwfn,
/* Skip the whole conditional section when it doesn't apply */
2036 if (!mode_match || !block_enable[cond_hdr->block_id]) {
2037 input_offset += cond_hdr->data_size;
2041 for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2042 const struct dbg_dump_reg *reg =
2043 (const struct dbg_dump_reg *)
2044 input_regs_arr.ptr + input_offset;
2048 addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2049 len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2050 wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2051 offset += qed_grc_dump_reg_entry(p_hwfn,
2058 split_type, split_id);
2059 (*num_dumped_reg_entries)++;
2066 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
/* Dumps the registers of one split instance: reserves header space, dumps
 * the entries, then rewrites the header with the real entry count. Returns
 * 0 when no entries were dumped (the header is dropped too).
 */
2067 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2068 struct qed_ptt *p_ptt,
2069 struct virt_mem_desc input_regs_arr,
2072 bool block_enable[MAX_BLOCK_ID],
2073 enum init_split_types split_type,
2074 u8 split_id, const char *reg_type_name)
2076 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2077 enum init_split_types hdr_split_type = split_type;
2078 u32 num_dumped_reg_entries, offset;
2079 u8 hdr_split_id = split_id;
2081 /* In PORT_PF split type, print a port split header */
2082 if (split_type == SPLIT_TYPE_PORT_PF) {
2083 hdr_split_type = SPLIT_TYPE_PORT;
2084 hdr_split_id = split_id / dev_data->num_pfs_per_port;
2087 /* Calculate register dump header size (and skip it for now) */
2088 offset = qed_grc_dump_regs_hdr(dump_buf,
2092 hdr_split_id, reg_type_name);
2094 /* Dump registers */
2095 offset += qed_grc_dump_regs_entries(p_hwfn,
2103 &num_dumped_reg_entries);
2105 /* Write register dump header (now that the entry count is known) */
2106 if (dump && num_dumped_reg_entries > 0)
2107 qed_grc_dump_regs_hdr(dump_buf,
2109 num_dumped_reg_entries,
2111 hdr_split_id, reg_type_name);
2113 return num_dumped_reg_entries > 0 ? offset : 0;
2116 /* Dumps registers according to the input registers array. Returns the dumped
/* Iterates the BIN_BUF_DBG_DUMP_REG array of split-headed sections, dumping
 * each split instance (port/PF/VF) and finally canceling any pretend by
 * restoring the original PF fid. Returns the dumped size in dwords.
 */
2119 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2120 struct qed_ptt *p_ptt,
2123 bool block_enable[MAX_BLOCK_ID],
2124 const char *reg_type_name)
2126 struct virt_mem_desc *dbg_buf =
2127 &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
2128 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2129 u32 offset = 0, input_offset = 0;
2131 while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
2132 const struct dbg_dump_split_hdr *split_hdr;
2133 struct virt_mem_desc curr_input_regs_arr;
2134 enum init_split_types split_type;
2135 u16 split_count = 0;
2136 u32 split_data_size;
2140 (const struct dbg_dump_split_hdr *)
2141 dbg_buf->ptr + input_offset++;
2143 GET_FIELD(split_hdr->hdr,
2144 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2145 split_data_size = GET_FIELD(split_hdr->hdr,
2146 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2147 curr_input_regs_arr.ptr =
2148 (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
2150 curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);
/* Number of split instances to iterate for this section */
2152 switch (split_type) {
2153 case SPLIT_TYPE_NONE:
2156 case SPLIT_TYPE_PORT:
2157 split_count = dev_data->num_ports;
2160 case SPLIT_TYPE_PORT_PF:
2161 split_count = dev_data->num_ports *
2162 dev_data->num_pfs_per_port;
2165 split_count = dev_data->num_vfs;
2171 for (split_id = 0; split_id < split_count; split_id++)
2172 offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
2173 curr_input_regs_arr,
2180 input_offset += split_data_size;
2183 /* Cancel pretends (pretend to original PF) */
2185 qed_fid_pretend(p_hwfn, p_ptt,
2186 FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
2187 p_hwfn->rel_pf_id));
2188 dev_data->pretend.split_type = SPLIT_TYPE_NONE;
2189 dev_data->pretend.split_id = 0;
2195 /* Dump reset registers. Returns the dumped size in dwords. */
/* Dumps all present reset registers under a "RESET_REGS" header; the header
 * is written twice — first to reserve space, then with the real count.
 */
2196 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2197 struct qed_ptt *p_ptt,
2198 u32 *dump_buf, bool dump)
2200 u32 offset = 0, num_regs = 0;
2203 /* Calculate header size */
2204 offset += qed_grc_dump_regs_hdr(dump_buf,
2206 0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
2208 /* Write reset registers */
2209 for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
2211 const struct dbg_reset_reg *reset_reg;
2214 reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
/* Skip registers not present on this chip */
2216 if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
2219 reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
2220 offset += qed_grc_dump_reg_entry(p_hwfn,
2225 1, false, SPLIT_TYPE_NONE, 0);
/* Rewrite the header with the actual number of dumped registers */
2231 qed_grc_dump_regs_hdr(dump_buf,
2232 true, num_regs, SPLIT_TYPE_NONE,
2238 /* Dump registers that are modified during GRC Dump and therefore must be
2239 * dumped first. Returns the dumped size in dwords.
/* Dumps registers whose value is modified by the GRC dump itself, so they
 * must be captured first: attention mask/status registers ("ATTN_REGS")
 * followed by Storm stall status registers ("REGS"). Each section's header
 * is written twice — once empty to reserve space, then with the real count.
 */
2241 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2242 struct qed_ptt *p_ptt,
2243 u32 *dump_buf, bool dump)
2245 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2246 u32 block_id, offset = 0, stall_regs_offset;
2247 const struct dbg_attn_reg *attn_reg_arr;
2248 u8 storm_id, reg_idx, num_attn_regs;
2249 u32 num_reg_entries = 0;
2251 /* Write empty header for attention registers */
2252 offset += qed_grc_dump_regs_hdr(dump_buf,
2254 0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");
2256 /* Write parity registers */
2257 for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
/* Blocks in reset are skipped only in real-dump mode */
2258 if (dev_data->block_in_reset[block_id] && dump)
2261 attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
2262 (enum block_id)block_id,
2266 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2267 const struct dbg_attn_reg *reg_data =
2268 &attn_reg_arr[reg_idx];
2269 u16 modes_buf_offset;
2274 eval_mode = GET_FIELD(reg_data->mode.data,
2275 DBG_MODE_HDR_EVAL_MODE) > 0;
2277 GET_FIELD(reg_data->mode.data,
2278 DBG_MODE_HDR_MODES_BUF_OFFSET);
2280 !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2283 /* Mode match: read & dump mask and status registers */
2284 addr = reg_data->mask_address;
2285 offset += qed_grc_dump_reg_entry(p_hwfn,
2291 SPLIT_TYPE_NONE, 0);
2292 addr = GET_FIELD(reg_data->data,
2293 DBG_ATTN_REG_STS_ADDRESS);
2294 offset += qed_grc_dump_reg_entry(p_hwfn,
2300 SPLIT_TYPE_NONE, 0);
2301 num_reg_entries += 2;
2305 /* Overwrite header for attention registers */
2307 qed_grc_dump_regs_hdr(dump_buf,
2310 SPLIT_TYPE_NONE, 0, "ATTN_REGS");
2312 /* Write empty header for stall registers */
2313 stall_regs_offset = offset;
2314 offset += qed_grc_dump_regs_hdr(dump_buf,
2315 false, 0, SPLIT_TYPE_NONE, 0, "REGS");
2317 /* Write Storm stall status registers */
2318 for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
2320 struct storm_defs *storm = &s_storm_defs[storm_id];
2323 if (dev_data->block_in_reset[storm->sem_block_id] && dump)
2327 BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
2328 SEM_FAST_REG_STALLED);
2329 offset += qed_grc_dump_reg_entry(p_hwfn,
2335 false, SPLIT_TYPE_NONE, 0);
2339 /* Overwrite header for stall registers */
2341 qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
2344 SPLIT_TYPE_NONE, 0, "REGS");
2349 /* Dumps registers that can't be represented in the debug arrays */
/* Dumps the two R/TDIF debug-error-info register files, which need the
 * read/skip pattern and so can't be described in the debug arrays.
 */
2350 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2351 struct qed_ptt *p_ptt,
2352 u32 *dump_buf, bool dump)
2354 u32 offset = 0, addr;
2356 offset += qed_grc_dump_regs_hdr(dump_buf,
2357 dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
2359 /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
 * skipped).
 */
2362 addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2363 offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2368 RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2371 addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2373 qed_grc_dump_reg_entry_skip(p_hwfn,
2378 TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2385 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2386 * dwords. The following parameters are dumped:
2387 * - name: dumped only if it's not NULL.
2388 * - addr: in dwords, dumped only if name is NULL.
2389 * - len: in dwords, always dumped.
2390 * - width: dumped if it's not zero.
2391 * - packed: dumped only if it's not false.
2392 * - mem_group: always dumped.
2393 * - is_storm: true only if the memory is related to a Storm.
2394 * - storm_letter: valid only if is_storm is true.
/* Writes a "grc_mem" section header with name-or-addr, len, width, packed
 * and type params. Storm-owned memories get the Storm letter prefixed to
 * the name and type. Returns the dumped size in dwords.
 */
2397 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2405 const char *mem_group, char storm_letter)
2413 "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2420 /* Dump section header */
2421 offset += qed_dump_section_hdr(dump_buf + offset,
2422 dump, "grc_mem", num_params);
/* Build "<letter>STORM_<name>": the '?' placeholder in the template
 * is overwritten with the Storm letter.
 */
2427 strcpy(buf, "?STORM_");
2428 buf[0] = storm_letter;
2429 strcpy(buf + strlen(buf), name);
2434 offset += qed_dump_str_param(dump_buf + offset,
/* When no name is given, the memory is identified by byte address */
2438 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2440 offset += qed_dump_num_param(dump_buf + offset,
2441 dump, "addr", addr_in_bytes);
2445 offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2447 /* Dump bit width */
2449 offset += qed_dump_num_param(dump_buf + offset,
2450 dump, "width", bit_width);
2454 offset += qed_dump_num_param(dump_buf + offset,
/* Same Storm-letter prefix scheme for the "type" (mem group) param */
2459 strcpy(buf, "?STORM_");
2460 buf[0] = storm_letter;
2461 strcpy(buf + strlen(buf), mem_group);
2463 strcpy(buf, mem_group);
2466 offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2471 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2472 * Returns the dumped size in dwords.
2473 * The addr and len arguments are specified in dwords.
/* Dumps a single GRC memory: header followed by its contents read from the
 * given dword address range. Returns the dumped size in dwords.
 */
2475 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2476 struct qed_ptt *p_ptt,
2485 const char *mem_group, char storm_letter)
2489 offset += qed_grc_dump_mem_hdr(p_hwfn,
2496 packed, mem_group, storm_letter);
2497 offset += qed_grc_dump_addr_range(p_hwfn,
2500 dump, addr, len, wide_bus,
2501 SPLIT_TYPE_NONE, 0);
2506 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
/* Walks the conditional-header-prefixed memory array and dumps every
 * memory whose mode matches and whose group is included. Returns dwords
 * dumped.
 */
2507 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
2508 struct qed_ptt *p_ptt,
2509 struct virt_mem_desc input_mems_arr,
2510 u32 *dump_buf, bool dump)
2512 u32 i, offset = 0, input_offset = 0;
2513 bool mode_match = true;
2515 while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
2516 const struct dbg_dump_cond_hdr *cond_hdr;
2517 u16 modes_buf_offset;
2522 (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
2524 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
2526 /* Check required mode */
2527 eval_mode = GET_FIELD(cond_hdr->mode.data,
2528 DBG_MODE_HDR_EVAL_MODE) > 0;
2531 GET_FIELD(cond_hdr->mode.data,
2532 DBG_MODE_HDR_MODES_BUF_OFFSET);
2533 mode_match = qed_is_mode_match(p_hwfn,
/* Skip the whole conditional section when the mode doesn't match */
2538 input_offset += cond_hdr->data_size;
2542 for (i = 0; i < num_entries;
2543 i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
2544 const struct dbg_dump_mem *mem =
2545 (const struct dbg_dump_mem *)((u32 *)
2548 const struct dbg_block *block;
2549 char storm_letter = 0;
2550 u32 mem_addr, mem_len;
2554 mem_group_id = GET_FIELD(mem->dword0,
2555 DBG_DUMP_MEM_MEM_GROUP_ID);
/* Defensive check against a corrupt debug array */
2556 if (mem_group_id >= MEM_GROUPS_NUM) {
2557 DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
2561 if (!qed_grc_is_mem_included(p_hwfn,
2567 mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
2568 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
2569 mem_wide_bus = GET_FIELD(mem->dword1,
2570 DBG_DUMP_MEM_WIDE_BUS);
2572 block = get_dbg_block(p_hwfn,
2573 cond_hdr->block_id);
2575 /* If memory is associated with Storm,
2576 * update storm details
2578 if (block->associated_storm_letter)
2579 storm_letter = block->associated_storm_letter;
2582 offset += qed_grc_dump_mem(p_hwfn,
2592 s_mem_group_names[mem_group_id],
2600 /* Dumps GRC memories according to the input array dump_mem.
2601 * Returns the dumped size in dwords.
/* Iterates the BIN_BUF_DBG_DUMP_MEM array of split-headed sections. Only
 * SPLIT_TYPE_NONE sections are supported; split memory sections are
 * reported and skipped. Returns the dumped size in dwords.
 */
2603 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
2604 struct qed_ptt *p_ptt,
2605 u32 *dump_buf, bool dump)
2607 struct virt_mem_desc *dbg_buf =
2608 &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
2609 u32 offset = 0, input_offset = 0;
2611 while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
2612 const struct dbg_dump_split_hdr *split_hdr;
2613 struct virt_mem_desc curr_input_mems_arr;
2614 enum init_split_types split_type;
2615 u32 split_data_size;
2618 (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
2620 split_type = GET_FIELD(split_hdr->hdr,
2621 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2622 split_data_size = GET_FIELD(split_hdr->hdr,
2623 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2624 curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
2625 curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);
2627 if (split_type == SPLIT_TYPE_NONE)
2628 offset += qed_grc_dump_mem_entries(p_hwfn,
2630 curr_input_mems_arr,
2635 "Dumping split memories is currently not supported\n");
2637 input_offset += split_data_size;
2643 /* Dumps GRC context data for the specified Storm.
2644 * Returns the dumped size in dwords.
2645 * The lid_size argument is specified in quad-regs.
/* Dumps one Storm's CM context of the given type: a memory header, then
 * for each lid each quad-reg dword is selected via the Storm's context
 * write-address register and read back through the read-address register.
 */
2647 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
2648 struct qed_ptt *p_ptt,
2653 enum cm_ctx_types ctx_type, u8 storm_id)
2655 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2656 struct storm_defs *storm = &s_storm_defs[storm_id];
2657 u32 i, lid, lid_size, total_size;
2658 u32 rd_reg_addr, offset = 0;
2660 /* Convert quad-regs to dwords (1 quad-reg = 4 dwords) */
2661 lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;
2666 total_size = num_lids * lid_size;
2668 offset += qed_grc_dump_mem_hdr(p_hwfn,
2675 false, name, storm->letter);
/* When not dumping, only the size needs to be accounted for */
2678 return offset + total_size;
2680 rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);
2682 /* Dump context data: (i << 9) | lid selects dword i of lid */
2683 for (lid = 0; lid < num_lids; lid++) {
2684 for (i = 0; i < lid_size; i++) {
2686 p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
2687 offset += qed_grc_dump_addr_range(p_hwfn,
2694 SPLIT_TYPE_NONE, 0);
2701 /* Dumps GRC contexts. Returns the dumped size in dwords. */
2702 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2703 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
/* For every Storm selected by the GRC parameters, dump its four CM
 * context types (Conn AG/ST, Task AG/ST) via qed_grc_dump_ctx_data().
 */
2708 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2709 if (!qed_grc_is_storm_included(p_hwfn,
2710 (enum dbg_storms)storm_id))
2713 /* Dump Conn AG context size */
2714 offset += qed_grc_dump_ctx_data(p_hwfn,
2720 CM_CTX_CONN_AG, storm_id);
2722 /* Dump Conn ST context size */
2723 offset += qed_grc_dump_ctx_data(p_hwfn,
2729 CM_CTX_CONN_ST, storm_id);
2731 /* Dump Task AG context size */
2732 offset += qed_grc_dump_ctx_data(p_hwfn,
2738 CM_CTX_TASK_AG, storm_id);
2740 /* Dump Task ST context size */
2741 offset += qed_grc_dump_ctx_data(p_hwfn,
2747 CM_CTX_TASK_ST, storm_id);
/* VFC (Virtual Flow Cache) status register bit positions and the
 * polling parameters used when waiting for a VFC response.
 */
2753 #define VFC_STATUS_RESP_READY_BIT 0
2754 #define VFC_STATUS_BUSY_BIT 1
2755 #define VFC_STATUS_SENDING_CMD_BIT 2
/* Base poll delay (scaled by the HW type's delay factor) and the
 * maximum number of polls before giving up.
 */
2757 #define VFC_POLLING_DELAY_MS 1
2758 #define VFC_POLLING_COUNT 20
2760 /* Reads data from VFC. Returns the number of dwords read (0 on error).
2761 * Sizes are specified in dwords.
2763 static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
2764 struct qed_ptt *p_ptt,
2765 struct storm_defs *storm,
2770 u32 resp_size, u32 *dump_buf)
2772 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2773 u32 vfc_status, polling_ms, polling_count = 0, i;
2774 u32 reg_addr, sem_base;
2775 bool is_ready = false;
2777 sem_base = storm->sem_fast_mem_addr;
/* Scale the base poll delay by the HW type's delay factor (e.g.
 * emulation runs slower than ASIC).
 */
2778 polling_ms = VFC_POLLING_DELAY_MS *
2779 s_hw_type_defs[dev_data->hw_type].delay_factor;
2781 /* Write VFC command */
2784 sem_base + SEM_FAST_REG_VFC_DATA_WR,
2785 cmd_data, cmd_size);
2787 /* Write VFC address */
2790 sem_base + SEM_FAST_REG_VFC_ADDR,
2791 addr_data, addr_size);
/* Read the response one dword at a time, polling the VFC status
 * register until RESP_READY is set before each read. Gives up after
 * VFC_POLLING_COUNT attempts (error path not visible here).
 */
2794 for (i = 0; i < resp_size; i++) {
2795 /* Poll until ready */
2797 reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
2798 qed_grc_dump_addr_range(p_hwfn,
2802 BYTES_TO_DWORDS(reg_addr),
2804 false, SPLIT_TYPE_NONE, 0);
2805 is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);
2808 if (polling_count++ == VFC_POLLING_COUNT)
2813 } while (!is_ready);
2815 reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
2816 qed_grc_dump_addr_range(p_hwfn,
2820 BYTES_TO_DWORDS(reg_addr),
2821 1, false, SPLIT_TYPE_NONE, 0);
2827 /* Dump VFC CAM. Returns the dumped size in dwords. */
2828 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
2829 struct qed_ptt *p_ptt,
2830 u32 *dump_buf, bool dump, u8 storm_id)
2832 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
2833 struct storm_defs *storm = &s_storm_defs[storm_id];
2834 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
2835 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
2836 u32 row, offset = 0;
/* Memory header; in size-calculation mode only account for the size. */
2838 offset += qed_grc_dump_mem_hdr(p_hwfn,
2845 false, "vfc_cam", storm->letter);
2848 return offset + total_size;
2850 /* Prepare CAM address */
2851 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
/* Read the CAM row by row; each read issues a VFC command built in
 * cam_cmd and returns VFC_CAM_RESP_DWORDS dwords.
 */
2853 /* Read VFC CAM data */
2854 for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
2855 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
2856 offset += qed_grc_dump_read_from_vfc(p_hwfn,
2862 VFC_CAM_ADDR_DWORDS,
2863 VFC_CAM_RESP_DWORDS,
2870 /* Dump VFC RAM. Returns the dumped size in dwords. */
2871 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
2872 struct qed_ptt *p_ptt,
2875 u8 storm_id, struct vfc_ram_defs *ram_defs)
2877 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
2878 struct storm_defs *storm = &s_storm_defs[storm_id];
2879 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
2880 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
2881 u32 row, offset = 0;
/* Memory header; in size-calculation mode only account for the size. */
2883 offset += qed_grc_dump_mem_hdr(p_hwfn,
2891 ram_defs->type_name,
2895 return offset + total_size;
2897 /* Prepare RAM address */
2898 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
/* Read the RAM rows in [base_row, base_row + num_rows); unlike the
 * CAM read, the row index goes into the VFC *address*, not the command.
 */
2900 /* Read VFC RAM data */
2901 for (row = ram_defs->base_row;
2902 row < ram_defs->base_row + ram_defs->num_rows; row++) {
2903 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
2904 offset += qed_grc_dump_read_from_vfc(p_hwfn,
2910 VFC_RAM_ADDR_DWORDS,
2911 VFC_RAM_RESP_DWORDS,
2918 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
2919 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
2920 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
/* For every included Storm that actually has a VFC, dump its CAM and
 * then each of the NUM_VFC_RAM_TYPES RAM instances.
 */
2925 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2926 if (!qed_grc_is_storm_included(p_hwfn,
2927 (enum dbg_storms)storm_id) ||
2928 !s_storm_defs[storm_id].has_vfc)
2932 offset += qed_grc_dump_vfc_cam(p_hwfn,
2938 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
2939 offset += qed_grc_dump_vfc_ram(p_hwfn,
2944 &s_vfc_ram_defs[i]);
2950 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
2951 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
2952 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2954 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2958 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
2959 u32 rss_addr, num_entries, total_dwords;
2960 struct rss_mem_defs *rss_defs;
2961 u32 addr, num_dwords_to_read;
2964 rss_defs = &s_rss_mem_defs[rss_mem_id];
2965 rss_addr = rss_defs->addr;
2966 num_entries = rss_defs->num_entries[dev_data->chip_id];
/* entry_width is in bits, hence the division by 32 to get dwords. */
2967 total_dwords = (num_entries * rss_defs->entry_width) / 32;
/* 16-bit entries are stored packed (two per dword). */
2968 packed = (rss_defs->entry_width == 16);
2970 offset += qed_grc_dump_mem_hdr(p_hwfn,
2976 rss_defs->entry_width,
2978 rss_defs->type_name, 0);
/* In size-calculation mode only account for the size. */
2982 offset += total_dwords;
/* Read the RAM through the indirect ADDR/DATA register pair, at most
 * RSS_REG_RSS_RAM_DATA_SIZE dwords per ADDR write.
 */
2986 addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
2987 while (total_dwords) {
2988 num_dwords_to_read = min_t(u32,
2989 RSS_REG_RSS_RAM_DATA_SIZE,
2991 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
2992 offset += qed_grc_dump_addr_range(p_hwfn,
2999 SPLIT_TYPE_NONE, 0);
3000 total_dwords -= num_dwords_to_read;
3008 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3009 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3010 struct qed_ptt *p_ptt,
3011 u32 *dump_buf, bool dump, u8 big_ram_id)
3013 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3014 u32 block_size, ram_size, offset = 0, reg_val, i;
/* "???" placeholders below are overwritten with the RAM instance name. */
3015 char mem_name[12] = "???_BIG_RAM";
3016 char type_name[8] = "???_RAM";
3017 struct big_ram_defs *big_ram;
3019 big_ram = &s_big_ram_defs[big_ram_id];
3020 ram_size = big_ram->ram_size[dev_data->chip_id];
/* Block size is 256 or (presumably) 128 bytes depending on the
 * per-chip is_256b configuration bit.
 */
3022 reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3023 block_size = reg_val &
3024 BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
/* NOTE(review): strncpy here deliberately writes exactly
 * BIG_RAM_NAME_LEN chars over the "???" prefix without a NUL --
 * the terminator comes from the initializers above. Confirm
 * BIG_RAM_NAME_LEN == 3 before replacing with strscpy().
 */
3027 strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3028 strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3030 /* Dump memory header */
3031 offset += qed_grc_dump_mem_hdr(p_hwfn,
3038 false, type_name, 0);
3040 /* Read and dump Big RAM data */
3042 return offset + ram_size;
/* Indirect read: write the chunk index to the address register, then
 * read BRB_REG_BIG_RAM_DATA_SIZE dwords from the data register.
 */
3045 for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3049 qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3050 addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3051 len = BRB_REG_BIG_RAM_DATA_SIZE;
3052 offset += qed_grc_dump_addr_range(p_hwfn,
3058 false, SPLIT_TYPE_NONE, 0);
3064 /* Dumps MCP scratchpad. Returns the dumped size in dwords. */
3065 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3066 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3068 bool block_enable[MAX_BLOCK_ID] = { 0 };
3069 u32 offset = 0, addr;
3070 bool halted = false;
/* Halt the MCP while dumping (unless NO_MCP is set) so its memories
 * are stable; a halt failure is reported but the dump continues.
 */
3073 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3074 halted = !qed_mcp_halt(p_hwfn, p_ptt);
3076 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3079 /* Dump MCP scratchpad */
3080 offset += qed_grc_dump_mem(p_hwfn,
3085 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3086 MCP_REG_SCRATCH_SIZE,
3087 false, 0, false, "MCP", 0);
3089 /* Dump MCP cpu_reg_file */
3090 offset += qed_grc_dump_mem(p_hwfn,
3095 BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3096 MCP_REG_CPU_REG_FILE_SIZE,
3097 false, 0, false, "MCP", 0);
3099 /* Dump MCP registers */
3100 block_enable[BLOCK_MCP] = true;
3101 offset += qed_grc_dump_registers(p_hwfn,
3104 dump, block_enable, "MCP");
3106 /* Dump required non-MCP registers */
3107 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3108 dump, 1, SPLIT_TYPE_NONE, 0,
3110 addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3111 offset += qed_grc_dump_reg_entry(p_hwfn,
3117 false, SPLIT_TYPE_NONE, 0);
/* Resume the MCP only if we successfully halted it above. */
3120 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3121 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3126 /* Dumps the tbus indirect memory for all PHYs.
3127 * Returns the dumped size in dwords.
3129 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3130 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3132 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3136 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3137 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3138 struct phy_defs *phy_defs;
/* Compute the absolute tbus address/data register addresses for
 * this PHY from its base address.
 */
3141 phy_defs = &s_phy_defs[phy_id];
3142 addr_lo_addr = phy_defs->base_addr +
3143 phy_defs->tbus_addr_lo_addr;
3144 addr_hi_addr = phy_defs->base_addr +
3145 phy_defs->tbus_addr_hi_addr;
3146 data_lo_addr = phy_defs->base_addr +
3147 phy_defs->tbus_data_lo_addr;
3148 data_hi_addr = phy_defs->base_addr +
3149 phy_defs->tbus_data_hi_addr;
3151 if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3152 phy_defs->phy_name) < 0)
3154 "Unexpected debug error: invalid PHY memory name\n");
3156 offset += qed_grc_dump_mem_hdr(p_hwfn,
3161 PHY_DUMP_SIZE_DWORDS,
3162 16, true, mem_name, 0);
/* In size-calculation mode only account for the size. */
3165 offset += PHY_DUMP_SIZE_DWORDS;
/* Walk the tbus address space: set the high address byte, then for
 * each of the 256 low addresses read the low and high data bytes
 * into the dump buffer (byte-granular writes).
 */
3169 bytes_buf = (u8 *)(dump_buf + offset);
3170 for (tbus_hi_offset = 0;
3171 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3173 qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3174 for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3177 p_ptt, addr_lo_addr, tbus_lo_offset);
3178 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3181 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3187 offset += PHY_DUMP_SIZE_DWORDS;
/* Forward declarations -- both functions are defined further below but
 * are needed by qed_grc_dump_mcp_hw_dump() first.
 */
3193 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3194 struct qed_ptt *p_ptt,
3196 u32 *nvram_offset_bytes,
3197 u32 *nvram_size_bytes);
3199 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3200 struct qed_ptt *p_ptt,
3201 u32 nvram_offset_bytes,
3202 u32 nvram_size_bytes, u32 *ret_buf);
3204 /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
3205 static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
3206 struct qed_ptt *p_ptt,
3207 u32 *dump_buf, bool dump)
3209 u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
3210 u32 hw_dump_size_dwords = 0, offset = 0;
3211 enum dbg_status status;
3213 /* Read HW dump image from NVRAM */
3214 status = qed_find_nvram_image(p_hwfn,
3216 NVM_TYPE_HW_DUMP_OUT,
3217 &hw_dump_offset_bytes,
3218 &hw_dump_size_bytes);
/* If the image is absent/unreadable, nothing is dumped. */
3219 if (status != DBG_STATUS_OK)
3222 hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
3224 /* Dump HW dump image section */
3225 offset += qed_dump_section_hdr(dump_buf + offset,
3226 dump, "mcp_hw_dump", 1);
3227 offset += qed_dump_num_param(dump_buf + offset,
3228 dump, "size", hw_dump_size_dwords);
3230 /* Read MCP HW dump image into dump buffer */
3231 if (dump && hw_dump_size_dwords) {
3232 status = qed_nvram_read(p_hwfn,
3234 hw_dump_offset_bytes,
3235 hw_dump_size_bytes, dump_buf + offset);
3236 if (status != DBG_STATUS_OK) {
3238 "Failed to read MCP HW Dump image from NVRAM\n");
/* On a successful read, account for the image data. */
3242 offset += hw_dump_size_dwords;
3247 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3248 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3249 struct qed_ptt *p_ptt,
3250 u32 *dump_buf, bool dump)
3252 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3253 u32 block_id, line_id, offset = 0, addr, len;
3255 /* Don't dump static debug if a debug bus recording is in progress */
3256 if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
/* Set up the DBG block for static-debug capture: disable/reset
 * everything, select 8-HW framing into the internal buffer, then
 * enable the DBG block.
 */
3260 /* Disable debug bus in all blocks */
3261 qed_bus_disable_blocks(p_hwfn, p_ptt);
3263 qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3265 p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
3267 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3268 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3269 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3272 /* Dump all static debug lines for each relevant block */
3273 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3274 const struct dbg_block_chip *block_per_chip;
3275 const struct dbg_block *block;
3276 bool is_removed, has_dbg_bus;
3277 u16 modes_buf_offset;
3281 qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
3282 is_removed = GET_FIELD(block_per_chip->flags,
3283 DBG_BLOCK_CHIP_IS_REMOVED);
3284 has_dbg_bus = GET_FIELD(block_per_chip->flags,
3285 DBG_BLOCK_CHIP_HAS_DBG_BUS);
3287 /* read+clear for NWS parity is not working, skip NWS block */
3288 if (block_id == BLOCK_NWS)
/* If the block's debug bus is mode-gated, verify the current
 * chip mode actually matches; otherwise treat it as busless.
 */
3291 if (!is_removed && has_dbg_bus &&
3292 GET_FIELD(block_per_chip->dbg_bus_mode.data,
3293 DBG_MODE_HDR_EVAL_MODE) > 0) {
3295 GET_FIELD(block_per_chip->dbg_bus_mode.data,
3296 DBG_MODE_HDR_MODES_BUF_OFFSET);
3297 if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
3298 has_dbg_bus = false;
3301 if (is_removed || !has_dbg_bus)
3304 block_dwords = NUM_DBG_LINES(block_per_chip) *
3305 STATIC_DEBUG_LINE_DWORDS;
3307 /* Dump static section params */
3308 block = get_dbg_block(p_hwfn, (enum block_id)block_id);
3309 offset += qed_grc_dump_mem_hdr(p_hwfn,
3315 32, false, "STATIC", 0);
/* In size-calculation mode only account for the size. */
3318 offset += block_dwords;
3322 /* If all lines are invalid - dump zeros */
3323 if (dev_data->block_in_reset[block_id]) {
3324 memset(dump_buf + offset, 0,
3325 DWORDS_TO_BYTES(block_dwords));
3326 offset += block_dwords;
3330 /* Enable block's client */
3331 qed_bus_enable_clients(p_hwfn,
3333 BIT(block_per_chip->dbg_client_id));
/* For each debug line: select it, then read the calendar output
 * data (STATIC_DEBUG_LINE_DWORDS dwords) into the dump buffer.
 */
3335 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3336 len = STATIC_DEBUG_LINE_DWORDS;
3337 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
3339 /* Configure debug line ID */
3340 qed_bus_config_dbg_line(p_hwfn,
3342 (enum block_id)block_id,
3343 (u8)line_id, 0xf, 0, 0, 0);
3345 /* Read debug line info */
3346 offset += qed_grc_dump_addr_range(p_hwfn,
3352 true, SPLIT_TYPE_NONE,
3356 /* Disable block's client and debug output */
3357 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3358 qed_bus_config_dbg_line(p_hwfn, p_ptt,
3359 (enum block_id)block_id, 0, 0, 0, 0, 0);
/* Tear down: disable the DBG block and all clients before returning. */
3363 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3364 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3370 /* Performs GRC Dump to the specified buffer.
3371 * Returns the dumped size in dwords.
/* Top-level GRC dump orchestrator. Order matters:
 * 1) global params, 2) reset registers (before unreset),
 * 3) take blocks out of reset, 4) mask parities via MFW,
 * 5) registers/memories/contexts/RSS/Big-RAM/VFC/PHY/MCP sections,
 * 6) static debug, 7) last section, then undo: unstall storms,
 * clear parity status, unmask parities.
 */
3373 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3374 struct qed_ptt *p_ptt,
3376 bool dump, u32 *num_dumped_dwords)
3378 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3379 u32 dwords_read, offset = 0;
3380 bool parities_masked = false;
3383 *num_dumped_dwords = 0;
3384 dev_data->num_regs_read = 0;
3386 /* Update reset state */
3388 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3390 /* Dump global params */
3391 offset += qed_dump_common_global_params(p_hwfn,
3393 dump_buf + offset, dump, 4);
3394 offset += qed_dump_str_param(dump_buf + offset,
3395 dump, "dump-type", "grc-dump");
3396 offset += qed_dump_num_param(dump_buf + offset,
3400 offset += qed_dump_num_param(dump_buf + offset,
3404 offset += qed_dump_num_param(dump_buf + offset,
3405 dump, "num-ports", dev_data->num_ports);
3407 /* Dump reset registers (dumped before taking blocks out of reset ) */
3408 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3409 offset += qed_grc_dump_reset_regs(p_hwfn,
3411 dump_buf + offset, dump);
3413 /* Take all blocks out of reset (using reset registers) */
3415 qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
3416 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3419 /* Disable all parities using MFW command */
3421 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3422 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
/* Masking failure is fatal only in PARITY_SAFE mode. */
3423 if (!parities_masked) {
3425 "Failed to mask parities using MFW\n");
3426 if (qed_grc_get_param
3427 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3428 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3432 /* Dump modified registers (dumped before modifying them) */
3433 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3434 offset += qed_grc_dump_modified_regs(p_hwfn,
3436 dump_buf + offset, dump);
/* Stall storms before reading IOR/VFC so their state is stable. */
3440 (qed_grc_is_included(p_hwfn,
3441 DBG_GRC_PARAM_DUMP_IOR) ||
3442 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3443 qed_grc_stall_storms(p_hwfn, p_ptt, true);
3446 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3447 bool block_enable[MAX_BLOCK_ID];
3449 /* Dump all blocks except MCP */
3450 for (i = 0; i < MAX_BLOCK_ID; i++)
3451 block_enable[i] = true;
3452 block_enable[BLOCK_MCP] = false;
3453 offset += qed_grc_dump_registers(p_hwfn,
3458 block_enable, NULL);
3460 /* Dump special registers */
3461 offset += qed_grc_dump_special_regs(p_hwfn,
3463 dump_buf + offset, dump);
3467 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3470 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3471 offset += qed_grc_dump_mcp(p_hwfn,
3472 p_ptt, dump_buf + offset, dump);
3475 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3476 offset += qed_grc_dump_ctx(p_hwfn,
3477 p_ptt, dump_buf + offset, dump);
3479 /* Dump RSS memories */
3480 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3481 offset += qed_grc_dump_rss(p_hwfn,
3482 p_ptt, dump_buf + offset, dump);
3485 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3486 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3487 offset += qed_grc_dump_big_ram(p_hwfn,
3493 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
3494 dwords_read = qed_grc_dump_vfc(p_hwfn,
3495 p_ptt, dump_buf + offset, dump);
3496 offset += dwords_read;
3498 return DBG_STATUS_VFC_READ_ERROR;
/* PHY tbus dump is only supported on K2 ASIC. */
3502 if (qed_grc_is_included(p_hwfn,
3503 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3504 CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
3505 offset += qed_grc_dump_phy(p_hwfn,
3506 p_ptt, dump_buf + offset, dump);
3508 /* Dump MCP HW Dump */
3509 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
3510 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP) && 1)
3511 offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
3513 dump_buf + offset, dump);
3515 /* Dump static debug data (only if not during debug bus recording) */
3516 if (qed_grc_is_included(p_hwfn,
3517 DBG_GRC_PARAM_DUMP_STATIC) &&
3518 (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
3519 offset += qed_grc_dump_static_debug(p_hwfn,
3521 dump_buf + offset, dump);
3523 /* Dump last section */
3524 offset += qed_dump_last_section(dump_buf, offset, dump);
3527 /* Unstall storms */
3528 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3529 qed_grc_stall_storms(p_hwfn, p_ptt, false);
3531 /* Clear parity status */
3532 qed_grc_clear_all_prty(p_hwfn, p_ptt);
3534 /* Enable all parities using MFW command */
3535 if (parities_masked)
3536 qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3539 *num_dumped_dwords = offset;
3541 return DBG_STATUS_OK;
3544 /* Writes the specified failing Idle Check rule to the specified buffer.
3545 * Returns the dumped size in dwords.
3547 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3548 struct qed_ptt *p_ptt,
3553 const struct dbg_idle_chk_rule *rule,
3554 u16 fail_entry_id, u32 *cond_reg_values)
3556 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3557 const struct dbg_idle_chk_cond_reg *cond_regs;
3558 const struct dbg_idle_chk_info_reg *info_regs;
3559 u32 i, next_reg_offset = 0, offset = 0;
3560 struct dbg_idle_chk_result_hdr *hdr;
3561 const union dbg_idle_chk_reg *regs;
/* The rule's registers are laid out as num_cond_regs condition regs
 * followed by its info regs in the BIN_BUF_DBG_IDLE_CHK_REGS array.
 */
3564 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
3565 regs = (const union dbg_idle_chk_reg *)
3566 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3568 cond_regs = &regs[0].cond_reg;
3569 info_regs = &regs[rule->num_cond_regs].info_reg;
3571 /* Dump rule data */
3573 memset(hdr, 0, sizeof(*hdr));
3574 hdr->rule_id = rule_id;
3575 hdr->mem_entry_id = fail_entry_id;
3576 hdr->severity = rule->severity;
3577 hdr->num_dumped_cond_regs = rule->num_cond_regs;
3580 offset += IDLE_CHK_RESULT_HDR_DWORDS;
/* Condition register values were already read by the caller into
 * cond_reg_values; here they are only copied out with a per-register
 * result header.
 */
3582 /* Dump condition register values */
3583 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
3584 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
3585 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3588 (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
3590 /* Write register header */
3592 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
3597 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3598 memset(reg_hdr, 0, sizeof(*reg_hdr));
3599 reg_hdr->start_entry = reg->start_entry;
3600 reg_hdr->size = reg->entry_size;
3601 SET_FIELD(reg_hdr->data,
3602 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
3603 reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
3604 SET_FIELD(reg_hdr->data,
3605 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
3607 /* Write register values */
3608 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
3609 dump_buf[offset] = cond_reg_values[next_reg_offset];
/* Info registers are read live from HW here (not pre-read), skipping
 * any whose block is in reset or whose mode doesn't match.
 */
3612 /* Dump info register values */
3613 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
3614 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
3617 /* Check if register's block is in reset */
3619 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
3623 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
3624 if (block_id >= MAX_BLOCK_ID) {
3625 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3629 if (!dev_data->block_in_reset[block_id]) {
3630 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3631 bool wide_bus, eval_mode, mode_match = true;
3632 u16 modes_buf_offset;
3635 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
3636 (dump_buf + offset);
3639 eval_mode = GET_FIELD(reg->mode.data,
3640 DBG_MODE_HDR_EVAL_MODE) > 0;
3643 GET_FIELD(reg->mode.data,
3644 DBG_MODE_HDR_MODES_BUF_OFFSET);
3646 qed_is_mode_match(p_hwfn,
3653 addr = GET_FIELD(reg->data,
3654 DBG_IDLE_CHK_INFO_REG_ADDRESS);
3655 wide_bus = GET_FIELD(reg->data,
3656 DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
3658 /* Write register header */
3659 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3660 hdr->num_dumped_info_regs++;
3661 memset(reg_hdr, 0, sizeof(*reg_hdr));
3662 reg_hdr->size = reg->size;
/* Info-reg IDs continue after the condition-reg IDs. */
3663 SET_FIELD(reg_hdr->data,
3664 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
3665 rule->num_cond_regs + reg_id);
3667 /* Write register values */
3668 offset += qed_grc_dump_addr_range(p_hwfn,
3673 reg->size, wide_bus,
3674 SPLIT_TYPE_NONE, 0);
3681 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
3683 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3684 u32 *dump_buf, bool dump,
3685 const struct dbg_idle_chk_rule *input_rules,
3686 u32 num_input_rules, u32 *num_failing_rules)
3688 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3689 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3694 *num_failing_rules = 0;
3696 for (i = 0; i < num_input_rules; i++) {
3697 const struct dbg_idle_chk_cond_reg *cond_regs;
3698 const struct dbg_idle_chk_rule *rule;
3699 const union dbg_idle_chk_reg *regs;
3700 u16 num_reg_entries = 1;
3701 bool check_rule = true;
3702 const u32 *imm_values;
3704 rule = &input_rules[i];
3705 regs = (const union dbg_idle_chk_reg *)
3706 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3708 cond_regs = &regs[0].cond_reg;
3710 (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
3713 /* Check if all condition register blocks are out of reset, and
3714 * find maximal number of entries (all condition registers that
3715 * are memories must have the same size, which is > 1).
3717 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
3720 GET_FIELD(cond_regs[reg_id].data,
3721 DBG_IDLE_CHK_COND_REG_BLOCK_ID);
3723 if (block_id >= MAX_BLOCK_ID) {
3724 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3728 check_rule = !dev_data->block_in_reset[block_id];
3729 if (cond_regs[reg_id].num_entries > num_reg_entries)
3730 num_reg_entries = cond_regs[reg_id].num_entries;
/* If the rule can't be evaluated (a block is in reset) while
 * actually dumping, skip it. In size-calculation mode, instead
 * reserve worst-case space: every entry assumed failing.
 */
3733 if (!check_rule && dump)
3737 u32 entry_dump_size =
3738 qed_idle_chk_dump_failure(p_hwfn,
3747 offset += num_reg_entries * entry_dump_size;
3748 (*num_failing_rules) += num_reg_entries;
3752 /* Go over all register entries (number of entries is the same
3753 * for all condition registers).
3755 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3756 u32 next_reg_offset = 0;
3758 /* Read current entry of all condition registers */
3759 for (reg_id = 0; reg_id < rule->num_cond_regs;
3761 const struct dbg_idle_chk_cond_reg *reg =
3763 u32 padded_entry_size, addr;
3766 /* Find GRC address (if it's a memory, the
3767 * address of the specific entry is calculated).
3769 addr = GET_FIELD(reg->data,
3770 DBG_IDLE_CHK_COND_REG_ADDRESS);
3772 GET_FIELD(reg->data,
3773 DBG_IDLE_CHK_COND_REG_WIDE_BUS);
/* Memory entries are padded to a power of two, so the
 * entry address is base + (start + entry_id) * padded.
 */
3774 if (reg->num_entries > 1 ||
3775 reg->start_entry > 0) {
3777 reg->entry_size > 1 ?
3778 roundup_pow_of_two(reg->entry_size) :
3780 addr += (reg->start_entry + entry_id) *
3784 /* Read registers */
3785 if (next_reg_offset + reg->entry_size >=
3786 IDLE_CHK_MAX_ENTRIES_SIZE) {
3788 "idle check registers entry is too large\n");
3793 qed_grc_dump_addr_range(p_hwfn, p_ptt,
3799 SPLIT_TYPE_NONE, 0);
3802 /* Call rule condition function.
3803 * If returns true, it's a failure.
3805 if ((*cond_arr[rule->cond_id]) (cond_reg_values,
3807 offset += qed_idle_chk_dump_failure(p_hwfn,
3815 (*num_failing_rules)++;
3823 /* Performs Idle Check Dump to the specified buffer.
3824 * Returns the dumped size in dwords.
3826 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
3827 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3829 struct virt_mem_desc *dbg_buf =
3830 &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
3831 u32 num_failing_rules_offset, offset = 0,
3832 input_offset = 0, num_failing_rules = 0;
3834 /* Dump global params - 1 must match below amount of params */
3835 offset += qed_dump_common_global_params(p_hwfn,
3837 dump_buf + offset, dump, 1);
3838 offset += qed_dump_str_param(dump_buf + offset,
3839 dump, "dump-type", "idle-chk");
/* The num_rules value isn't known yet; a placeholder 0 is written
 * here and patched in place after all rules have been processed.
 */
3841 /* Dump idle check section header with a single parameter */
3842 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
3843 num_failing_rules_offset = offset;
3844 offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
/* Walk the binary rules buffer: each condition header gates a batch
 * of rules on an optional chip-mode expression.
 */
3846 while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
3847 const struct dbg_idle_chk_cond_hdr *cond_hdr =
3848 (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
3850 bool eval_mode, mode_match = true;
3851 u32 curr_failing_rules;
3852 u16 modes_buf_offset;
3855 eval_mode = GET_FIELD(cond_hdr->mode.data,
3856 DBG_MODE_HDR_EVAL_MODE) > 0;
3859 GET_FIELD(cond_hdr->mode.data,
3860 DBG_MODE_HDR_MODES_BUF_OFFSET);
3861 mode_match = qed_is_mode_match(p_hwfn,
3866 const struct dbg_idle_chk_rule *rule =
3867 (const struct dbg_idle_chk_rule *)((u32 *)
3870 u32 num_input_rules =
3871 cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
3873 qed_idle_chk_dump_rule_entries(p_hwfn,
3880 &curr_failing_rules);
3881 num_failing_rules += curr_failing_rules;
3884 input_offset += cond_hdr->data_size;
3887 /* Overwrite num_rules parameter */
3889 qed_dump_num_param(dump_buf + num_failing_rules_offset,
3890 dump, "num_rules", num_failing_rules);
3892 /* Dump last section */
3893 offset += qed_dump_last_section(dump_buf, offset, dump);
3898 /* Finds the meta data image in NVRAM */
/* Asks the MFW (via the NVM_GET_FILE_ATT mailbox command) for the
 * offset and length of the given image type, validates the response
 * and the 4-byte size alignment.
 */
3899 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3900 struct qed_ptt *p_ptt,
3902 u32 *nvram_offset_bytes,
3903 u32 *nvram_size_bytes)
3905 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
3906 struct mcp_file_att file_att;
3909 /* Call NVRAM get file command */
3910 nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
3912 DRV_MSG_CODE_NVM_GET_FILE_ATT,
3916 &ret_txn_size, (u32 *)&file_att);
3918 /* Check response */
3920 (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3921 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
3923 /* Update return values */
3924 *nvram_offset_bytes = file_att.nvm_start_addr;
3925 *nvram_size_bytes = file_att.len;
3929 "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
3930 image_type, *nvram_offset_bytes, *nvram_size_bytes);
3932 /* Check alignment */
3933 if (*nvram_size_bytes & 0x3)
3934 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
3936 return DBG_STATUS_OK;
3939 /* Reads data from NVRAM */
/* Reads nvram_size_bytes from NVRAM into ret_buf in chunks of at most
 * MCP_DRV_NVM_BUF_LEN bytes per NVM_READ_NVRAM mailbox command.
 */
3940 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3941 struct qed_ptt *p_ptt,
3942 u32 nvram_offset_bytes,
3943 u32 nvram_size_bytes, u32 *ret_buf)
3945 u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
3946 s32 bytes_left = nvram_size_bytes;
3947 u32 read_offset = 0, param = 0;
3951 "nvram_read: reading image of size %d bytes from NVRAM\n",
3957 MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
3959 /* Call NVRAM read command */
3960 SET_MFW_FIELD(param,
3961 DRV_MB_PARAM_NVM_OFFSET,
3962 nvram_offset_bytes + read_offset);
3963 SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
3964 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3965 DRV_MSG_CODE_NVM_READ_NVRAM, param,
3967 &ret_mcp_param, &ret_read_size,
3968 (u32 *)((u8 *)ret_buf + read_offset)))
3969 return DBG_STATUS_NVRAM_READ_FAILED;
3971 /* Check response */
3972 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3973 return DBG_STATUS_NVRAM_READ_FAILED;
/* Advance by what the MFW actually returned, which may be less
 * than bytes_to_copy.
 */
3975 /* Update read offset */
3976 read_offset += ret_read_size;
3977 bytes_left -= ret_read_size;
3978 } while (bytes_left > 0);
3980 return DBG_STATUS_OK;
3983 /* Get info on the MCP Trace data in the scratchpad:
3984 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
3985 * - trace_data_size (OUT): trace data size in bytes (without the header)
3987 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
3988 struct qed_ptt *p_ptt,
3989 u32 *trace_data_grc_addr,
3990 u32 *trace_data_size)
3992 u32 spad_trace_offsize, signature;
3994 /* Read trace section offsize structure from MCP scratchpad */
3995 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
3997 /* Extract trace section address from offsize (in scratchpad) */
3998 *trace_data_grc_addr =
3999 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
/* The section begins with a struct mcp_trace header; validate its
 * signature before trusting the size field.
 */
4001 /* Read signature from MCP trace section */
4002 signature = qed_rd(p_hwfn, p_ptt,
4003 *trace_data_grc_addr +
4004 offsetof(struct mcp_trace, signature));
4006 if (signature != MFW_TRACE_SIGNATURE)
4007 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4009 /* Read trace size from MCP trace section */
4010 *trace_data_size = qed_rd(p_hwfn,
4012 *trace_data_grc_addr +
4013 offsetof(struct mcp_trace, size));
4015 return DBG_STATUS_OK;
4018 /* Reads MCP trace meta data image from NVRAM
4019 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4020 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4021 * loaded from file).
4022 * - trace_meta_size (OUT): size in bytes of the trace meta data.
4024 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4025 struct qed_ptt *p_ptt,
4026 u32 trace_data_size_bytes,
4027 u32 *running_bundle_id,
4028 u32 *trace_meta_offset,
4029 u32 *trace_meta_size)
4031 u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4033 /* Read MCP trace section offsize structure from MCP scratchpad */
4034 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
/* The running bundle ID dword sits right after the trace section
 * (section offset + section size + trace data size).
 */
4036 /* Find running bundle ID */
4038 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4039 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4040 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4041 if (*running_bundle_id > 1)
4042 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
/* Bundle ID selects which of the two MFW trace images to look up. */
4044 /* Find image in NVRAM */
4046 (*running_bundle_id ==
4047 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4048 return qed_find_nvram_image(p_hwfn,
4051 trace_meta_offset, trace_meta_size);
4054 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
/* The meta image layout parsed here (via a byte cursor over the dword
 * buffer) is: dword signature, byte module count, per-module length-prefixed
 * names, then a second dword signature. Both signatures must equal
 * NVM_MAGIC_VALUE, otherwise DBG_STATUS_INVALID_TRACE_SIGNATURE is returned.
 */
4055 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4056 struct qed_ptt *p_ptt,
4057 u32 nvram_offset_in_bytes,
4058 u32 size_in_bytes, u32 *buf)
4060 u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4061 enum dbg_status status;
4064 /* Read meta data from NVRAM */
4065 status = qed_nvram_read(p_hwfn,
4067 nvram_offset_in_bytes, size_in_bytes, buf);
4068 if (status != DBG_STATUS_OK)
4071 /* Extract and check first signature */
4072 signature = qed_read_unaligned_dword(byte_buf);
4073 byte_buf += sizeof(signature);
4074 if (signature != NVM_MAGIC_VALUE)
4075 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4077 /* Extract number of modules */
4078 modules_num = *(byte_buf++);
4080 /* Skip all modules (each is a length byte followed by that many bytes) */
4081 for (i = 0; i < modules_num; i++) {
4082 module_len = *(byte_buf++);
4083 byte_buf += module_len;
4086 /* Extract and check second signature */
4087 signature = qed_read_unaligned_dword(byte_buf);
4088 byte_buf += sizeof(signature);
4089 if (signature != NVM_MAGIC_VALUE)
4090 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4092 return DBG_STATUS_OK;
4095 /* Dump MCP Trace */
/* When dump is false, only the required buffer size is computed and reported
 * through *num_dumped_dwords. When DBG_GRC_PARAM_NO_MCP is set (use_mfw is
 * false), the function returns DBG_STATUS_NVRAM_GET_IMAGE_FAILED to indicate
 * the resulting dump does not contain the meta data image.
 */
4096 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4097 struct qed_ptt *p_ptt,
4099 bool dump, u32 *num_dumped_dwords)
4101 u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4102 u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4103 u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4104 enum dbg_status status;
4108 *num_dumped_dwords = 0;
4110 use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4112 /* Get trace data info */
4113 status = qed_mcp_trace_get_data_info(p_hwfn,
4115 &trace_data_grc_addr,
4116 &trace_data_size_bytes);
4117 if (status != DBG_STATUS_OK)
4120 /* Dump global params */
4121 offset += qed_dump_common_global_params(p_hwfn,
4123 dump_buf + offset, dump, 1);
4124 offset += qed_dump_str_param(dump_buf + offset,
4125 dump, "dump-type", "mcp-trace");
4127 /* Halt MCP while reading from scratchpad so the read data will be
4128 * consistent. if halt fails, MCP trace is taken anyway, with a small
4129 * risk that it may be corrupt.
4131 if (dump && use_mfw) {
4132 halted = !qed_mcp_halt(p_hwfn, p_ptt);
4134 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4137 /* Find trace data size */
4138 trace_data_size_dwords =
4139 DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4142 /* Dump trace data section header and param */
4143 offset += qed_dump_section_hdr(dump_buf + offset,
4144 dump, "mcp_trace_data", 1);
4145 offset += qed_dump_num_param(dump_buf + offset,
4146 dump, "size", trace_data_size_dwords);
4148 /* Read trace data from scratchpad into dump buffer */
4149 offset += qed_grc_dump_addr_range(p_hwfn,
4153 BYTES_TO_DWORDS(trace_data_grc_addr),
4154 trace_data_size_dwords, false,
4155 SPLIT_TYPE_NONE, 0);
4157 /* Resume MCP (only if halt succeeded) */
4158 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4159 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4161 /* Dump trace meta section header */
4162 offset += qed_dump_section_hdr(dump_buf + offset,
4163 dump, "mcp_trace_meta", 1);
4165 /* If MCP Trace meta size parameter was set, use it.
4166 * Otherwise, read trace meta.
4167 * trace_meta_size_bytes is dword-aligned.
4169 trace_meta_size_bytes =
4170 qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
4171 if ((!trace_meta_size_bytes || dump) && use_mfw)
4172 status = qed_mcp_trace_get_meta_info(p_hwfn,
4174 trace_data_size_bytes,
4176 &trace_meta_offset_bytes,
4177 &trace_meta_size_bytes);
4178 if (status == DBG_STATUS_OK)
4179 trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
4181 /* Dump trace meta size param */
4182 offset += qed_dump_num_param(dump_buf + offset,
4183 dump, "size", trace_meta_size_dwords);
4185 /* Read trace meta image into dump buffer */
4186 if (dump && trace_meta_size_dwords)
4187 status = qed_mcp_trace_read_meta(p_hwfn,
4189 trace_meta_offset_bytes,
4190 trace_meta_size_bytes,
4192 if (status == DBG_STATUS_OK)
4193 offset += trace_meta_size_dwords;
4195 /* Dump last section */
4196 offset += qed_dump_last_section(dump_buf, offset, dump);
4198 *num_dumped_dwords = offset;
4200 /* If no mcp access, indicate that the dump doesn't contain the meta
4203 return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Dump GRC trace (reg) FIFO. When dump is false, the worst-case size
 * (REG_FIFO_DEPTH_DWORDS) is reserved since the actual fill level cannot be
 * known without draining the FIFO. The "size" param written at
 * size_param_offset is patched after the FIFO has been read.
 */
4207 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4208 struct qed_ptt *p_ptt,
4210 bool dump, u32 *num_dumped_dwords)
4212 u32 dwords_read, size_param_offset, offset = 0, addr, len;
4215 *num_dumped_dwords = 0;
4217 /* Dump global params */
4218 offset += qed_dump_common_global_params(p_hwfn,
4220 dump_buf + offset, dump, 1);
4221 offset += qed_dump_str_param(dump_buf + offset,
4222 dump, "dump-type", "reg-fifo");
4224 /* Dump fifo data section header and param. The size param is 0 for
4225 * now, and is overwritten after reading the FIFO.
4227 offset += qed_dump_section_hdr(dump_buf + offset,
4228 dump, "reg_fifo_data", 1);
4229 size_param_offset = offset;
4230 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4233 /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4234 * test how much data is available, except for reading it.
4236 offset += REG_FIFO_DEPTH_DWORDS;
4240 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4241 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4243 /* Pull available data from fifo. Use DMAE since this is widebus memory
4244 * and must be accessed atomically. Test for dwords_read not passing
4245 * buffer size since more entries could be added to the buffer as we are
4248 addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
4249 len = REG_FIFO_ELEMENT_DWORDS;
4250 for (dwords_read = 0;
4251 fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4252 dwords_read += REG_FIFO_ELEMENT_DWORDS) {
4253 offset += qed_grc_dump_addr_range(p_hwfn,
4259 true, SPLIT_TYPE_NONE,
4261 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4262 GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4265 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4268 /* Dump last section */
4269 offset += qed_dump_last_section(dump_buf, offset, dump);
4271 *num_dumped_dwords = offset;
4273 return DBG_STATUS_OK;
/* Dump IGU error-handling FIFO. Mirrors qed_reg_fifo_dump(): worst-case
 * IGU_FIFO_DEPTH_DWORDS is reserved when only sizing (dump == false), and
 * the "size" param is patched after the FIFO has been drained.
 */
4277 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4278 struct qed_ptt *p_ptt,
4280 bool dump, u32 *num_dumped_dwords)
4282 u32 dwords_read, size_param_offset, offset = 0, addr, len;
4285 *num_dumped_dwords = 0;
4287 /* Dump global params */
4288 offset += qed_dump_common_global_params(p_hwfn,
4290 dump_buf + offset, dump, 1);
4291 offset += qed_dump_str_param(dump_buf + offset,
4292 dump, "dump-type", "igu-fifo");
4294 /* Dump fifo data section header and param. The size param is 0 for
4295 * now, and is overwritten after reading the FIFO.
4297 offset += qed_dump_section_hdr(dump_buf + offset,
4298 dump, "igu_fifo_data", 1);
4299 size_param_offset = offset;
4300 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4303 /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4304 * test how much data is available, except for reading it.
4306 offset += IGU_FIFO_DEPTH_DWORDS;
4310 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4311 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4313 /* Pull available data from fifo. Use DMAE since this is widebus memory
4314 * and must be accessed atomically. Test for dwords_read not passing
4315 * buffer size since more entries could be added to the buffer as we are
4318 addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
4319 len = IGU_FIFO_ELEMENT_DWORDS;
4320 for (dwords_read = 0;
4321 fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4322 dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
4323 offset += qed_grc_dump_addr_range(p_hwfn,
4329 true, SPLIT_TYPE_NONE,
4331 fifo_has_data = qed_rd(p_hwfn, p_ptt,
4332 IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4335 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4338 /* Dump last section */
4339 offset += qed_dump_last_section(dump_buf, offset, dump);
4341 *num_dumped_dwords = offset;
4343 return DBG_STATUS_OK;
4346 /* Protection Override dump */
/* When dump is false, worst-case PROTECTION_OVERRIDE_DEPTH_DWORDS is
 * reserved; the real element count is read from
 * GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW and the "size" param is patched
 * afterwards.
 */
4347 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4348 struct qed_ptt *p_ptt,
4351 u32 *num_dumped_dwords)
4353 u32 size_param_offset, override_window_dwords, offset = 0, addr;
4355 *num_dumped_dwords = 0;
4357 /* Dump global params */
4358 offset += qed_dump_common_global_params(p_hwfn,
4360 dump_buf + offset, dump, 1);
4361 offset += qed_dump_str_param(dump_buf + offset,
4362 dump, "dump-type", "protection-override");
4364 /* Dump data section header and param. The size param is 0 for now,
4365 * and is overwritten after reading the data.
4367 offset += qed_dump_section_hdr(dump_buf + offset,
4368 dump, "protection_override_data", 1);
4369 size_param_offset = offset;
4370 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4373 offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4377 /* Add override window info to buffer */
4378 override_window_dwords =
4379 qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4380 PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4381 if (override_window_dwords) {
4382 addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
4383 offset += qed_grc_dump_addr_range(p_hwfn,
4388 override_window_dwords,
4389 true, SPLIT_TYPE_NONE, 0);
4390 qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4391 override_window_dwords);
4394 /* Dump last section */
4395 offset += qed_dump_last_section(dump_buf, offset, dump);
4397 *num_dumped_dwords = offset;
4399 return DBG_STATUS_OK;
4402 /* Performs FW Asserts Dump to the specified buffer.
4403 * Returns the dumped size in dwords.
/* Storms whose SEM block is in reset are skipped. For each remaining Storm,
 * the fw_asserts_section from its FW info is used to locate the assert list
 * in the SEM fast-access INT RAM, and only the most recent list element is
 * dumped.
 */
4405 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4406 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4408 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4409 struct fw_asserts_ram_section *asserts;
4410 char storm_letter_str[2] = "?";
4411 struct fw_info fw_info;
4415 /* Dump global params */
4416 offset += qed_dump_common_global_params(p_hwfn,
4418 dump_buf + offset, dump, 1);
4419 offset += qed_dump_str_param(dump_buf + offset,
4420 dump, "dump-type", "fw-asserts");
4422 /* Find Storm dump size */
4423 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4424 u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4425 struct storm_defs *storm = &s_storm_defs[storm_id];
4426 u32 last_list_idx, addr;
4428 if (dev_data->block_in_reset[storm->sem_block_id])
4431 /* Read FW info for the current Storm */
4432 qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4434 asserts = &fw_info.fw_asserts_section;
4436 /* Dump FW Asserts section header and params */
4437 storm_letter_str[0] = storm->letter;
4438 offset += qed_dump_section_hdr(dump_buf + offset,
4439 dump, "fw_asserts", 2);
4440 offset += qed_dump_str_param(dump_buf + offset,
4441 dump, "storm", storm_letter_str);
4442 offset += qed_dump_num_param(dump_buf + offset,
4445 asserts->list_element_dword_size);
4447 /* Read and dump FW Asserts data */
4449 offset += asserts->list_element_dword_size;
4453 addr = le16_to_cpu(asserts->section_ram_line_offset);
4454 fw_asserts_section_addr = storm->sem_fast_mem_addr +
4455 SEM_FAST_REG_INT_RAM +
4456 RAM_LINES_TO_BYTES(addr);
4458 next_list_idx_addr = fw_asserts_section_addr +
4459 DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4460 next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
/* last_list_idx wraps to the end of the list when next_list_idx is 0 */
4461 last_list_idx = (next_list_idx > 0 ?
4463 asserts->list_num_elements) - 1;
4464 addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
4465 asserts->list_dword_offset +
4466 last_list_idx * asserts->list_element_dword_size;
4468 qed_grc_dump_addr_range(p_hwfn, p_ptt,
4471 asserts->list_element_dword_size,
4472 false, SPLIT_TYPE_NONE, 0);
4475 /* Dump last section */
4476 offset += qed_dump_last_section(dump_buf, offset, dump);
4481 /* Dumps the specified ILT pages to the specified buffer.
4482 * Returns the dumped size in dwords.
/* Pages whose shadow descriptor has no virtual address (never allocated)
 * are skipped. When dump_page_ids is set only the page IDs are emitted;
 * otherwise the page memory itself is copied.
 */
4484 static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
4488 struct phys_mem_desc *ilt_pages,
4491 u32 page_id, end_page_id, offset = 0;
4496 end_page_id = start_page_id + num_pages - 1;
4498 for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
4499 struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
4503 * if (page_id >= ->p_cxt_mngr->ilt_shadow_size)
4507 if (!ilt_pages[page_id].virt_addr)
4510 if (dump_page_ids) {
4511 /* Copy page ID to dump buffer */
4513 *(dump_buf + offset) = page_id;
4516 /* Copy page memory to dump buffer */
4518 memcpy(dump_buf + offset,
4519 mem_desc->virt_addr, mem_desc->size);
4520 offset += BYTES_TO_DWORDS(mem_desc->size);
4527 /* Dumps a section containing the dumped ILT pages.
4528 * Returns the dumped size in dwords.
/* Emits either an "ilt_page_ids" or an "ilt_page_mem" section (selected by
 * dump_page_ids), covering CDUC connection pages and CDUT task pages for the
 * PF and all its VFs, each gated by its DBG_GRC_PARAM_DUMP_ILT_* param.
 * The section "size" param is written as 0 up front and overwritten with the
 * real data size at the end.
 */
4530 static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
4533 u32 valid_conn_pf_pages,
4534 u32 valid_conn_vf_pages,
4535 struct phys_mem_desc *ilt_pages,
4538 struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
4539 u32 pf_start_line, start_page_id, offset = 0;
4540 u32 cdut_pf_init_pages, cdut_vf_init_pages;
4541 u32 cdut_pf_work_pages, cdut_vf_work_pages;
4542 u32 base_data_offset, size_param_offset;
4543 u32 cdut_pf_pages, cdut_vf_pages;
4544 const char *section_name;
4547 section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
4548 cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
4549 cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
4550 cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
4551 cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
4552 cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
4553 cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
4554 pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
4557 qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);
4559 /* Dump size parameter (0 for now, overwritten with real size later) */
4560 size_param_offset = offset;
4561 offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4562 base_data_offset = offset;
4564 /* CDUC pages are ordered as follows:
4565 * - PF pages - valid section (included in PF connection type mapping)
4566 * - PF pages - invalid section (not dumped)
4567 * - For each VF in the PF:
4568 * - VF pages - valid section (included in VF connection type mapping)
4569 * - VF pages - invalid section (not dumped)
4571 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
4572 /* Dump connection PF pages */
4573 start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
4574 offset += qed_ilt_dump_pages_range(dump_buf + offset,
4577 valid_conn_pf_pages,
4578 ilt_pages, dump_page_ids);
4580 /* Dump connection VF pages */
4581 start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
4582 for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
4583 i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
4584 offset += qed_ilt_dump_pages_range(dump_buf + offset,
4587 valid_conn_vf_pages,
4592 /* CDUT pages are ordered as follows:
4593 * - PF init pages (not dumped)
4595 * - For each VF in the PF:
4596 * - VF init pages (not dumped)
4599 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
4600 /* Dump task PF pages */
4601 start_page_id = clients[ILT_CLI_CDUT].first.val +
4602 cdut_pf_init_pages - pf_start_line;
4603 offset += qed_ilt_dump_pages_range(dump_buf + offset,
4607 ilt_pages, dump_page_ids);
4609 /* Dump task VF pages */
4610 start_page_id = clients[ILT_CLI_CDUT].first.val +
4611 cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
4612 for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
4613 i++, start_page_id += cdut_vf_pages)
4614 offset += qed_ilt_dump_pages_range(dump_buf + offset,
4622 /* Overwrite size param with the actual dumped data size */
4624 qed_dump_num_param(dump_buf + size_param_offset,
4625 dump, "size", offset - base_data_offset);
4630 /* Performs ILT Dump to the specified buffer.
4631 * Returns the dumped size in dwords.
/* Layout of the produced dump: 22 global params, per-connection-type PF and
 * VF CID counts, physical memory descriptors for every ILT shadow page, then
 * the ILT page IDs section and the ILT page memory section (both produced by
 * qed_ilt_dump_pages_section()), and a last-section trailer.
 */
4633 static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
4634 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4636 struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
4637 u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
4638 u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
4639 u32 num_cids_per_page, conn_ctx_size;
4640 u32 cduc_page_size, cdut_page_size;
4641 struct phys_mem_desc *ilt_pages;
4644 cduc_page_size = 1 <<
4645 (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
4646 cdut_page_size = 1 <<
4647 (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
4648 conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
4649 num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
4650 ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
4652 /* Dump global params - 22 must match number of params below */
4653 offset += qed_dump_common_global_params(p_hwfn, p_ptt,
4654 dump_buf + offset, dump, 22);
4655 offset += qed_dump_str_param(dump_buf + offset,
4656 dump, "dump-type", "ilt-dump");
4657 offset += qed_dump_num_param(dump_buf + offset,
4659 "cduc-page-size", cduc_page_size);
4660 offset += qed_dump_num_param(dump_buf + offset,
4662 "cduc-first-page-id",
4663 clients[ILT_CLI_CDUC].first.val);
4664 offset += qed_dump_num_param(dump_buf + offset,
4666 "cduc-last-page-id",
4667 clients[ILT_CLI_CDUC].last.val);
4668 offset += qed_dump_num_param(dump_buf + offset,
4670 "cduc-num-pf-pages",
4672 [ILT_CLI_CDUC].pf_total_lines);
4673 offset += qed_dump_num_param(dump_buf + offset,
4675 "cduc-num-vf-pages",
4677 [ILT_CLI_CDUC].vf_total_lines);
4678 offset += qed_dump_num_param(dump_buf + offset,
4680 "max-conn-ctx-size",
4682 offset += qed_dump_num_param(dump_buf + offset,
4684 "cdut-page-size", cdut_page_size);
4685 offset += qed_dump_num_param(dump_buf + offset,
4687 "cdut-first-page-id",
4688 clients[ILT_CLI_CDUT].first.val);
4689 offset += qed_dump_num_param(dump_buf + offset,
4691 "cdut-last-page-id",
4692 clients[ILT_CLI_CDUT].last.val);
4693 offset += qed_dump_num_param(dump_buf + offset,
4695 "cdut-num-pf-init-pages",
4696 qed_get_cdut_num_pf_init_pages(p_hwfn));
4697 offset += qed_dump_num_param(dump_buf + offset,
4699 "cdut-num-vf-init-pages",
4700 qed_get_cdut_num_vf_init_pages(p_hwfn));
4701 offset += qed_dump_num_param(dump_buf + offset,
4703 "cdut-num-pf-work-pages",
4704 qed_get_cdut_num_pf_work_pages(p_hwfn));
4705 offset += qed_dump_num_param(dump_buf + offset,
4707 "cdut-num-vf-work-pages",
4708 qed_get_cdut_num_vf_work_pages(p_hwfn));
4709 offset += qed_dump_num_param(dump_buf + offset,
4711 "max-task-ctx-size",
4712 p_hwfn->p_cxt_mngr->task_ctx_size);
4713 offset += qed_dump_num_param(dump_buf + offset,
4716 p_hwfn->p_cxt_mngr->task_type_id);
4717 offset += qed_dump_num_param(dump_buf + offset,
4719 "first-vf-id-in-pf",
4720 p_hwfn->p_cxt_mngr->first_vf_in_pf);
4721 offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
4724 p_hwfn->p_cxt_mngr->vf_count);
4725 offset += qed_dump_num_param(dump_buf + offset,
4727 "ptr-size-bytes", sizeof(void *));
4728 offset += qed_dump_num_param(dump_buf + offset,
4731 p_hwfn->p_cxt_mngr->pf_start_line);
4732 offset += qed_dump_num_param(dump_buf + offset,
4734 "page-mem-desc-size-dwords",
4735 PAGE_MEM_DESC_SIZE_DWORDS);
4736 offset += qed_dump_num_param(dump_buf + offset,
4739 p_hwfn->p_cxt_mngr->ilt_shadow_size);
4740 /* Additional/Less parameters require matching of number in call to
4741 * dump_common_global_params()
4744 /* Dump section containing number of PF CIDs per connection type */
4745 offset += qed_dump_section_hdr(dump_buf + offset,
4746 dump, "num_pf_cids_per_conn_type", 1);
4747 offset += qed_dump_num_param(dump_buf + offset,
4748 dump, "size", NUM_OF_CONNECTION_TYPES_E4);
4749 for (conn_type = 0, valid_conn_pf_cids = 0;
4750 conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
4752 p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
4755 *(dump_buf + offset) = num_pf_cids;
4756 valid_conn_pf_cids += num_pf_cids;
4759 /* Dump section containing number of VF CIDs per connection type */
4760 offset += qed_dump_section_hdr(dump_buf + offset,
4761 dump, "num_vf_cids_per_conn_type", 1);
4762 offset += qed_dump_num_param(dump_buf + offset,
4763 dump, "size", NUM_OF_CONNECTION_TYPES_E4);
4764 for (conn_type = 0, valid_conn_vf_cids = 0;
4765 conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
4767 p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
4770 *(dump_buf + offset) = num_vf_cids;
4771 valid_conn_vf_cids += num_vf_cids;
4774 /* Dump section containing physical memory descs for each ILT page */
4775 num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
4776 offset += qed_dump_section_hdr(dump_buf + offset,
4777 dump, "ilt_page_desc", 1);
4778 offset += qed_dump_num_param(dump_buf + offset,
4781 num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
4783 /* Copy memory descriptors to dump buffer */
4787 for (page_id = 0; page_id < num_pages;
4788 page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
4789 memcpy(dump_buf + offset,
4790 &ilt_pages[page_id],
4791 DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
4793 offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
/* Valid page counts are derived from the accumulated CID totals */
4796 valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
4798 valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
4801 /* Dump ILT pages IDs */
4802 offset += qed_ilt_dump_pages_section(p_hwfn,
4805 valid_conn_pf_pages,
4806 valid_conn_vf_pages,
4809 /* Dump ILT pages memory */
4810 offset += qed_ilt_dump_pages_section(p_hwfn,
4813 valid_conn_pf_pages,
4814 valid_conn_vf_pages,
4817 /* Dump last section */
4818 offset += qed_dump_last_section(dump_buf, offset, dump);
4823 /***************************** Public Functions *******************************/
/* Registers the debug binary data blob: walks the array of bin_buffer_hdr
 * entries at the start of bin_ptr and records each typed sub-buffer
 * (offset/length relative to bin_ptr) via qed_set_dbg_bin_buf().
 * Always returns DBG_STATUS_OK.
 */
4825 enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
4826 const u8 * const bin_ptr)
4828 struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
4831 /* Convert binary data to debug arrays */
4832 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
4833 qed_set_dbg_bin_buf(p_hwfn,
4835 (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
4836 buf_hdrs[buf_id].length);
4838 return DBG_STATUS_OK;
/* Reads FW info from the first Storm whose SEM block is not in reset.
 * NOTE(review): the return statements are not visible here; presumably true
 * is returned once a Storm's FW info was read and false when all Storms are
 * in reset - confirm against the full source.
 */
4841 bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
4842 struct qed_ptt *p_ptt, struct fw_info *fw_info)
4844 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4847 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4848 struct storm_defs *storm = &s_storm_defs[storm_id];
4850 /* Skip Storm if it's in reset */
4851 if (dev_data->block_in_reset[storm->sem_block_id])
4854 /* Read FW info for the current Storm */
4855 qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
/* Sets a single GRC parameter. Rejects out-of-range param IDs and values.
 * For preset params (is_preset), setting the preset expands into assigning
 * the corresponding preset value to every non-persistent GRC param;
 * disabling a preset is not allowed.
 */
4863 enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
4864 enum dbg_grc_params grc_param, u32 val)
4866 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4867 enum dbg_status status;
4872 "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
4874 status = qed_dbg_dev_init(p_hwfn);
4875 if (status != DBG_STATUS_OK)
4878 /* Initializes the GRC parameters (if not initialized). Needed in order
4879 * to set the default parameter values for the first time.
4881 qed_dbg_grc_init_params(p_hwfn);
4883 if (grc_param >= MAX_DBG_GRC_PARAMS)
4884 return DBG_STATUS_INVALID_ARGS;
4885 if (val < s_grc_param_defs[grc_param].min ||
4886 val > s_grc_param_defs[grc_param].max)
4887 return DBG_STATUS_INVALID_ARGS;
4889 if (s_grc_param_defs[grc_param].is_preset) {
4892 /* Disabling a preset is not allowed. Call
4893 * dbg_grc_set_params_default instead.
4896 return DBG_STATUS_INVALID_ARGS;
4898 /* Update all params with the preset values */
4899 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
4900 struct grc_param_defs *defs = &s_grc_param_defs[i];
4902 /* Skip persistent params */
4903 if (defs->is_persistent)
4906 /* Find preset value */
4907 if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
4909 defs->exclude_all_preset_val;
4910 else if (grc_param == DBG_GRC_PARAM_CRASH)
4912 defs->crash_preset_val[dev_data->chip_id];
4914 return DBG_STATUS_INVALID_ARGS;
4916 qed_grc_set_param(p_hwfn, i, preset_val);
4919 /* Regular param - set its value */
4920 qed_grc_set_param(p_hwfn, grc_param, val);
4923 return DBG_STATUS_OK;
4926 /* Assign default GRC param values */
/* Resets every non-persistent GRC param to its chip-specific default;
 * persistent params keep their current value.
 */
4927 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
4929 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4932 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
4933 if (!s_grc_param_defs[i].is_persistent)
4934 dev_data->grc.param_val[i] =
4935 s_grc_param_defs[i].default_val[dev_data->chip_id];
/* Computes the GRC dump buffer size (in dwords) by running qed_grc_dump()
 * in size-only mode (dump == false, NULL buffer). Fails with
 * DBG_STATUS_DBG_ARRAY_NOT_SET if any required debug binary array was not
 * registered via qed_dbg_set_bin_ptr().
 */
4938 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4939 struct qed_ptt *p_ptt,
4942 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
4946 if (status != DBG_STATUS_OK)
4949 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
4950 !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
4951 !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
4952 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
4953 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
4954 return DBG_STATUS_DBG_ARRAY_NOT_SET;
4956 return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs the actual GRC dump into the caller-supplied buffer after
 * verifying it is large enough, then restores default GRC params.
 */
4959 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
4960 struct qed_ptt *p_ptt,
4962 u32 buf_size_in_dwords,
4963 u32 *num_dumped_dwords)
4965 u32 needed_buf_size_in_dwords;
4966 enum dbg_status status;
4968 *num_dumped_dwords = 0;
4970 status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
4972 &needed_buf_size_in_dwords);
4973 if (status != DBG_STATUS_OK)
4976 if (buf_size_in_dwords < needed_buf_size_in_dwords)
4977 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
4980 status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
4982 /* Revert GRC params to their default */
4983 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the Idle Check dump buffer size (in dwords). The size is
 * computed once in size-only mode and then cached in idle_chk->buf_size.
 */
4988 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
4989 struct qed_ptt *p_ptt,
4992 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4993 struct idle_chk_data *idle_chk = &dev_data->idle_chk;
4994 enum dbg_status status;
4998 status = qed_dbg_dev_init(p_hwfn);
4999 if (status != DBG_STATUS_OK)
5002 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5003 !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5004 !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5005 !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5006 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5008 if (!idle_chk->buf_size_set) {
5009 idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5010 p_ptt, NULL, false);
5011 idle_chk->buf_size_set = true;
5014 *buf_size = idle_chk->buf_size;
5016 return DBG_STATUS_OK;
/* Performs an Idle Check dump: takes blocks out of reset and refreshes the
 * reset state before dumping, then restores default GRC params.
 */
5019 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5020 struct qed_ptt *p_ptt,
5022 u32 buf_size_in_dwords,
5023 u32 *num_dumped_dwords)
5025 u32 needed_buf_size_in_dwords;
5026 enum dbg_status status;
5028 *num_dumped_dwords = 0;
5030 status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5032 &needed_buf_size_in_dwords);
5033 if (status != DBG_STATUS_OK)
5036 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5037 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5039 /* Update reset state */
5040 qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
5041 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5043 /* Idle Check Dump */
5044 *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5046 /* Revert GRC params to their default */
5047 qed_dbg_grc_set_params_default(p_hwfn);
5049 return DBG_STATUS_OK;
/* Computes the MCP Trace dump buffer size (in dwords) via a size-only run
 * of qed_mcp_trace_dump().
 */
5052 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5053 struct qed_ptt *p_ptt,
5056 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5060 if (status != DBG_STATUS_OK)
5063 return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an MCP Trace dump. DBG_STATUS_NVRAM_GET_IMAGE_FAILED from the
 * size query is tolerated because a trace without meta data is still usable.
 */
5066 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5067 struct qed_ptt *p_ptt,
5069 u32 buf_size_in_dwords,
5070 u32 *num_dumped_dwords)
5072 u32 needed_buf_size_in_dwords;
5073 enum dbg_status status;
5076 qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5078 &needed_buf_size_in_dwords);
5079 if (status != DBG_STATUS_OK && status !=
5080 DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5083 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5084 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5086 /* Update reset state */
5087 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5090 status = qed_mcp_trace_dump(p_hwfn,
5091 p_ptt, dump_buf, true, num_dumped_dwords);
5093 /* Revert GRC params to their default */
5094 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the reg FIFO dump buffer size (in dwords) via a size-only run
 * of qed_reg_fifo_dump().
 */
5099 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5100 struct qed_ptt *p_ptt,
5103 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5107 if (status != DBG_STATUS_OK)
5110 return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs a reg FIFO dump after validating the caller's buffer size,
 * then restores default GRC params.
 */
5113 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5114 struct qed_ptt *p_ptt,
5116 u32 buf_size_in_dwords,
5117 u32 *num_dumped_dwords)
5119 u32 needed_buf_size_in_dwords;
5120 enum dbg_status status;
5122 *num_dumped_dwords = 0;
5124 status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5126 &needed_buf_size_in_dwords);
5127 if (status != DBG_STATUS_OK)
5130 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5131 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5133 /* Update reset state */
5134 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5136 status = qed_reg_fifo_dump(p_hwfn,
5137 p_ptt, dump_buf, true, num_dumped_dwords);
5139 /* Revert GRC params to their default */
5140 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the IGU FIFO dump buffer size (in dwords) via a size-only run
 * of qed_igu_fifo_dump().
 */
5145 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5146 struct qed_ptt *p_ptt,
5149 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5153 if (status != DBG_STATUS_OK)
5156 return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an IGU FIFO dump after validating the caller's buffer size,
 * then restores default GRC params.
 */
5159 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5160 struct qed_ptt *p_ptt,
5162 u32 buf_size_in_dwords,
5163 u32 *num_dumped_dwords)
5165 u32 needed_buf_size_in_dwords;
5166 enum dbg_status status;
5168 *num_dumped_dwords = 0;
5170 status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5172 &needed_buf_size_in_dwords);
5173 if (status != DBG_STATUS_OK)
5176 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5177 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5179 /* Update reset state */
5180 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5182 status = qed_igu_fifo_dump(p_hwfn,
5183 p_ptt, dump_buf, true, num_dumped_dwords);
5184 /* Revert GRC params to their default */
5185 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the protection override dump buffer size (in dwords) via a
 * size-only run of qed_protection_override_dump().
 */
5191 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5192 struct qed_ptt *p_ptt,
5195 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5199 if (status != DBG_STATUS_OK)
5202 return qed_protection_override_dump(p_hwfn,
5203 p_ptt, NULL, false, buf_size);
/* Performs a protection override dump after validating the caller's buffer
 * size, then restores default GRC params.
 */
5206 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5207 struct qed_ptt *p_ptt,
5209 u32 buf_size_in_dwords,
5210 u32 *num_dumped_dwords)
5212 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5213 enum dbg_status status;
5215 *num_dumped_dwords = 0;
5218 qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5221 if (status != DBG_STATUS_OK)
5224 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5225 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5227 /* Update reset state */
5228 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5230 status = qed_protection_override_dump(p_hwfn,
5233 true, num_dumped_dwords);
5235 /* Revert GRC params to their default */
5236 qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the FW Asserts dump buffer size (in dwords) via a size-only run
 * of qed_fw_asserts_dump() after refreshing the block reset state.
 */
5241 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5242 struct qed_ptt *p_ptt,
5245 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5249 if (status != DBG_STATUS_OK)
5252 /* Update reset state */
5253 qed_update_blocks_reset_state(p_hwfn, p_ptt);
5255 *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5257 return DBG_STATUS_OK;
5260 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5261 struct qed_ptt *p_ptt,
5263 u32 buf_size_in_dwords,
5264 u32 *num_dumped_dwords)
5266 u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5267 enum dbg_status status;
5269 *num_dumped_dwords = 0;
5272 qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5275 if (status != DBG_STATUS_OK)
5278 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5279 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5281 *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5283 /* Revert GRC params to their default */
5284 qed_dbg_grc_set_params_default(p_hwfn);
5286 return DBG_STATUS_OK;
5289 static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5290 struct qed_ptt *p_ptt,
5293 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5297 if (status != DBG_STATUS_OK)
5300 *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
5302 return DBG_STATUS_OK;
5305 static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
5306 struct qed_ptt *p_ptt,
5308 u32 buf_size_in_dwords,
5309 u32 *num_dumped_dwords)
5311 u32 needed_buf_size_in_dwords;
5312 enum dbg_status status;
5314 *num_dumped_dwords = 0;
5316 status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
5318 &needed_buf_size_in_dwords);
5319 if (status != DBG_STATUS_OK)
5322 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5323 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5325 *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
5327 /* Reveret GRC params to their default */
5328 qed_dbg_grc_set_params_default(p_hwfn);
5330 return DBG_STATUS_OK;
5333 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5334 struct qed_ptt *p_ptt,
5335 enum block_id block_id,
5336 enum dbg_attn_type attn_type,
5338 struct dbg_attn_block_result *results)
5340 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5341 u8 reg_idx, num_attn_regs, num_result_regs = 0;
5342 const struct dbg_attn_reg *attn_reg_arr;
5344 if (status != DBG_STATUS_OK)
5347 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5348 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5349 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5350 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5352 attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
5354 attn_type, &num_attn_regs);
5356 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5357 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5358 struct dbg_attn_reg_result *reg_result;
5359 u32 sts_addr, sts_val;
5360 u16 modes_buf_offset;
5364 eval_mode = GET_FIELD(reg_data->mode.data,
5365 DBG_MODE_HDR_EVAL_MODE) > 0;
5366 modes_buf_offset = GET_FIELD(reg_data->mode.data,
5367 DBG_MODE_HDR_MODES_BUF_OFFSET);
5368 if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5371 /* Mode match - read attention status register */
5372 sts_addr = DWORDS_TO_BYTES(clear_status ?
5373 reg_data->sts_clr_address :
5374 GET_FIELD(reg_data->data,
5375 DBG_ATTN_REG_STS_ADDRESS));
5376 sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5380 /* Non-zero attention status - add to results */
5381 reg_result = &results->reg_results[num_result_regs];
5382 SET_FIELD(reg_result->data,
5383 DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5384 SET_FIELD(reg_result->data,
5385 DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5386 GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5387 reg_result->block_attn_offset = reg_data->block_attn_offset;
5388 reg_result->sts_val = sts_val;
5389 reg_result->mask_val = qed_rd(p_hwfn,
5392 (reg_data->mask_address));
5396 results->block_id = (u8)block_id;
5397 results->names_offset =
5398 qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
5399 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5400 SET_FIELD(results->data,
5401 DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5403 return DBG_STATUS_OK;
5406 /******************************* Data Types **********************************/
5408 /* REG fifo element */
5409 struct reg_fifo_element {
5411 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
5412 #define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
5413 #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
5414 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
5415 #define REG_FIFO_ELEMENT_PF_SHIFT 24
5416 #define REG_FIFO_ELEMENT_PF_MASK 0xf
5417 #define REG_FIFO_ELEMENT_VF_SHIFT 28
5418 #define REG_FIFO_ELEMENT_VF_MASK 0xff
5419 #define REG_FIFO_ELEMENT_PORT_SHIFT 36
5420 #define REG_FIFO_ELEMENT_PORT_MASK 0x3
5421 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
5422 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
5423 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
5424 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
5425 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43
5426 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf
5427 #define REG_FIFO_ELEMENT_ERROR_SHIFT 47
5428 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
5431 /* REG fifo error element */
5432 struct reg_fifo_err {
5434 const char *err_msg;
5437 /* IGU fifo element */
5438 struct igu_fifo_element {
5440 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
5441 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
5442 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
5443 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
5444 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
5445 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
5446 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
5447 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
5448 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
5449 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
5452 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
5453 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
5454 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
5455 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
5459 struct igu_fifo_wr_data {
5461 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
5462 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
5463 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
5464 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
5465 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
5466 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
5467 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
5468 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
5469 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
5470 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
5471 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
5472 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
5475 struct igu_fifo_cleanup_wr_data {
5477 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
5478 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
5479 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
5480 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
5481 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
5482 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
5483 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
5484 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
5487 /* Protection override element */
5488 struct protection_override_element {
5490 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
5491 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
5492 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
5493 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
5494 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
5495 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
5496 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
5497 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
5498 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
5499 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
5500 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
5501 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
/* IGU command sources; indexes into s_igu_fifo_source_strs.
 * NOTE(review): enumerators were elided from the listing and restored from
 * the canonical qed_debug.c - verify against upstream.
 */
enum igu_fifo_sources {
	IGU_SRC_PXP0,
	IGU_SRC_PXP1,
	IGU_SRC_PXP2,
	IGU_SRC_PXP3,
	IGU_SRC_PXP4,
	IGU_SRC_PXP5,
	IGU_SRC_PXP6,
	IGU_SRC_PXP7,
	IGU_SRC_CAU,
	IGU_SRC_ATTN,
	IGU_SRC_GRC,
};
/* Classification of an IGU command address (see s_igu_fifo_addr_data) */
enum igu_fifo_addr_types {
	IGU_ADDR_TYPE_MSIX_MEM,
	IGU_ADDR_TYPE_WRITE_PBA,
	IGU_ADDR_TYPE_WRITE_INT_ACK,
	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
	IGU_ADDR_TYPE_READ_INT,
	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
	IGU_ADDR_TYPE_RESERVED
};
5528 struct igu_fifo_addr_data {
5533 enum igu_fifo_addr_types type;
5536 /******************************** Constants **********************************/
/* Maximum length of a single parsed message; also sizes s_temp_buf */
#define MAX_MSG_LEN 1024
/* MCP trace: printed module names longer than this are truncated */
#define MCP_TRACE_MAX_MODULE_LEN 8
/* MCP trace: a trace entry carries at most this many parameters */
#define MCP_TRACE_FORMAT_MAX_PARAMS 3
/* Bit width of one parameter-size field inside a format descriptor */
#define MCP_TRACE_FORMAT_PARAM_WIDTH \
(MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)
/* GRC FIFO element addresses are in dwords; multiply to get bytes */
#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
/* VF field value that marks a PF access (no VF) - presumably; confirm
 * against the reg FIFO parsing code
 */
#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
/* Protection override window addresses are in dwords; multiply for bytes */
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
5550 /***************************** Constant Arrays *******************************/
/* Status string array - one human-readable string per enum dbg_status value,
 * in enum order. NOTE(review): a few elided entries (OK comment, "Unknown
 * chip", the two reserved placeholders) restored from canonical qed_debug.c.
 */
static const char * const s_status_str[] = {
	/* DBG_STATUS_OK */
	"Operation completed successfully",

	/* DBG_STATUS_APP_VERSION_NOT_SET */
	"Debug application version wasn't set",

	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
	"Unsupported debug application version",

	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
	"The debug block wasn't reset since the last recording",

	/* DBG_STATUS_INVALID_ARGS */
	"Invalid arguments",

	/* DBG_STATUS_OUTPUT_ALREADY_SET */
	"The debug output was already set",

	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
	"Invalid PCI buffer size",

	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
	"PCI buffer allocation failed",

	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
	"A PCI buffer wasn't allocated",

	/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
	"The filter/trigger constraint dword offsets are not enabled for recording",

	/* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
	"No matching framing mode",

	/* DBG_STATUS_VFC_READ_ERROR */
	"Error reading from VFC",

	/* DBG_STATUS_STORM_ALREADY_ENABLED */
	"The Storm was already enabled",

	/* DBG_STATUS_STORM_NOT_ENABLED */
	"The specified Storm wasn't enabled",

	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
	"The block was already enabled",

	/* DBG_STATUS_BLOCK_NOT_ENABLED */
	"The specified block wasn't enabled",

	/* DBG_STATUS_NO_INPUT_ENABLED */
	"No input was enabled for recording",

	/* DBG_STATUS_NO_FILTER_TRIGGER_256B */
	"Filters and triggers are not allowed in E4 256-bit mode",

	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
	"The filter was already enabled",

	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
	"The trigger was already enabled",

	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
	"The trigger wasn't enabled",

	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
	"A constraint can be added only after a filter was enabled or a trigger state was added",

	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
	"Cannot add more than 3 trigger states",

	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
	"Cannot add more than 4 constraints per filter or trigger state",

	/* DBG_STATUS_RECORDING_NOT_STARTED */
	"The recording wasn't started",

	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
	"A trigger was configured, but it didn't trigger",

	/* DBG_STATUS_NO_DATA_RECORDED */
	"No data was recorded",

	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
	"Dump buffer is too small",

	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
	"Dumped data is not aligned to chunks",

	/* DBG_STATUS_UNKNOWN_CHIP */
	"Unknown chip",

	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
	"Failed allocating virtual memory",

	/* DBG_STATUS_BLOCK_IN_RESET */
	"The input block is in reset",

	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
	"Invalid MCP trace signature found in NVRAM",

	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
	"Invalid bundle ID found in NVRAM",

	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
	"Failed getting NVRAM image",

	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
	"NVRAM image is not dword-aligned",

	/* DBG_STATUS_NVRAM_READ_FAILED */
	"Failed reading from NVRAM",

	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
	"Idle check parsing failed",

	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
	"MCP Trace data is corrupt",

	/* DBG_STATUS_MCP_TRACE_NO_META */
	"Dump doesn't contain meta data - it must be provided in image file",

	/* DBG_STATUS_MCP_COULD_NOT_HALT */
	"Failed to halt MCP",

	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
	"Failed to resume MCP after halt",

	/* DBG_STATUS_RESERVED0 */
	"",

	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
	"Failed to empty SEMI sync FIFO",

	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
	"IGU FIFO data is corrupt",

	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
	"MCP failed to mask parities",

	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
	"FW Asserts parsing failed",

	/* DBG_STATUS_REG_FIFO_BAD_DATA */
	"GRC FIFO data is corrupt",

	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
	"Protection Override data is corrupt",

	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",

	/* DBG_STATUS_RESERVED1 */
	"",

	/* DBG_STATUS_NON_MATCHING_LINES */
	"Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",

	/* DBG_STATUS_INSUFFICIENT_HW_IDS */
	"Insufficient HW IDs. Try to record less Storms/blocks",

	/* DBG_STATUS_DBG_BUS_IN_USE */
	"The debug bus is in use",

	/* DBG_STATUS_INVALID_STORM_DBG_MODE */
	"The storm debug mode is not supported in the current chip",

	/* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
	"Other engine is supported only in BB",

	/* DBG_STATUS_FILTER_SINGLE_HW_ID */
	"The configured filter mode requires a single Storm/block input",

	/* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
	"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",

	/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
	"When triggering on Storm data, the Storm to trigger on must be specified"
};
5731 /* Idle check severity names array */
5732 static const char * const s_idle_chk_severity_str[] = {
5734 "Error if no traffic",
5738 /* MCP Trace level names array */
5739 static const char * const s_mcp_trace_level_str[] = {
5745 /* Access type names array */
5746 static const char * const s_access_strs[] = {
5751 /* Privilege type names array */
5752 static const char * const s_privilege_strs[] = {
5759 /* Protection type names array */
5760 static const char * const s_protection_strs[] = {
5771 /* Master type names array */
5772 static const char * const s_master_strs[] = {
5791 /* REG FIFO error messages array */
5792 static struct reg_fifo_err s_reg_fifo_errors[] = {
5794 {2, "address doesn't belong to any block"},
5795 {4, "reserved address in block or write to read-only address"},
5796 {8, "privilege/protection mismatch"},
5797 {16, "path isolation error"},
5801 /* IGU FIFO sources array */
5802 static const char * const s_igu_fifo_source_strs[] = {
/* IGU FIFO error messages - indexed by the element's error-type field.
 * NOTE(review): elided entries restored from canonical qed_debug.c.
 */
static const char * const s_igu_fifo_error_strs[] = {
	"no error",
	"length error",
	"function disabled",
	"VF sent command to attention address",
	"host sent prod update command",
	"read of during interrupt register while in MIMD mode",
	"access to PXP BAR reserved address",
	"producer update command to attention index",
	"unknown error",
	"SB index not valid",
	"SB relative index and FID not found",
	"FID not match",
	"command with error flag asserted (PCI error or CAU discard)",
	"VF sent cleanup and RF cleanup is disabled",
	"cleanup command on type bigger than 4"
};
5835 /* IGU FIFO address data */
5836 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
5837 {0x0, 0x101, "MSI-X Memory", NULL,
5838 IGU_ADDR_TYPE_MSIX_MEM},
5839 {0x102, 0x1ff, "reserved", NULL,
5840 IGU_ADDR_TYPE_RESERVED},
5841 {0x200, 0x200, "Write PBA[0:63]", NULL,
5842 IGU_ADDR_TYPE_WRITE_PBA},
5843 {0x201, 0x201, "Write PBA[64:127]", "reserved",
5844 IGU_ADDR_TYPE_WRITE_PBA},
5845 {0x202, 0x202, "Write PBA[128]", "reserved",
5846 IGU_ADDR_TYPE_WRITE_PBA},
5847 {0x203, 0x3ff, "reserved", NULL,
5848 IGU_ADDR_TYPE_RESERVED},
5849 {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
5850 IGU_ADDR_TYPE_WRITE_INT_ACK},
5851 {0x5f0, 0x5f0, "Attention bits update", NULL,
5852 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5853 {0x5f1, 0x5f1, "Attention bits set", NULL,
5854 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5855 {0x5f2, 0x5f2, "Attention bits clear", NULL,
5856 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5857 {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
5858 IGU_ADDR_TYPE_READ_INT},
5859 {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
5860 IGU_ADDR_TYPE_READ_INT},
5861 {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
5862 IGU_ADDR_TYPE_READ_INT},
5863 {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
5864 IGU_ADDR_TYPE_READ_INT},
5865 {0x5f7, 0x5ff, "reserved", NULL,
5866 IGU_ADDR_TYPE_RESERVED},
5867 {0x600, 0x7ff, "Producer update", NULL,
5868 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
5871 /******************************** Variables **********************************/
/* Temporary buffer, used for print size calculations: when a caller passes a
 * NULL results buffer, qed_get_buf_ptr() redirects formatted output here so
 * only the length is measured.
 */
static char s_temp_buf[MAX_MSG_LEN];
5876 /**************************** Private Functions ******************************/
5878 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
5880 return (a + b) % size;
5883 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
5885 return (size + a - b) % size;
5888 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
5889 * bytes) and returns them as a dword value. the specified buffer offset is
5892 static u32 qed_read_from_cyclic_buf(void *buf,
5894 u32 buf_size, u8 num_bytes_to_read)
5896 u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
5899 val_ptr = (u8 *)&val;
5901 /* Assume running on a LITTLE ENDIAN and the buffer is network order
5902 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
5904 for (i = 0; i < num_bytes_to_read; i++) {
5905 val_ptr[i] = bytes_buf[*offset];
5906 *offset = qed_cyclic_add(*offset, 1, buf_size);
5912 /* Reads and returns the next byte from the specified buffer.
5913 * The specified buffer offset is updated.
5915 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
5917 return ((u8 *)buf)[(*offset)++];
5920 /* Reads and returns the next dword from the specified buffer.
5921 * The specified buffer offset is updated.
5923 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
5925 u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
5932 /* Reads the next string from the specified buffer, and copies it to the
5933 * specified pointer. The specified buffer offset is updated.
5935 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
5937 const char *source_str = &((const char *)buf)[*offset];
5939 strncpy(dest, source_str, size);
5940 dest[size - 1] = '\0';
5944 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
5945 * If the specified buffer in NULL, a temporary buffer pointer is returned.
5947 static char *qed_get_buf_ptr(void *buf, u32 offset)
5949 return buf ? (char *)buf + offset : s_temp_buf;
5952 /* Reads a param from the specified buffer. Returns the number of dwords read.
5953 * If the returned str_param is NULL, the param is numeric and its value is
5954 * returned in num_param.
5955 * Otheriwise, the param is a string and its pointer is returned in str_param.
5957 static u32 qed_read_param(u32 *dump_buf,
5958 const char **param_name,
5959 const char **param_str_val, u32 *param_num_val)
5961 char *char_buf = (char *)dump_buf;
5964 /* Extract param name */
5965 *param_name = char_buf;
5966 offset += strlen(*param_name) + 1;
5968 /* Check param type */
5969 if (*(char_buf + offset++)) {
5971 *param_str_val = char_buf + offset;
5973 offset += strlen(*param_str_val) + 1;
5975 offset += (4 - (offset & 0x3));
5978 *param_str_val = NULL;
5980 offset += (4 - (offset & 0x3));
5981 *param_num_val = *(u32 *)(char_buf + offset);
5985 return (u32)offset / 4;
5988 /* Reads a section header from the specified buffer.
5989 * Returns the number of dwords read.
5991 static u32 qed_read_section_hdr(u32 *dump_buf,
5992 const char **section_name,
5993 u32 *num_section_params)
5995 const char *param_str_val;
5997 return qed_read_param(dump_buf,
5998 section_name, ¶m_str_val, num_section_params);
6001 /* Reads section params from the specified buffer and prints them to the results
6002 * buffer. Returns the number of dwords read.
6004 static u32 qed_print_section_params(u32 *dump_buf,
6005 u32 num_section_params,
6006 char *results_buf, u32 *num_chars_printed)
6008 u32 i, dump_offset = 0, results_offset = 0;
6010 for (i = 0; i < num_section_params; i++) {
6011 const char *param_name, *param_str_val;
6012 u32 param_num_val = 0;
6014 dump_offset += qed_read_param(dump_buf + dump_offset,
6016 ¶m_str_val, ¶m_num_val);
6020 sprintf(qed_get_buf_ptr(results_buf,
6022 "%s: %s\n", param_name, param_str_val);
6023 else if (strcmp(param_name, "fw-timestamp"))
6025 sprintf(qed_get_buf_ptr(results_buf,
6027 "%s: %d\n", param_name, param_num_val);
6030 results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6033 *num_chars_printed = results_offset;
6038 /* Returns the block name that matches the specified block ID,
6039 * or NULL if not found.
6041 static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
6042 enum block_id block_id)
6044 const struct dbg_block_user *block =
6045 (const struct dbg_block_user *)
6046 p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
6048 return (const char *)block->name;
6051 static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
6054 return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6057 /* Parses the idle check rules and returns the number of characters printed.
6058 * In case of parsing error, returns 0.
6060 static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
6064 bool print_fw_idle_chk,
6066 u32 *num_errors, u32 *num_warnings)
6068 /* Offset in results_buf in bytes */
6069 u32 results_offset = 0;
6077 /* Go over dumped results */
6078 for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6080 const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6081 struct dbg_idle_chk_result_hdr *hdr;
6082 const char *parsing_str, *lsi_msg;
6083 u32 parsing_str_offset;
6087 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6089 (const struct dbg_idle_chk_rule_parsing_data *)
6090 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
6092 parsing_str_offset =
6093 GET_FIELD(rule_parsing_data->data,
6094 DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6096 GET_FIELD(rule_parsing_data->data,
6097 DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6098 parsing_str = (const char *)
6099 p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
6101 lsi_msg = parsing_str;
6104 if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6107 /* Skip rule header */
6108 dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6110 /* Update errors/warnings count */
6111 if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6112 hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6117 /* Print rule severity */
6119 sprintf(qed_get_buf_ptr(results_buf,
6120 results_offset), "%s: ",
6121 s_idle_chk_severity_str[hdr->severity]);
6123 /* Print rule message */
6125 parsing_str += strlen(parsing_str) + 1;
6127 sprintf(qed_get_buf_ptr(results_buf,
6128 results_offset), "%s.",
6130 print_fw_idle_chk ? parsing_str : lsi_msg);
6131 parsing_str += strlen(parsing_str) + 1;
6133 /* Print register values */
6135 sprintf(qed_get_buf_ptr(results_buf,
6136 results_offset), " Registers:");
6138 i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6140 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6145 (struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6146 is_mem = GET_FIELD(reg_hdr->data,
6147 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6148 reg_id = GET_FIELD(reg_hdr->data,
6149 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6151 /* Skip reg header */
6152 dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6154 /* Skip register names until the required reg_id is
6157 for (; reg_id > curr_reg_id;
6159 parsing_str += strlen(parsing_str) + 1);
6162 sprintf(qed_get_buf_ptr(results_buf,
6163 results_offset), " %s",
6165 if (i < hdr->num_dumped_cond_regs && is_mem)
6167 sprintf(qed_get_buf_ptr(results_buf,
6169 "[%d]", hdr->mem_entry_id +
6170 reg_hdr->start_entry);
6172 sprintf(qed_get_buf_ptr(results_buf,
6173 results_offset), "=");
6174 for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6176 sprintf(qed_get_buf_ptr(results_buf,
6179 if (j < reg_hdr->size - 1)
6181 sprintf(qed_get_buf_ptr
6183 results_offset), ",");
6188 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6191 /* Check if end of dump buffer was exceeded */
6192 if (dump_buf > dump_buf_end)
6195 return results_offset;
6198 /* Parses an idle check dump buffer.
6199 * If result_buf is not NULL, the idle check results are printed to it.
6200 * In any case, the required results buffer size is assigned to
6201 * parsed_results_bytes.
6202 * The parsing status is returned.
6204 static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
6206 u32 num_dumped_dwords,
6208 u32 *parsed_results_bytes,
6212 const char *section_name, *param_name, *param_str_val;
6213 u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6214 u32 num_section_params = 0, num_rules;
6216 /* Offset in results_buf in bytes */
6217 u32 results_offset = 0;
6219 *parsed_results_bytes = 0;
6223 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6224 !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6225 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6227 /* Read global_params section */
6228 dump_buf += qed_read_section_hdr(dump_buf,
6229 §ion_name, &num_section_params);
6230 if (strcmp(section_name, "global_params"))
6231 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6233 /* Print global params */
6234 dump_buf += qed_print_section_params(dump_buf,
6236 results_buf, &results_offset);
6238 /* Read idle_chk section */
6239 dump_buf += qed_read_section_hdr(dump_buf,
6240 §ion_name, &num_section_params);
6241 if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6242 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6243 dump_buf += qed_read_param(dump_buf,
6244 ¶m_name, ¶m_str_val, &num_rules);
6245 if (strcmp(param_name, "num_rules"))
6246 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6249 u32 rules_print_size;
6251 /* Print FW output */
6253 sprintf(qed_get_buf_ptr(results_buf,
6255 "FW_IDLE_CHECK:\n");
6257 qed_parse_idle_chk_dump_rules(p_hwfn,
6268 results_offset += rules_print_size;
6269 if (!rules_print_size)
6270 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6272 /* Print LSI output */
6274 sprintf(qed_get_buf_ptr(results_buf,
6276 "\nLSI_IDLE_CHECK:\n");
6278 qed_parse_idle_chk_dump_rules(p_hwfn,
6289 results_offset += rules_print_size;
6290 if (!rules_print_size)
6291 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6294 /* Print errors/warnings count */
6297 sprintf(qed_get_buf_ptr(results_buf,
6299 "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6300 *num_errors, *num_warnings);
6301 else if (*num_warnings)
6303 sprintf(qed_get_buf_ptr(results_buf,
6305 "\nIdle Check completed successfully (with %d warnings)\n",
6309 sprintf(qed_get_buf_ptr(results_buf,
6311 "\nIdle Check completed successfully\n");
6313 /* Add 1 for string NULL termination */
6314 *parsed_results_bytes = results_offset + 1;
6316 return DBG_STATUS_OK;
6319 /* Allocates and fills MCP Trace meta data based on the specified meta data
6321 * Returns debug status code.
6323 static enum dbg_status
6324 qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6325 const u32 *meta_buf)
6327 struct dbg_tools_user_data *dev_user_data;
6328 u32 offset = 0, signature, i;
6329 struct mcp_trace_meta *meta;
6332 dev_user_data = qed_dbg_get_user_data(p_hwfn);
6333 meta = &dev_user_data->mcp_trace_meta;
6334 meta_buf_bytes = (u8 *)meta_buf;
6336 /* Free the previous meta before loading a new one. */
6337 if (meta->is_allocated)
6338 qed_mcp_trace_free_meta_data(p_hwfn);
6340 memset(meta, 0, sizeof(*meta));
6342 /* Read first signature */
6343 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6344 if (signature != NVM_MAGIC_VALUE)
6345 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6347 /* Read no. of modules and allocate memory for their pointers */
6348 meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6349 meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6352 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6354 /* Allocate and read all module strings */
6355 for (i = 0; i < meta->modules_num; i++) {
6356 u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6358 *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6359 if (!(*(meta->modules + i))) {
6360 /* Update number of modules to be released */
6361 meta->modules_num = i ? i - 1 : 0;
6362 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6365 qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6366 *(meta->modules + i));
6367 if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6368 (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6371 /* Read second signature */
6372 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6373 if (signature != NVM_MAGIC_VALUE)
6374 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6376 /* Read number of formats and allocate memory for all formats */
6377 meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6378 meta->formats = kcalloc(meta->formats_num,
6379 sizeof(struct mcp_trace_format),
6382 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6384 /* Allocate and read all strings */
6385 for (i = 0; i < meta->formats_num; i++) {
6386 struct mcp_trace_format *format_ptr = &meta->formats[i];
6389 format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6391 format_len = GET_MFW_FIELD(format_ptr->data,
6392 MCP_TRACE_FORMAT_LEN);
6393 format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6394 if (!format_ptr->format_str) {
6395 /* Update number of modules to be released */
6396 meta->formats_num = i ? i - 1 : 0;
6397 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6400 qed_read_str_from_buf(meta_buf_bytes,
6402 format_len, format_ptr->format_str);
6405 meta->is_allocated = true;
6406 return DBG_STATUS_OK;
6409 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6410 * are printed to it. The parsing status is returned.
6412 * trace_buf - MCP trace cyclic buffer
6413 * trace_buf_size - MCP trace cyclic buffer size in bytes
6414 * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6416 * data_size - size in bytes of data to parse.
6417 * parsed_buf - destination buffer for parsed data.
6418 * parsed_results_bytes - size of parsed data in bytes.
6420 static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6426 u32 *parsed_results_bytes)
6428 struct dbg_tools_user_data *dev_user_data;
6429 struct mcp_trace_meta *meta;
6430 u32 param_mask, param_shift;
6431 enum dbg_status status;
6433 dev_user_data = qed_dbg_get_user_data(p_hwfn);
6434 meta = &dev_user_data->mcp_trace_meta;
6435 *parsed_results_bytes = 0;
6437 if (!meta->is_allocated)
6438 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6440 status = DBG_STATUS_OK;
6443 struct mcp_trace_format *format_ptr;
6444 u8 format_level, format_module;
6445 u32 params[3] = { 0, 0, 0 };
6446 u32 header, format_idx, i;
6448 if (data_size < MFW_TRACE_ENTRY_SIZE)
6449 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6451 header = qed_read_from_cyclic_buf(trace_buf,
6454 MFW_TRACE_ENTRY_SIZE);
6455 data_size -= MFW_TRACE_ENTRY_SIZE;
6456 format_idx = header & MFW_TRACE_EVENTID_MASK;
6458 /* Skip message if its index doesn't exist in the meta data */
6459 if (format_idx >= meta->formats_num) {
6460 u8 format_size = (u8)GET_MFW_FIELD(header,
6461 MFW_TRACE_PRM_SIZE);
6463 if (data_size < format_size)
6464 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6466 data_offset = qed_cyclic_add(data_offset,
6469 data_size -= format_size;
6473 format_ptr = &meta->formats[format_idx];
6476 param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6477 MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
6478 i < MCP_TRACE_FORMAT_MAX_PARAMS;
6479 i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6480 param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6481 /* Extract param size (0..3) */
6482 u8 param_size = (u8)((format_ptr->data & param_mask) >>
6485 /* If the param size is zero, there are no other
6491 /* Size is encoded using 2 bits, where 3 is used to
6494 if (param_size == 3)
6497 if (data_size < param_size)
6498 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6500 params[i] = qed_read_from_cyclic_buf(trace_buf,
6504 data_size -= param_size;
6507 format_level = (u8)GET_MFW_FIELD(format_ptr->data,
6508 MCP_TRACE_FORMAT_LEVEL);
6509 format_module = (u8)GET_MFW_FIELD(format_ptr->data,
6510 MCP_TRACE_FORMAT_MODULE);
6511 if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6512 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6514 /* Print current message to results buffer */
6515 *parsed_results_bytes +=
6516 sprintf(qed_get_buf_ptr(parsed_buf,
6517 *parsed_results_bytes),
6519 s_mcp_trace_level_str[format_level],
6520 meta->modules[format_module]);
6521 *parsed_results_bytes +=
6522 sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6523 format_ptr->format_str,
6524 params[0], params[1], params[2]);
6527 /* Add string NULL terminator */
6528 (*parsed_results_bytes)++;
6533 /* Parses an MCP Trace dump buffer.
6534 * If result_buf is not NULL, the MCP Trace results are printed to it.
6535 * In any case, the required results buffer size is assigned to
6536 * parsed_results_bytes.
6537 * The parsing status is returned.
6539 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6542 u32 *parsed_results_bytes,
6543 bool free_meta_data)
6545 const char *section_name, *param_name, *param_str_val;
6546 u32 data_size, trace_data_dwords, trace_meta_dwords;
6547 u32 offset, results_offset, results_buf_bytes;
6548 u32 param_num_val, num_section_params;
6549 struct mcp_trace *trace;
6550 enum dbg_status status;
6551 const u32 *meta_buf;
6554 *parsed_results_bytes = 0;
6556 /* Read global_params section */
6557 dump_buf += qed_read_section_hdr(dump_buf,
6558 §ion_name, &num_section_params);
6559 if (strcmp(section_name, "global_params"))
6560 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6562 /* Print global params */
6563 dump_buf += qed_print_section_params(dump_buf,
6565 results_buf, &results_offset);
6567 /* Read trace_data section */
6568 dump_buf += qed_read_section_hdr(dump_buf,
6569 §ion_name, &num_section_params);
6570 if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6571 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6572 dump_buf += qed_read_param(dump_buf,
6573 ¶m_name, ¶m_str_val, ¶m_num_val);
6574 if (strcmp(param_name, "size"))
6575 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6576 trace_data_dwords = param_num_val;
6578 /* Prepare trace info */
6579 trace = (struct mcp_trace *)dump_buf;
6580 if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
6581 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6583 trace_buf = (u8 *)dump_buf + sizeof(*trace);
6584 offset = trace->trace_oldest;
6585 data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6586 dump_buf += trace_data_dwords;
6588 /* Read meta_data section */
6589 dump_buf += qed_read_section_hdr(dump_buf,
6590 §ion_name, &num_section_params);
6591 if (strcmp(section_name, "mcp_trace_meta"))
6592 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6593 dump_buf += qed_read_param(dump_buf,
6594 ¶m_name, ¶m_str_val, ¶m_num_val);
6595 if (strcmp(param_name, "size"))
6596 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6597 trace_meta_dwords = param_num_val;
6599 /* Choose meta data buffer */
6600 if (!trace_meta_dwords) {
6601 /* Dump doesn't include meta data */
6602 struct dbg_tools_user_data *dev_user_data =
6603 qed_dbg_get_user_data(p_hwfn);
6605 if (!dev_user_data->mcp_trace_user_meta_buf)
6606 return DBG_STATUS_MCP_TRACE_NO_META;
6608 meta_buf = dev_user_data->mcp_trace_user_meta_buf;
6610 /* Dump includes meta data */
6611 meta_buf = dump_buf;
6614 /* Allocate meta data memory */
6615 status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
6616 if (status != DBG_STATUS_OK)
6619 status = qed_parse_mcp_trace_buf(p_hwfn,
6625 results_buf + results_offset :
6627 &results_buf_bytes);
6628 if (status != DBG_STATUS_OK)
6632 qed_mcp_trace_free_meta_data(p_hwfn);
6634 *parsed_results_bytes = results_offset + results_buf_bytes;
6636 return DBG_STATUS_OK;
6639 /* Parses a Reg FIFO dump buffer.
6640 * If result_buf is not NULL, the Reg FIFO results are printed to it.
6641 * In any case, the required results buffer size is assigned to
6642 * parsed_results_bytes.
6643 * The parsing status is returned.
6645 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6647 u32 *parsed_results_bytes)
6649 const char *section_name, *param_name, *param_str_val;
6650 u32 param_num_val, num_section_params, num_elements;
6651 struct reg_fifo_element *elements;
6652 u8 i, j, err_code, vf_val;
6653 u32 results_offset = 0;
6656 /* Read global_params section */
6657 dump_buf += qed_read_section_hdr(dump_buf,
6658 §ion_name, &num_section_params);
6659 if (strcmp(section_name, "global_params"))
6660 return DBG_STATUS_REG_FIFO_BAD_DATA;
6662 /* Print global params */
6663 dump_buf += qed_print_section_params(dump_buf,
6665 results_buf, &results_offset);
6667 /* Read reg_fifo_data section */
6668 dump_buf += qed_read_section_hdr(dump_buf,
6669 §ion_name, &num_section_params);
6670 if (strcmp(section_name, "reg_fifo_data"))
6671 return DBG_STATUS_REG_FIFO_BAD_DATA;
6672 dump_buf += qed_read_param(dump_buf,
6673 ¶m_name, ¶m_str_val, ¶m_num_val);
6674 if (strcmp(param_name, "size"))
6675 return DBG_STATUS_REG_FIFO_BAD_DATA;
6676 if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6677 return DBG_STATUS_REG_FIFO_BAD_DATA;
6678 num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6679 elements = (struct reg_fifo_element *)dump_buf;
6681 /* Decode elements */
6682 for (i = 0; i < num_elements; i++) {
6683 const char *err_msg = NULL;
6685 /* Discover if element belongs to a VF or a PF */
6686 vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6687 if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6688 sprintf(vf_str, "%s", "N/A");
6690 sprintf(vf_str, "%d", vf_val);
6692 /* Find error message */
6693 err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
6694 for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
6695 if (err_code == s_reg_fifo_errors[j].err_code)
6696 err_msg = s_reg_fifo_errors[j].err_msg;
6698 /* Add parsed element to parsed buffer */
6700 sprintf(qed_get_buf_ptr(results_buf,
6702 "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
6704 (u32)GET_FIELD(elements[i].data,
6705 REG_FIFO_ELEMENT_ADDRESS) *
6706 REG_FIFO_ELEMENT_ADDR_FACTOR,
6707 s_access_strs[GET_FIELD(elements[i].data,
6708 REG_FIFO_ELEMENT_ACCESS)],
6709 (u32)GET_FIELD(elements[i].data,
6710 REG_FIFO_ELEMENT_PF),
6712 (u32)GET_FIELD(elements[i].data,
6713 REG_FIFO_ELEMENT_PORT),
6714 s_privilege_strs[GET_FIELD(elements[i].data,
6715 REG_FIFO_ELEMENT_PRIVILEGE)],
6716 s_protection_strs[GET_FIELD(elements[i].data,
6717 REG_FIFO_ELEMENT_PROTECTION)],
6718 s_master_strs[GET_FIELD(elements[i].data,
6719 REG_FIFO_ELEMENT_MASTER)],
6720 err_msg ? err_msg : "unknown error code");
6723 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6725 "fifo contained %d elements", num_elements);
6727 /* Add 1 for string NULL termination */
6728 *parsed_results_bytes = results_offset + 1;
6730 return DBG_STATUS_OK;
6733 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
6736 u32 *results_offset)
6738 const struct igu_fifo_addr_data *found_addr = NULL;
6739 u8 source, err_type, i, is_cleanup;
6740 char parsed_addr_data[32];
6741 char parsed_wr_data[256];
6742 u32 wr_data, prod_cons;
6743 bool is_wr_cmd, is_pf;
6747 /* Dword12 (dword index 1 and 2) contains bits 32..95 of the
6750 dword12 = ((u64)element->dword2 << 32) | element->dword1;
6751 is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6752 is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6753 cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6754 source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6755 err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
6757 if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6758 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6759 if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6760 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6762 /* Find address data */
6763 for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
6764 const struct igu_fifo_addr_data *curr_addr =
6765 &s_igu_fifo_addr_data[i];
6767 if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
6768 curr_addr->end_addr)
6769 found_addr = curr_addr;
6773 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6775 /* Prepare parsed address data */
6776 switch (found_addr->type) {
6777 case IGU_ADDR_TYPE_MSIX_MEM:
6778 sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
6780 case IGU_ADDR_TYPE_WRITE_INT_ACK:
6781 case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6782 sprintf(parsed_addr_data,
6783 " SB = 0x%x", cmd_addr - found_addr->start_addr);
6786 parsed_addr_data[0] = '\0';
6790 parsed_wr_data[0] = '\0';
6794 /* Prepare parsed write data */
6795 wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
6796 prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
6797 is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
6799 if (source == IGU_SRC_ATTN) {
6800 sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
6803 u8 cleanup_val, cleanup_type;
6807 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
6810 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
6812 sprintf(parsed_wr_data,
6813 "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
6814 cleanup_val ? "set" : "clear",
6817 u8 update_flag, en_dis_int_for_sb, segment;
6820 update_flag = GET_FIELD(wr_data,
6821 IGU_FIFO_WR_DATA_UPDATE_FLAG);
6824 IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
6825 segment = GET_FIELD(wr_data,
6826 IGU_FIFO_WR_DATA_SEGMENT);
6827 timer_mask = GET_FIELD(wr_data,
6828 IGU_FIFO_WR_DATA_TIMER_MASK);
6830 sprintf(parsed_wr_data,
6831 "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
6833 update_flag ? "update" : "nop",
6835 (en_dis_int_for_sb == 1 ? "disable" : "nop") :
6837 segment ? "attn" : "regular",
6842 /* Add parsed element to parsed buffer */
6843 *results_offset += sprintf(qed_get_buf_ptr(results_buf,
6845 "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
6846 element->dword2, element->dword1,
6848 is_pf ? "pf" : "vf",
6849 GET_FIELD(element->dword0,
6850 IGU_FIFO_ELEMENT_DWORD0_FID),
6851 s_igu_fifo_source_strs[source],
6852 is_wr_cmd ? "wr" : "rd",
6854 (!is_pf && found_addr->vf_desc)
6855 ? found_addr->vf_desc
6859 s_igu_fifo_error_strs[err_type]);
6861 return DBG_STATUS_OK;
6864 /* Parses an IGU FIFO dump buffer.
6865 * If result_buf is not NULL, the IGU FIFO results are printed to it.
6866 * In any case, the required results buffer size is assigned to
6867 * parsed_results_bytes.
6868 * The parsing status is returned.
6870 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
6872 u32 *parsed_results_bytes)
6874 const char *section_name, *param_name, *param_str_val;
6875 u32 param_num_val, num_section_params, num_elements;
6876 struct igu_fifo_element *elements;
6877 enum dbg_status status;
6878 u32 results_offset = 0;
6881 /* Read global_params section */
6882 dump_buf += qed_read_section_hdr(dump_buf,
6883 §ion_name, &num_section_params);
6884 if (strcmp(section_name, "global_params"))
6885 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6887 /* Print global params */
6888 dump_buf += qed_print_section_params(dump_buf,
6890 results_buf, &results_offset);
6892 /* Read igu_fifo_data section */
6893 dump_buf += qed_read_section_hdr(dump_buf,
6894 §ion_name, &num_section_params);
6895 if (strcmp(section_name, "igu_fifo_data"))
6896 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6897 dump_buf += qed_read_param(dump_buf,
6898 ¶m_name, ¶m_str_val, ¶m_num_val);
6899 if (strcmp(param_name, "size"))
6900 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6901 if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
6902 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6903 num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
6904 elements = (struct igu_fifo_element *)dump_buf;
6906 /* Decode elements */
6907 for (i = 0; i < num_elements; i++) {
6908 status = qed_parse_igu_fifo_element(&elements[i],
6911 if (status != DBG_STATUS_OK)
6915 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6917 "fifo contained %d elements", num_elements);
6919 /* Add 1 for string NULL termination */
6920 *parsed_results_bytes = results_offset + 1;
6922 return DBG_STATUS_OK;
6925 static enum dbg_status
6926 qed_parse_protection_override_dump(u32 *dump_buf,
6928 u32 *parsed_results_bytes)
6930 const char *section_name, *param_name, *param_str_val;
6931 u32 param_num_val, num_section_params, num_elements;
6932 struct protection_override_element *elements;
6933 u32 results_offset = 0;
6936 /* Read global_params section */
6937 dump_buf += qed_read_section_hdr(dump_buf,
6938 §ion_name, &num_section_params);
6939 if (strcmp(section_name, "global_params"))
6940 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6942 /* Print global params */
6943 dump_buf += qed_print_section_params(dump_buf,
6945 results_buf, &results_offset);
6947 /* Read protection_override_data section */
6948 dump_buf += qed_read_section_hdr(dump_buf,
6949 §ion_name, &num_section_params);
6950 if (strcmp(section_name, "protection_override_data"))
6951 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6952 dump_buf += qed_read_param(dump_buf,
6953 ¶m_name, ¶m_str_val, ¶m_num_val);
6954 if (strcmp(param_name, "size"))
6955 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6956 if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
6957 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6958 num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
6959 elements = (struct protection_override_element *)dump_buf;
6961 /* Decode elements */
6962 for (i = 0; i < num_elements; i++) {
6963 u32 address = GET_FIELD(elements[i].data,
6964 PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
6965 PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
6968 sprintf(qed_get_buf_ptr(results_buf,
6970 "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
6972 (u32)GET_FIELD(elements[i].data,
6973 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
6974 (u32)GET_FIELD(elements[i].data,
6975 PROTECTION_OVERRIDE_ELEMENT_READ),
6976 (u32)GET_FIELD(elements[i].data,
6977 PROTECTION_OVERRIDE_ELEMENT_WRITE),
6978 s_protection_strs[GET_FIELD(elements[i].data,
6979 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
6980 s_protection_strs[GET_FIELD(elements[i].data,
6981 PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
6984 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6986 "protection override contained %d elements",
6989 /* Add 1 for string NULL termination */
6990 *parsed_results_bytes = results_offset + 1;
6992 return DBG_STATUS_OK;
6995 /* Parses a FW Asserts dump buffer.
6996 * If result_buf is not NULL, the FW Asserts results are printed to it.
6997 * In any case, the required results buffer size is assigned to
6998 * parsed_results_bytes.
6999 * The parsing status is returned.
7001 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
7003 u32 *parsed_results_bytes)
7005 u32 num_section_params, param_num_val, i, results_offset = 0;
7006 const char *param_name, *param_str_val, *section_name;
7007 bool last_section_found = false;
7009 *parsed_results_bytes = 0;
7011 /* Read global_params section */
7012 dump_buf += qed_read_section_hdr(dump_buf,
7013 §ion_name, &num_section_params);
7014 if (strcmp(section_name, "global_params"))
7015 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7017 /* Print global params */
7018 dump_buf += qed_print_section_params(dump_buf,
7020 results_buf, &results_offset);
7022 while (!last_section_found) {
7023 dump_buf += qed_read_section_hdr(dump_buf,
7025 &num_section_params);
7026 if (!strcmp(section_name, "fw_asserts")) {
7027 /* Extract params */
7028 const char *storm_letter = NULL;
7029 u32 storm_dump_size = 0;
7031 for (i = 0; i < num_section_params; i++) {
7032 dump_buf += qed_read_param(dump_buf,
7036 if (!strcmp(param_name, "storm"))
7037 storm_letter = param_str_val;
7038 else if (!strcmp(param_name, "size"))
7039 storm_dump_size = param_num_val;
7042 DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7045 if (!storm_letter || !storm_dump_size)
7046 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7050 sprintf(qed_get_buf_ptr(results_buf,
7052 "\n%sSTORM_ASSERT: size=%d\n",
7053 storm_letter, storm_dump_size);
7054 for (i = 0; i < storm_dump_size; i++, dump_buf++)
7056 sprintf(qed_get_buf_ptr(results_buf,
7058 "%08x\n", *dump_buf);
7059 } else if (!strcmp(section_name, "last")) {
7060 last_section_found = true;
7062 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7066 /* Add 1 for string NULL termination */
7067 *parsed_results_bytes = results_offset + 1;
7069 return DBG_STATUS_OK;
7072 /***************************** Public Functions *******************************/
7074 enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
7075 const u8 * const bin_ptr)
7077 struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
7080 /* Convert binary data to debug arrays */
7081 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
7082 qed_set_dbg_bin_buf(p_hwfn,
7083 (enum bin_dbg_buffer_type)buf_id,
7084 (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
7085 buf_hdrs[buf_id].length);
7087 return DBG_STATUS_OK;
7090 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
7091 void **user_data_ptr)
7093 *user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
7095 if (!(*user_data_ptr))
7096 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7098 return DBG_STATUS_OK;
7101 const char *qed_dbg_get_status_str(enum dbg_status status)
7104 MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7107 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7109 u32 num_dumped_dwords,
7110 u32 *results_buf_size)
7112 u32 num_errors, num_warnings;
7114 return qed_parse_idle_chk_dump(p_hwfn,
7119 &num_errors, &num_warnings);
7122 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7124 u32 num_dumped_dwords,
7129 u32 parsed_buf_size;
7131 return qed_parse_idle_chk_dump(p_hwfn,
7136 num_errors, num_warnings);
7139 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7140 const u32 *meta_buf)
7142 struct dbg_tools_user_data *dev_user_data =
7143 qed_dbg_get_user_data(p_hwfn);
7145 dev_user_data->mcp_trace_user_meta_buf = meta_buf;
7148 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7150 u32 num_dumped_dwords,
7151 u32 *results_buf_size)
7153 return qed_parse_mcp_trace_dump(p_hwfn,
7154 dump_buf, NULL, results_buf_size, true);
7157 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7159 u32 num_dumped_dwords,
7162 u32 parsed_buf_size;
7164 return qed_parse_mcp_trace_dump(p_hwfn,
7166 results_buf, &parsed_buf_size, true);
7169 enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7173 u32 parsed_buf_size;
7175 return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7176 &parsed_buf_size, false);
7179 enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7181 u32 num_dumped_bytes,
7184 u32 parsed_results_bytes;
7186 return qed_parse_mcp_trace_buf(p_hwfn,
7191 results_buf, &parsed_results_bytes);
7194 /* Frees the specified MCP Trace meta data */
7195 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7197 struct dbg_tools_user_data *dev_user_data;
7198 struct mcp_trace_meta *meta;
7201 dev_user_data = qed_dbg_get_user_data(p_hwfn);
7202 meta = &dev_user_data->mcp_trace_meta;
7203 if (!meta->is_allocated)
7206 /* Release modules */
7207 if (meta->modules) {
7208 for (i = 0; i < meta->modules_num; i++)
7209 kfree(meta->modules[i]);
7210 kfree(meta->modules);
7213 /* Release formats */
7214 if (meta->formats) {
7215 for (i = 0; i < meta->formats_num; i++)
7216 kfree(meta->formats[i].format_str);
7217 kfree(meta->formats);
7220 meta->is_allocated = false;
7223 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7225 u32 num_dumped_dwords,
7226 u32 *results_buf_size)
7228 return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
7231 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7233 u32 num_dumped_dwords,
7236 u32 parsed_buf_size;
7238 return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7241 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7243 u32 num_dumped_dwords,
7244 u32 *results_buf_size)
7246 return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
7249 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7251 u32 num_dumped_dwords,
7254 u32 parsed_buf_size;
7256 return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
7260 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7262 u32 num_dumped_dwords,
7263 u32 *results_buf_size)
7265 return qed_parse_protection_override_dump(dump_buf,
7266 NULL, results_buf_size);
7269 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7271 u32 num_dumped_dwords,
7274 u32 parsed_buf_size;
7276 return qed_parse_protection_override_dump(dump_buf,
7281 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7283 u32 num_dumped_dwords,
7284 u32 *results_buf_size)
7286 return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
7289 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7291 u32 num_dumped_dwords,
7294 u32 parsed_buf_size;
7296 return qed_parse_fw_asserts_dump(dump_buf,
7297 results_buf, &parsed_buf_size);
7300 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7301 struct dbg_attn_block_result *results)
7303 const u32 *block_attn_name_offsets;
7304 const char *attn_name_base;
7305 const char *block_name;
7306 enum dbg_attn_type attn_type;
7309 num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7310 attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7311 block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
7313 return DBG_STATUS_INVALID_ARGS;
7315 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7316 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7317 !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7318 return DBG_STATUS_DBG_ARRAY_NOT_SET;
7320 block_attn_name_offsets =
7321 (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
7322 results->names_offset;
7324 attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;
7326 /* Go over registers with a non-zero attention status */
7327 for (i = 0; i < num_regs; i++) {
7328 struct dbg_attn_bit_mapping *bit_mapping;
7329 struct dbg_attn_reg_result *reg_result;
7330 u8 num_reg_attn, bit_idx = 0;
7332 reg_result = &results->reg_results[i];
7333 num_reg_attn = GET_FIELD(reg_result->data,
7334 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7335 bit_mapping = (struct dbg_attn_bit_mapping *)
7336 p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
7337 reg_result->block_attn_offset;
7339 /* Go over attention status bits */
7340 for (j = 0; j < num_reg_attn; j++, bit_idx++) {
7341 u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7342 DBG_ATTN_BIT_MAPPING_VAL);
7343 const char *attn_name, *attn_type_str, *masked_str;
7344 u32 attn_name_offset;
7347 /* Check if bit mask should be advanced (due to unused
7350 if (GET_FIELD(bit_mapping[j].data,
7351 DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7352 bit_idx += (u8)attn_idx_val;
7356 /* Check current bit index */
7357 if (!(reg_result->sts_val & BIT(bit_idx)))
7360 /* An attention bit with value=1 was found
7361 * Find attention name
7364 block_attn_name_offsets[attn_idx_val];
7365 attn_name = attn_name_base + attn_name_offset;
7368 ATTN_TYPE_INTERRUPT ? "Interrupt" :
7370 masked_str = reg_result->mask_val & BIT(bit_idx) ?
7372 sts_addr = GET_FIELD(reg_result->data,
7373 DBG_ATTN_REG_RESULT_STS_ADDRESS);
7375 "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7376 block_name, attn_type_str, attn_name,
7377 sts_addr * 4, bit_idx, masked_str);
7381 return DBG_STATUS_OK;
7384 static DEFINE_MUTEX(qed_dbg_lock);
7386 /* Wrapper for unifying the idle_chk and mcp_trace api */
7387 static enum dbg_status
7388 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7390 u32 num_dumped_dwords,
7393 u32 num_errors, num_warnnings;
7395 return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7396 results_buf, &num_errors,
7400 /* Feature meta data lookup table */
7403 enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7404 struct qed_ptt *p_ptt, u32 *size);
7405 enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7406 struct qed_ptt *p_ptt, u32 *dump_buf,
7407 u32 buf_size, u32 *dumped_dwords);
7408 enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7409 u32 *dump_buf, u32 num_dumped_dwords,
7411 enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7413 u32 num_dumped_dwords,
7414 u32 *results_buf_size);
7415 } qed_features_lookup[] = {
7417 "grc", qed_dbg_grc_get_dump_buf_size,
7418 qed_dbg_grc_dump, NULL, NULL}, {
7420 qed_dbg_idle_chk_get_dump_buf_size,
7421 qed_dbg_idle_chk_dump,
7422 qed_print_idle_chk_results_wrapper,
7423 qed_get_idle_chk_results_buf_size}, {
7425 qed_dbg_mcp_trace_get_dump_buf_size,
7426 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7427 qed_get_mcp_trace_results_buf_size}, {
7429 qed_dbg_reg_fifo_get_dump_buf_size,
7430 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7431 qed_get_reg_fifo_results_buf_size}, {
7433 qed_dbg_igu_fifo_get_dump_buf_size,
7434 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7435 qed_get_igu_fifo_results_buf_size}, {
7436 "protection_override",
7437 qed_dbg_protection_override_get_dump_buf_size,
7438 qed_dbg_protection_override_dump,
7439 qed_print_protection_override_results,
7440 qed_get_protection_override_results_buf_size}, {
7442 qed_dbg_fw_asserts_get_dump_buf_size,
7443 qed_dbg_fw_asserts_dump,
7444 qed_print_fw_asserts_results,
7445 qed_get_fw_asserts_results_buf_size}, {
7447 qed_dbg_ilt_get_dump_buf_size,
7448 qed_dbg_ilt_dump, NULL, NULL},};
7450 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7452 u32 i, precision = 80;
7457 pr_notice("\n%.*s", precision, p_text_buf);
7458 for (i = precision; i < text_size; i += precision)
7459 pr_cont("%.*s", precision, p_text_buf + i);
7463 #define QED_RESULTS_BUF_MIN_SIZE 16
7464 /* Generic function for decoding debug feature info */
7465 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7466 enum qed_dbg_features feature_idx)
7468 struct qed_dbg_feature *feature =
7469 &p_hwfn->cdev->dbg_features[feature_idx];
7470 u32 text_size_bytes, null_char_pos, i;
7474 /* Check if feature supports formatting capability */
7475 if (!qed_features_lookup[feature_idx].results_buf_size)
7476 return DBG_STATUS_OK;
7478 /* Obtain size of formatted output */
7479 rc = qed_features_lookup[feature_idx].
7480 results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7481 feature->dumped_dwords, &text_size_bytes);
7482 if (rc != DBG_STATUS_OK)
7485 /* Make sure that the allocated size is a multiple of dword (4 bytes) */
7486 null_char_pos = text_size_bytes - 1;
7487 text_size_bytes = (text_size_bytes + 3) & ~0x3;
7489 if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7490 DP_NOTICE(p_hwfn->cdev,
7491 "formatted size of feature was too small %d. Aborting\n",
7493 return DBG_STATUS_INVALID_ARGS;
7496 /* Allocate temp text buf */
7497 text_buf = vzalloc(text_size_bytes);
7499 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7501 /* Decode feature opcodes to string on temp buf */
7502 rc = qed_features_lookup[feature_idx].
7503 print_results(p_hwfn, (u32 *)feature->dump_buf,
7504 feature->dumped_dwords, text_buf);
7505 if (rc != DBG_STATUS_OK) {
7510 /* Replace the original null character with a '\n' character.
7511 * The bytes that were added as a result of the dword alignment are also
7512 * padded with '\n' characters.
7514 for (i = null_char_pos; i < text_size_bytes; i++)
7517 /* Dump printable feature to log */
7518 if (p_hwfn->cdev->print_dbg_data)
7519 qed_dbg_print_feature(text_buf, text_size_bytes);
7521 /* Just return the original binary buffer if requested */
7522 if (p_hwfn->cdev->dbg_bin_dump) {
7524 return DBG_STATUS_OK;
7527 /* Free the old dump_buf and point the dump_buf to the newly allocagted
7528 * and formatted text buffer.
7530 vfree(feature->dump_buf);
7531 feature->dump_buf = text_buf;
7532 feature->buf_size = text_size_bytes;
7533 feature->dumped_dwords = text_size_bytes / 4;
7537 #define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF
7539 /* Generic function for performing the dump of a debug feature. */
7540 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7541 struct qed_ptt *p_ptt,
7542 enum qed_dbg_features feature_idx)
7544 struct qed_dbg_feature *feature =
7545 &p_hwfn->cdev->dbg_features[feature_idx];
7546 u32 buf_size_dwords;
7549 DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7550 qed_features_lookup[feature_idx].name);
7552 /* Dump_buf was already allocated need to free (this can happen if dump
7553 * was called but file was never read).
7554 * We can't use the buffer as is since size may have changed.
7556 if (feature->dump_buf) {
7557 vfree(feature->dump_buf);
7558 feature->dump_buf = NULL;
7561 /* Get buffer size from hsi, allocate accordingly, and perform the
7564 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7566 if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7569 if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
7570 feature->buf_size = 0;
7571 DP_NOTICE(p_hwfn->cdev,
7572 "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
7573 qed_features_lookup[feature_idx].name,
7574 buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);
7576 return DBG_STATUS_OK;
7579 feature->buf_size = buf_size_dwords * sizeof(u32);
7580 feature->dump_buf = vmalloc(feature->buf_size);
7581 if (!feature->dump_buf)
7582 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7584 rc = qed_features_lookup[feature_idx].
7585 perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7586 feature->buf_size / sizeof(u32),
7587 &feature->dumped_dwords);
7589 /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7590 * In this case the buffer holds valid binary data, but we wont able
7591 * to parse it (since parsing relies on data in NVRAM which is only
7592 * accessible when MFW is responsive). skip the formatting but return
7593 * success so that binary data is provided.
7595 if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7596 return DBG_STATUS_OK;
7598 if (rc != DBG_STATUS_OK)
7602 rc = format_feature(p_hwfn, feature_idx);
7606 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7608 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7611 int qed_dbg_grc_size(struct qed_dev *cdev)
7613 return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7616 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7618 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7622 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7624 return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7627 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7629 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7633 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7635 return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7638 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7640 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7644 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7646 return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
/* Look up the byte length of NVRAM image @image_id through the MFW image
 * attributes and report it in *length.
 * NOTE(review): the declaration of 'rc', the early-return on a failed
 * attribute query, and the final return are not visible in this excerpt;
 * presumably *length is only set from image_att on success — confirm
 * against the full source.
 */
7649 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7650 enum qed_nvm_images image_id, u32 *length)
7652 struct qed_nvm_image_att image_att;
7656 rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7660 *length = image_att.length;
/* Read NVRAM image @image_id into @buffer using the hwfn currently
 * selected for debug.  The image length is queried first, rounded up to
 * a whole dword, and the payload of every image except NVM_META is
 * converted to big-endian in place.  *num_dumped_bytes reports the
 * rounded length (0 on the error paths).
 * NOTE(review): the declarations of 'rc'/'len_rounded' and the
 * early-return error checks are not visible in this excerpt.
 */
7665 static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
7666 u32 *num_dumped_bytes,
7667 enum qed_nvm_images image_id)
7669 struct qed_hwfn *p_hwfn =
7670 &cdev->hwfns[cdev->engine_for_debug];
/* Default to "nothing dumped" until the image is fully read. */
7674 *num_dumped_bytes = 0;
7675 rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
7679 DP_NOTICE(p_hwfn->cdev,
7680 "Collecting a debug feature [\"nvram image %d\"]\n",
/* Round up so the byte-swap below operates on whole dwords. */
7683 len_rounded = roundup(len_rounded, sizeof(u32));
7684 rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
7688 /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
7689 if (image_id != QED_NVM_IMAGE_NVM_META)
7690 cpu_to_be32_array((__force __be32 *)buffer,
7691 (const u32 *)buffer,
7692 len_rounded / sizeof(u32));
7694 *num_dumped_bytes = len_rounded;
7699 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7700 u32 *num_dumped_bytes)
7702 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7706 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7708 return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7711 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7712 u32 *num_dumped_bytes)
7714 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7718 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7720 return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7723 int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7725 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
7728 int qed_dbg_ilt_size(struct qed_dev *cdev)
7730 return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
7733 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7734 u32 *num_dumped_bytes)
7736 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7740 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7742 return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7745 /* Defines the amount of bytes allocated for recording the length of debugfs
 * feature buffer.
 */
/* Header bit layout (one u32 preceding each feature payload):
 *   bits [23:0]  - payload size (SIZE_SHIFT/SIZE_MASK),
 *   bits [28:24] - feature id, see enum debug_print_features,
 *   bit  [29]    - binary-dump flag,
 *   bit  [30]    - omit-engine flag,
 *   bit  [31]    - engine id.
 */
7748 #define REGDUMP_HEADER_SIZE sizeof(u32)
7749 #define REGDUMP_HEADER_SIZE_SHIFT 0
7750 #define REGDUMP_HEADER_SIZE_MASK 0xffffff
7751 #define REGDUMP_HEADER_FEATURE_SHIFT 24
7752 #define REGDUMP_HEADER_FEATURE_MASK 0x1f
7753 #define REGDUMP_HEADER_BIN_DUMP_SHIFT 29
7754 #define REGDUMP_HEADER_BIN_DUMP_MASK 0x1
7755 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
7756 #define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1
7757 #define REGDUMP_HEADER_ENGINE_SHIFT 31
7758 #define REGDUMP_HEADER_ENGINE_MASK 0x1
/* Upper bound on the total regdump size; ILT is dropped first when the
 * estimate exceeds it (see qed_dbg_all_data_size()).
 */
7759 #define REGDUMP_MAX_SIZE 0x1000000
7760 #define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15)
/* Feature identifiers encoded into the regdump header's FEATURE field
 * (bits [28:24]).
 * NOTE(review): only part of the enumerator list is visible in this
 * excerpt; the remaining values (IDLE_CHK, GRC_DUMP, MCP_TRACE, ...)
 * are elided.
 */
7762 enum debug_print_features {
7768 PROTECTION_OVERRIDE = 5,
/* Pack a single u32 regdump header word: feature payload size, feature
 * id, binary-dump flag (always set here), omit-engine flag and engine
 * id.  Warns when @feature_size does not fit the 24-bit SIZE field,
 * since a truncated size corrupts the reader's walk of the dump.
 * NOTE(review): the declaration/initialization of 'res', the DP_NOTICE
 * call opening, and the final 'return res;' are not visible in this
 * excerpt.
 */
7779 static u32 qed_calc_regdump_header(struct qed_dev *cdev,
7780 enum debug_print_features feature,
7781 int engine, u32 feature_size, u8 omit_engine)
7785 SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
/* SIZE field is 24 bits wide; a mismatch means the size was truncated. */
7786 if (res != feature_size)
7788 "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
7789 feature, feature_size);
7791 SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
7792 SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
7793 SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
7794 SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
/* Collect every debug feature into one linear @buffer: per engine, two
 * idle-checks, reg/IGU FIFOs, protection override, FW asserts, an
 * optional ILT dump and (last) GRC; then, once, the global mcp_trace
 * and the NVRAM images (NVM_CFG1, DEFAULT_CFG, NVM_META, MDUMP).  Each
 * feature is preceded by a REGDUMP_HEADER_SIZE header word built by
 * qed_calc_regdump_header().  GRC parameters are snapshotted up front
 * and restored just before the GRC dump, because earlier features may
 * modify them.  Serialized under qed_dbg_lock with dbg_bin_dump set for
 * the duration.
 * NOTE(review): many lines (rc declaration, 'if (!rc)' guards, closing
 * braces, omit_engine assignment under !QED_IS_CMT) are not visible in
 * this excerpt.
 */
7799 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7801 u8 cur_engine, omit_engine = 0, org_engine;
7802 struct qed_hwfn *p_hwfn =
7803 &cdev->hwfns[cdev->engine_for_debug];
7804 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
7805 int grc_params[MAX_DBG_GRC_PARAMS], i;
7806 u32 offset = 0, feature_size;
/* Snapshot GRC params; they are restored before the GRC dump below. */
7809 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
7810 grc_params[i] = dev_data->grc.param_val[i];
/* Presumably sets omit_engine for single-engine (non-CMT) devices —
 * the assignment line is not visible here.
 */
7812 if (!QED_IS_CMT(cdev))
7815 mutex_lock(&qed_dbg_lock);
7816 cdev->dbg_bin_dump = true;
7818 org_engine = qed_get_debug_engine(cdev);
7819 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7820 /* Collect idle_chks and grcDump for each hw function */
7821 DP_VERBOSE(cdev, QED_MSG_DEBUG,
7822 "obtaining idle_chk and grcdump for current engine\n");
7823 qed_set_debug_engine(cdev, cur_engine);
7825 /* First idle_chk */
7826 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7827 REGDUMP_HEADER_SIZE, &feature_size);
7829 *(u32 *)((u8 *)buffer + offset) =
7830 qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
7831 feature_size, omit_engine);
7832 offset += (feature_size + REGDUMP_HEADER_SIZE);
7834 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7837 /* Second idle_chk */
7838 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7839 REGDUMP_HEADER_SIZE, &feature_size);
7841 *(u32 *)((u8 *)buffer + offset) =
7842 qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
7843 feature_size, omit_engine);
7844 offset += (feature_size + REGDUMP_HEADER_SIZE);
7846 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7850 rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
7851 REGDUMP_HEADER_SIZE, &feature_size);
7853 *(u32 *)((u8 *)buffer + offset) =
7854 qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
7855 feature_size, omit_engine);
7856 offset += (feature_size + REGDUMP_HEADER_SIZE);
7858 DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
7862 rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7863 REGDUMP_HEADER_SIZE, &feature_size);
7865 *(u32 *)((u8 *)buffer + offset) =
7866 qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
7867 feature_size, omit_engine);
7868 offset += (feature_size + REGDUMP_HEADER_SIZE);
7870 DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
7873 /* protection_override dump */
7874 rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7875 REGDUMP_HEADER_SIZE,
7878 *(u32 *)((u8 *)buffer + offset) =
7879 qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
7881 feature_size, omit_engine);
7882 offset += (feature_size + REGDUMP_HEADER_SIZE);
7885 "qed_dbg_protection_override failed. rc = %d\n",
7889 /* fw_asserts dump */
7890 rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7891 REGDUMP_HEADER_SIZE, &feature_size);
7893 *(u32 *)((u8 *)buffer + offset) =
7894 qed_calc_regdump_header(cdev, FW_ASSERTS,
7895 cur_engine, feature_size,
7897 offset += (feature_size + REGDUMP_HEADER_SIZE);
7899 DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
/* ILT is optional: skipped when disabled or when its size estimate
 * exceeds ILT_DUMP_MAX_SIZE.
 */
7903 feature_size = qed_dbg_ilt_size(cdev);
7904 if (!cdev->disable_ilt_dump &&
7905 feature_size < ILT_DUMP_MAX_SIZE) {
7906 rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
7907 REGDUMP_HEADER_SIZE, &feature_size);
7909 *(u32 *)((u8 *)buffer + offset) =
7910 qed_calc_regdump_header(cdev, ILT_DUMP,
7914 offset += feature_size + REGDUMP_HEADER_SIZE;
7916 DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
7921 /* GRC dump - must be last because when mcp stuck it will
7922 * clutter idle_chk, reg_fifo, ...
/* Restore the GRC params snapshotted at function entry. */
7924 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
7925 dev_data->grc.param_val[i] = grc_params[i];
7927 rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7928 REGDUMP_HEADER_SIZE, &feature_size);
7930 *(u32 *)((u8 *)buffer + offset) =
7931 qed_calc_regdump_header(cdev, GRC_DUMP,
7933 feature_size, omit_engine);
7934 offset += (feature_size + REGDUMP_HEADER_SIZE);
7936 DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
/* Per-engine collection done; the features below are global. */
7940 qed_set_debug_engine(cdev, org_engine);
7943 rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
7944 REGDUMP_HEADER_SIZE, &feature_size);
7946 *(u32 *)((u8 *)buffer + offset) =
7947 qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
7948 feature_size, omit_engine);
7949 offset += (feature_size + REGDUMP_HEADER_SIZE);
7951 DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
7954 /* Re-populate nvm attribute info */
7955 qed_mcp_nvm_info_free(p_hwfn);
7956 qed_mcp_nvm_info_populate(p_hwfn);
/* NVRAM images: -ENOENT (image absent) is not treated as an error. */
7959 rc = qed_dbg_nvm_image(cdev,
7960 (u8 *)buffer + offset +
7961 REGDUMP_HEADER_SIZE, &feature_size,
7962 QED_NVM_IMAGE_NVM_CFG1);
7964 *(u32 *)((u8 *)buffer + offset) =
7965 qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
7966 feature_size, omit_engine);
7967 offset += (feature_size + REGDUMP_HEADER_SIZE);
7968 } else if (rc != -ENOENT) {
7970 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
7971 QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
7975 rc = qed_dbg_nvm_image(cdev,
7976 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
7977 &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
7979 *(u32 *)((u8 *)buffer + offset) =
7980 qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
7981 feature_size, omit_engine);
7982 offset += (feature_size + REGDUMP_HEADER_SIZE);
7983 } else if (rc != -ENOENT) {
7985 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
7986 QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
7991 rc = qed_dbg_nvm_image(cdev,
7992 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
7993 &feature_size, QED_NVM_IMAGE_NVM_META);
7995 *(u32 *)((u8 *)buffer + offset) =
7996 qed_calc_regdump_header(cdev, NVM_META, cur_engine,
7997 feature_size, omit_engine);
7998 offset += (feature_size + REGDUMP_HEADER_SIZE);
7999 } else if (rc != -ENOENT) {
8001 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8002 QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
8006 rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
8007 REGDUMP_HEADER_SIZE, &feature_size,
8008 QED_NVM_IMAGE_MDUMP);
8010 *(u32 *)((u8 *)buffer + offset) =
8011 qed_calc_regdump_header(cdev, MDUMP, cur_engine,
8012 feature_size, omit_engine);
8013 offset += (feature_size + REGDUMP_HEADER_SIZE);
8014 } else if (rc != -ENOENT) {
8016 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
8017 QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
8020 cdev->dbg_bin_dump = false;
8021 mutex_unlock(&qed_dbg_lock);
/* Estimate the buffer size (bytes) needed by qed_dbg_all_data():
 * per-engine feature sizes plus one header word each, then the global
 * mcp_trace and the four NVRAM image lengths.  If the total exceeds
 * REGDUMP_MAX_SIZE the ILT contribution is dropped and ILT dumping is
 * disabled for the subsequent data collection.
 * NOTE(review): closing braces and the final 'return regs_len;' are not
 * visible in this excerpt.
 */
8026 int qed_dbg_all_data_size(struct qed_dev *cdev)
8028 struct qed_hwfn *p_hwfn =
8029 &cdev->hwfns[cdev->engine_for_debug];
8030 u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
8031 u8 cur_engine, org_engine;
8033 cdev->disable_ilt_dump = false;
8034 org_engine = qed_get_debug_engine(cdev);
8035 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8036 /* Engine specific */
8037 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8038 "calculating idle_chk and grcdump register length for current engine\n");
8039 qed_set_debug_engine(cdev, cur_engine);
/* Two idle-checks are collected per engine, hence the size counted
 * twice (matches qed_dbg_all_data()).
 */
8040 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8041 REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8042 REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8043 REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8044 REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8045 REGDUMP_HEADER_SIZE +
8046 qed_dbg_protection_override_size(cdev) +
8047 REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
/* ILT is only counted when under its individual cap; track it
 * separately so it can be subtracted if the total is too large.
 */
8049 ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
8050 if (ilt_len < ILT_DUMP_MAX_SIZE) {
8051 total_ilt_len += ilt_len;
8052 regs_len += ilt_len;
8056 qed_set_debug_engine(cdev, org_engine);
/* Engine common */
8059 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8060 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8062 regs_len += REGDUMP_HEADER_SIZE + image_len;
8063 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8065 regs_len += REGDUMP_HEADER_SIZE + image_len;
8066 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8068 regs_len += REGDUMP_HEADER_SIZE + image_len;
8069 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
8071 regs_len += REGDUMP_HEADER_SIZE + image_len;
8073 if (regs_len > REGDUMP_MAX_SIZE) {
8074 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8075 "Dump exceeds max size 0x%x, disable ILT dump\n",
8077 cdev->disable_ilt_dump = true;
8078 regs_len -= total_ilt_len;
/* Generic single-feature dump entry point: acquire a PTT window on the
 * debug engine's hwfn, run qed_dbg_dump() for @feature, then copy the
 * feature's internal dump buffer into the caller-supplied @buffer and
 * report the byte count via *num_dumped_bytes.
 * NOTE(review): the PTT-acquire failure path, the goto/cleanup labels
 * and the final return are not visible in this excerpt; *num_dumped_bytes
 * is presumably derived as dumped_dwords * sizeof(u32).
 */
8084 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8085 enum qed_dbg_features feature, u32 *num_dumped_bytes)
8087 struct qed_hwfn *p_hwfn =
8088 &cdev->hwfns[cdev->engine_for_debug];
8089 struct qed_dbg_feature *qed_feature =
8090 &cdev->dbg_features[feature];
8091 enum dbg_status dbg_rc;
8092 struct qed_ptt *p_ptt;
8096 p_ptt = qed_ptt_acquire(p_hwfn);
/* Collect the feature into its internal dump buffer. */
8101 dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8102 if (dbg_rc != DBG_STATUS_OK) {
8103 DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8104 qed_dbg_get_status_str(dbg_rc));
8105 *num_dumped_bytes = 0;
8110 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8111 "copying debugfs feature to external buffer\n");
8112 memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8113 *num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
8117 qed_ptt_release(p_hwfn, p_ptt);
/* Query the buffer size (bytes) required for @feature and cache it in
 * the feature descriptor.  Returns 0 both on query failure and when the
 * feature exceeds MAX_DBG_FEATURE_SIZE_DWORDS (such a feature will not
 * be dumped).
 * NOTE(review): the declaration of 'rc', the PTT-acquire failure check
 * and the get_size() argument continuation are not visible in this
 * excerpt.
 */
8121 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8123 struct qed_hwfn *p_hwfn =
8124 &cdev->hwfns[cdev->engine_for_debug];
8125 struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8126 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8127 u32 buf_size_dwords;
8133 rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8135 if (rc != DBG_STATUS_OK)
8136 buf_size_dwords = 0;
8138 /* Feature will not be dumped if it exceeds maximum size */
8139 if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
8140 buf_size_dwords = 0;
8142 qed_ptt_release(p_hwfn, p_ptt);
8143 qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8144 return qed_feature->buf_size;
8147 u8 qed_get_debug_engine(struct qed_dev *cdev)
8149 return cdev->engine_for_debug;
8152 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8154 DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8156 cdev->engine_for_debug = engine_number;
/* One-time debug setup for a PF: locate the debug-values section inside
 * the loaded firmware blob (its offset is the blob's first dword), hand
 * that pointer to both the dump and the user (parsing) debug layers of
 * every hwfn, and default the debug engine to hwfn 0.
 * NOTE(review): the declaration of 'i' and closing braces are not
 * visible in this excerpt.
 */
8159 void qed_dbg_pf_init(struct qed_dev *cdev)
8161 const u8 *dbg_values = NULL;
8164 /* Debug values are after init values.
8165 * The offset is the first dword of the file.
8167 dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8169 for_each_hwfn(cdev, i) {
8170 qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8171 qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8174 /* Set the hwfn to be 0 as default */
8175 cdev->engine_for_debug = 0;
8178 void qed_dbg_pf_exit(struct qed_dev *cdev)
8180 struct qed_dbg_feature *feature = NULL;
8181 enum qed_dbg_features feature_idx;
8183 /* debug features' buffers may be allocated if debug feature was used
8184 * but dump wasn't called
8186 for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8187 feature = &cdev->dbg_features[feature_idx];
8188 if (feature->dump_buf) {
8189 vfree(feature->dump_buf);
8190 feature->dump_buf = NULL;