2 * Copyright (C) 2017 Chelsio Communications. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
18 #include <linux/sort.h>
23 #include "cudbg_lib_common.h"
24 #include "cudbg_entity.h"
25 #include "cudbg_lib.h"
26 #include "cudbg_zlib.h"
28 static int cudbg_do_compression(struct cudbg_init *pdbg_init,
29 struct cudbg_buffer *pin_buff,
30 struct cudbg_buffer *dbg_buff)
32 struct cudbg_buffer temp_in_buff = { 0 };
33 int bytes_left, bytes_read, bytes;
34 u32 offset = dbg_buff->offset;
37 temp_in_buff.offset = pin_buff->offset;
38 temp_in_buff.data = pin_buff->data;
39 temp_in_buff.size = pin_buff->size;
41 bytes_left = pin_buff->size;
43 while (bytes_left > 0) {
44 /* Do compression in smaller chunks */
45 bytes = min_t(unsigned long, bytes_left,
46 (unsigned long)CUDBG_CHUNK_SIZE);
47 temp_in_buff.data = (char *)pin_buff->data + bytes_read;
48 temp_in_buff.size = bytes;
49 rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
56 pin_buff->size = dbg_buff->offset - offset;
60 static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
61 struct cudbg_buffer *pin_buff,
62 struct cudbg_buffer *dbg_buff)
66 if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
67 cudbg_update_buff(pin_buff, dbg_buff);
69 rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
75 cudbg_put_buff(pdbg_init, pin_buff);
79 static int is_fw_attached(struct cudbg_init *pdbg_init)
81 struct adapter *padap = pdbg_init->adap;
83 if (!(padap->flags & FW_OK) || padap->use_bd)
89 /* This function will add additional padding bytes into debug_buffer to make it
92 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
93 struct cudbg_entity_hdr *entity_hdr)
98 remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
101 memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
103 dbg_buff->offset += padding;
104 entity_hdr->num_pad = padding;
106 entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
109 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
111 struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
113 return (struct cudbg_entity_hdr *)
114 ((char *)outbuf + cudbg_hdr->hdr_len +
115 (sizeof(struct cudbg_entity_hdr) * (i - 1)));
118 static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
123 vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
127 rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
134 static int cudbg_mem_desc_cmp(const void *a, const void *b)
136 return ((const struct cudbg_mem_desc *)a)->base -
137 ((const struct cudbg_mem_desc *)b)->base;
140 int cudbg_fill_meminfo(struct adapter *padap,
141 struct cudbg_meminfo *meminfo_buff)
143 struct cudbg_mem_desc *md;
144 u32 lo, hi, used, alloc;
147 memset(meminfo_buff->avail, 0,
148 ARRAY_SIZE(meminfo_buff->avail) *
149 sizeof(struct cudbg_mem_desc));
150 memset(meminfo_buff->mem, 0,
151 (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
152 md = meminfo_buff->mem;
154 for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
155 meminfo_buff->mem[i].limit = 0;
156 meminfo_buff->mem[i].idx = i;
159 /* Find and sort the populated memory ranges */
161 lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
162 if (lo & EDRAM0_ENABLE_F) {
163 hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
164 meminfo_buff->avail[i].base =
165 cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
166 meminfo_buff->avail[i].limit =
167 meminfo_buff->avail[i].base +
168 cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
169 meminfo_buff->avail[i].idx = 0;
173 if (lo & EDRAM1_ENABLE_F) {
174 hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
175 meminfo_buff->avail[i].base =
176 cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
177 meminfo_buff->avail[i].limit =
178 meminfo_buff->avail[i].base +
179 cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
180 meminfo_buff->avail[i].idx = 1;
184 if (is_t5(padap->params.chip)) {
185 if (lo & EXT_MEM0_ENABLE_F) {
186 hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
187 meminfo_buff->avail[i].base =
188 cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
189 meminfo_buff->avail[i].limit =
190 meminfo_buff->avail[i].base +
191 cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
192 meminfo_buff->avail[i].idx = 3;
196 if (lo & EXT_MEM1_ENABLE_F) {
197 hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
198 meminfo_buff->avail[i].base =
199 cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
200 meminfo_buff->avail[i].limit =
201 meminfo_buff->avail[i].base +
202 cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
203 meminfo_buff->avail[i].idx = 4;
207 if (lo & EXT_MEM_ENABLE_F) {
208 hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
209 meminfo_buff->avail[i].base =
210 cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
211 meminfo_buff->avail[i].limit =
212 meminfo_buff->avail[i].base +
213 cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
214 meminfo_buff->avail[i].idx = 2;
218 if (lo & HMA_MUX_F) {
219 hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
220 meminfo_buff->avail[i].base =
221 cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
222 meminfo_buff->avail[i].limit =
223 meminfo_buff->avail[i].base +
224 cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
225 meminfo_buff->avail[i].idx = 5;
230 if (!i) /* no memory available */
231 return CUDBG_STATUS_ENTITY_NOT_FOUND;
233 meminfo_buff->avail_c = i;
234 sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
235 cudbg_mem_desc_cmp, NULL);
236 (md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
237 (md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
238 (md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
239 (md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
240 (md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
241 (md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
242 (md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
243 (md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
244 (md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
246 /* the next few have explicit upper bounds */
247 md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
248 md->limit = md->base - 1 +
249 t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
250 PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
253 md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
254 md->limit = md->base - 1 +
255 t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
256 PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
259 if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
260 if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
261 hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
262 md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
264 hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
265 md->base = t4_read_reg(padap,
266 LE_DB_HASH_TBL_BASE_ADDR_A);
271 md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
275 #define ulp_region(reg) do { \
276 md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
277 (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
280 ulp_region(RX_ISCSI);
285 ulp_region(RX_RQUDP);
290 md->idx = ARRAY_SIZE(cudbg_region);
291 if (!is_t4(padap->params.chip)) {
292 u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
293 u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
296 if (is_t5(padap->params.chip)) {
297 if (sge_ctrl & VFIFO_ENABLE_F)
298 size = DBVFIFO_SIZE_G(fifo_size);
300 size = T6_DBVFIFO_SIZE_G(fifo_size);
304 md->base = BASEADDR_G(t4_read_reg(padap,
305 SGE_DBVFIFO_BADDR_A));
306 md->limit = md->base + (size << 2) - 1;
312 md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
315 md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
319 md->base = padap->vres.ocq.start;
320 if (padap->vres.ocq.size)
321 md->limit = md->base + padap->vres.ocq.size - 1;
323 md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
326 /* add any address-space holes, there can be up to 3 */
327 for (n = 0; n < i - 1; n++)
328 if (meminfo_buff->avail[n].limit <
329 meminfo_buff->avail[n + 1].base)
330 (md++)->base = meminfo_buff->avail[n].limit;
332 if (meminfo_buff->avail[n].limit)
333 (md++)->base = meminfo_buff->avail[n].limit;
335 n = md - meminfo_buff->mem;
336 meminfo_buff->mem_c = n;
338 sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
339 cudbg_mem_desc_cmp, NULL);
341 lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
342 hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
343 meminfo_buff->up_ram_lo = lo;
344 meminfo_buff->up_ram_hi = hi;
346 lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
347 hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
348 meminfo_buff->up_extmem2_lo = lo;
349 meminfo_buff->up_extmem2_hi = hi;
351 lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
352 for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
353 meminfo_buff->free_rx_cnt +=
354 FREERXPAGECOUNT_G(t4_read_reg(padap,
355 TP_FLM_FREE_RX_CNT_A));
357 meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
358 meminfo_buff->rx_pages_data[1] =
359 t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
360 meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;
362 lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
363 hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
364 for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
365 meminfo_buff->free_tx_cnt +=
366 FREETXPAGECOUNT_G(t4_read_reg(padap,
367 TP_FLM_FREE_TX_CNT_A));
369 meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
370 meminfo_buff->tx_pages_data[1] =
371 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
372 meminfo_buff->tx_pages_data[2] =
373 hi >= (1 << 20) ? 'M' : 'K';
374 meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
376 meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
377 meminfo_buff->p_structs_free_cnt =
378 FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));
380 for (i = 0; i < 4; i++) {
381 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
382 lo = t4_read_reg(padap,
383 MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
385 lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
386 if (is_t5(padap->params.chip)) {
387 used = T5_USED_G(lo);
388 alloc = T5_ALLOC_G(lo);
393 meminfo_buff->port_used[i] = used;
394 meminfo_buff->port_alloc[i] = alloc;
397 for (i = 0; i < padap->params.arch.nchan; i++) {
398 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
399 lo = t4_read_reg(padap,
400 MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
402 lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
403 if (is_t5(padap->params.chip)) {
404 used = T5_USED_G(lo);
405 alloc = T5_ALLOC_G(lo);
410 meminfo_buff->loopback_used[i] = used;
411 meminfo_buff->loopback_alloc[i] = alloc;
417 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
418 struct cudbg_buffer *dbg_buff,
419 struct cudbg_error *cudbg_err)
421 struct adapter *padap = pdbg_init->adap;
422 struct cudbg_buffer temp_buff = { 0 };
426 if (is_t4(padap->params.chip))
427 buf_size = T4_REGMAP_SIZE;
428 else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
429 buf_size = T5_REGMAP_SIZE;
431 rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
434 t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
435 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
438 int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
439 struct cudbg_buffer *dbg_buff,
440 struct cudbg_error *cudbg_err)
442 struct adapter *padap = pdbg_init->adap;
443 struct cudbg_buffer temp_buff = { 0 };
444 struct devlog_params *dparams;
447 rc = t4_init_devlog_params(padap);
449 cudbg_err->sys_err = rc;
453 dparams = &padap->params.devlog;
454 rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
458 /* Collect FW devlog */
459 if (dparams->start != 0) {
460 spin_lock(&padap->win0_lock);
461 rc = t4_memory_rw(padap, padap->params.drv_memwin,
462 dparams->memtype, dparams->start,
464 (__be32 *)(char *)temp_buff.data,
466 spin_unlock(&padap->win0_lock);
468 cudbg_err->sys_err = rc;
469 cudbg_put_buff(pdbg_init, &temp_buff);
473 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
476 int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
477 struct cudbg_buffer *dbg_buff,
478 struct cudbg_error *cudbg_err)
480 struct adapter *padap = pdbg_init->adap;
481 struct cudbg_buffer temp_buff = { 0 };
485 if (is_t6(padap->params.chip)) {
486 size = padap->params.cim_la_size / 10 + 1;
487 size *= 10 * sizeof(u32);
489 size = padap->params.cim_la_size / 8;
490 size *= 8 * sizeof(u32);
494 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
498 rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
500 cudbg_err->sys_err = rc;
501 cudbg_put_buff(pdbg_init, &temp_buff);
505 memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
506 rc = t4_cim_read_la(padap,
507 (u32 *)((char *)temp_buff.data + sizeof(cfg)),
510 cudbg_err->sys_err = rc;
511 cudbg_put_buff(pdbg_init, &temp_buff);
514 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
517 int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
518 struct cudbg_buffer *dbg_buff,
519 struct cudbg_error *cudbg_err)
521 struct adapter *padap = pdbg_init->adap;
522 struct cudbg_buffer temp_buff = { 0 };
525 size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
526 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
530 t4_cim_read_ma_la(padap,
531 (u32 *)temp_buff.data,
532 (u32 *)((char *)temp_buff.data +
534 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
537 int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
538 struct cudbg_buffer *dbg_buff,
539 struct cudbg_error *cudbg_err)
541 struct adapter *padap = pdbg_init->adap;
542 struct cudbg_buffer temp_buff = { 0 };
543 struct cudbg_cim_qcfg *cim_qcfg_data;
546 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
551 cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
552 cim_qcfg_data->chip = padap->params.chip;
553 rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
554 ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
556 cudbg_err->sys_err = rc;
557 cudbg_put_buff(pdbg_init, &temp_buff);
561 rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
562 ARRAY_SIZE(cim_qcfg_data->obq_wr),
563 cim_qcfg_data->obq_wr);
565 cudbg_err->sys_err = rc;
566 cudbg_put_buff(pdbg_init, &temp_buff);
570 t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
571 cim_qcfg_data->thres);
572 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
575 static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
576 struct cudbg_buffer *dbg_buff,
577 struct cudbg_error *cudbg_err, int qid)
579 struct adapter *padap = pdbg_init->adap;
580 struct cudbg_buffer temp_buff = { 0 };
581 int no_of_read_words, rc = 0;
584 /* collect CIM IBQ */
585 qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
586 rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
590 /* t4_read_cim_ibq will return no. of read words or error */
591 no_of_read_words = t4_read_cim_ibq(padap, qid,
592 (u32 *)temp_buff.data, qsize);
593 /* no_of_read_words is less than or equal to 0 means error */
594 if (no_of_read_words <= 0) {
595 if (!no_of_read_words)
596 rc = CUDBG_SYSTEM_ERROR;
598 rc = no_of_read_words;
599 cudbg_err->sys_err = rc;
600 cudbg_put_buff(pdbg_init, &temp_buff);
603 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Per-queue CIM IBQ collectors; qid selects TP0/TP1/ULP/SGE0/SGE1/NC-SI. */
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
}
648 u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
652 t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
653 QUENUMSELECT_V(qid));
654 value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
655 value = CIMQSIZE_G(value) * 64; /* size in number of words */
656 return value * sizeof(u32);
659 static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
660 struct cudbg_buffer *dbg_buff,
661 struct cudbg_error *cudbg_err, int qid)
663 struct adapter *padap = pdbg_init->adap;
664 struct cudbg_buffer temp_buff = { 0 };
665 int no_of_read_words, rc = 0;
668 /* collect CIM OBQ */
669 qsize = cudbg_cim_obq_size(padap, qid);
670 rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
674 /* t4_read_cim_obq will return no. of read words or error */
675 no_of_read_words = t4_read_cim_obq(padap, qid,
676 (u32 *)temp_buff.data, qsize);
677 /* no_of_read_words is less than or equal to 0 means error */
678 if (no_of_read_words <= 0) {
679 if (!no_of_read_words)
680 rc = CUDBG_SYSTEM_ERROR;
682 rc = no_of_read_words;
683 cudbg_err->sys_err = rc;
684 cudbg_put_buff(pdbg_init, &temp_buff);
687 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Per-queue CIM OBQ collectors; qid selects ULP0-3/SGE/NC-SI/SGE-RX0-1. */
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
}

int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
}

int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
}

int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
}

int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
			      struct cudbg_buffer *dbg_buff,
			      struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
}

int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
			       struct cudbg_buffer *dbg_buff,
			       struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
}

int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
}

int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
				struct cudbg_buffer *dbg_buff,
				struct cudbg_error *cudbg_err)
{
	return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
746 static int cudbg_meminfo_get_mem_index(struct adapter *padap,
747 struct cudbg_meminfo *mem_info,
748 u8 mem_type, u8 *idx)
760 /* Some T5 cards have both MC0 and MC1. */
761 flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
770 return CUDBG_STATUS_ENTITY_NOT_FOUND;
773 for (i = 0; i < mem_info->avail_c; i++) {
774 if (mem_info->avail[i].idx == flag) {
780 return CUDBG_STATUS_ENTITY_NOT_FOUND;
783 /* Fetch the @region_name's start and end from @meminfo. */
784 static int cudbg_get_mem_region(struct adapter *padap,
785 struct cudbg_meminfo *meminfo,
786 u8 mem_type, const char *region_name,
787 struct cudbg_mem_desc *mem_desc)
793 rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
797 for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
798 if (!strcmp(cudbg_region[i], region_name)) {
808 for (i = 0; i < meminfo->mem_c; i++) {
809 if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
810 continue; /* Skip holes */
812 if (!(meminfo->mem[i].limit))
813 meminfo->mem[i].limit =
814 i < meminfo->mem_c - 1 ?
815 meminfo->mem[i + 1].base - 1 : ~0;
817 if (meminfo->mem[i].idx == idx) {
818 /* Check if the region exists in @mem_type memory */
819 if (meminfo->mem[i].base < meminfo->avail[mc].base &&
820 meminfo->mem[i].limit < meminfo->avail[mc].base)
823 if (meminfo->mem[i].base > meminfo->avail[mc].limit)
826 memcpy(mem_desc, &meminfo->mem[i],
827 sizeof(struct cudbg_mem_desc));
838 /* Fetch and update the start and end of the requested memory region w.r.t 0
839 * in the corresponding EDC/MC/HMA.
841 static int cudbg_get_mem_relative(struct adapter *padap,
842 struct cudbg_meminfo *meminfo,
843 u8 mem_type, u32 *out_base, u32 *out_end)
848 rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
852 if (*out_base < meminfo->avail[mc_idx].base)
855 *out_base -= meminfo->avail[mc_idx].base;
857 if (*out_end > meminfo->avail[mc_idx].limit)
858 *out_end = meminfo->avail[mc_idx].limit;
860 *out_end -= meminfo->avail[mc_idx].base;
865 /* Get TX and RX Payload region */
866 static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
867 const char *region_name,
868 struct cudbg_region_info *payload)
870 struct cudbg_mem_desc mem_desc = { 0 };
871 struct cudbg_meminfo meminfo;
874 rc = cudbg_fill_meminfo(padap, &meminfo);
878 rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
881 payload->exist = false;
885 payload->exist = true;
886 payload->start = mem_desc.base;
887 payload->end = mem_desc.limit;
889 return cudbg_get_mem_relative(padap, &meminfo, mem_type,
890 &payload->start, &payload->end);
893 static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win,
894 int mtype, u32 addr, u32 len, void *hbuf)
896 u32 win_pf, memoffset, mem_aperture, mem_base;
897 struct adapter *adap = pdbg_init->adap;
898 u32 pos, offset, resid;
903 /* Argument sanity checks ...
905 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
910 /* Try to do 64-bit reads. Residual will be handled later. */
914 ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
919 addr = addr + memoffset;
920 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
922 pos = addr & ~(mem_aperture - 1);
925 /* Set up initial PCI-E Memory Window to cover the start of our
928 t4_memory_update_win(adap, win, pos | win_pf);
930 /* Transfer data from the adapter */
932 *buf++ = le64_to_cpu((__force __le64)
933 t4_read_reg64(adap, mem_base + offset));
934 offset += sizeof(u64);
937 /* If we've reached the end of our current window aperture,
938 * move the PCI-E Memory Window on to the next.
940 if (offset == mem_aperture) {
943 t4_memory_update_win(adap, win, pos | win_pf);
947 res_buf = (u32 *)buf;
948 /* Read residual in 32-bit multiples */
949 while (resid > sizeof(u32)) {
950 *res_buf++ = le32_to_cpu((__force __le32)
951 t4_read_reg(adap, mem_base + offset));
952 offset += sizeof(u32);
953 resid -= sizeof(u32);
955 /* If we've reached the end of our current window aperture,
956 * move the PCI-E Memory Window on to the next.
958 if (offset == mem_aperture) {
961 t4_memory_update_win(adap, win, pos | win_pf);
965 /* Transfer residual < 32-bits */
967 t4_memory_rw_residual(adap, resid, mem_base + offset,
968 (u8 *)res_buf, T4_MEMORY_READ);
973 #define CUDBG_YIELD_ITERATION 256
975 static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
976 struct cudbg_buffer *dbg_buff, u8 mem_type,
977 unsigned long tot_len,
978 struct cudbg_error *cudbg_err)
980 static const char * const region_name[] = { "Tx payload:",
982 unsigned long bytes, bytes_left, bytes_read = 0;
983 struct adapter *padap = pdbg_init->adap;
984 struct cudbg_buffer temp_buff = { 0 };
985 struct cudbg_region_info payload[2];
990 /* Get TX/RX Payload region range if they exist */
991 memset(payload, 0, sizeof(payload));
992 for (i = 0; i < ARRAY_SIZE(region_name); i++) {
993 rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
998 if (payload[i].exist) {
999 /* Align start and end to avoid wrap around */
1000 payload[i].start = roundup(payload[i].start,
1002 payload[i].end = rounddown(payload[i].end,
1007 bytes_left = tot_len;
1008 while (bytes_left > 0) {
1009 /* As MC size is huge and read through PIO access, this
1010 * loop will hold cpu for a longer time. OS may think that
1011 * the process is hanged and will generate CPU stall traces.
1012 * So yield the cpu regularly.
1015 if (!(yield_count % CUDBG_YIELD_ITERATION))
1018 bytes = min_t(unsigned long, bytes_left,
1019 (unsigned long)CUDBG_CHUNK_SIZE);
1020 rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
1024 for (i = 0; i < ARRAY_SIZE(payload); i++)
1025 if (payload[i].exist &&
1026 bytes_read >= payload[i].start &&
1027 bytes_read + bytes <= payload[i].end)
1028 /* TX and RX Payload regions can't overlap */
1031 spin_lock(&padap->win0_lock);
1032 rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
1033 bytes_read, bytes, temp_buff.data);
1034 spin_unlock(&padap->win0_lock);
1036 cudbg_err->sys_err = rc;
1037 cudbg_put_buff(pdbg_init, &temp_buff);
1042 bytes_left -= bytes;
1043 bytes_read += bytes;
1044 rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
1047 cudbg_put_buff(pdbg_init, &temp_buff);
1054 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
1055 struct cudbg_error *cudbg_err)
1057 struct adapter *padap = pdbg_init->adap;
1060 if (is_fw_attached(pdbg_init)) {
1061 /* Flush uP dcache before reading edcX/mcX */
1062 rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
1064 cudbg_err->sys_warn = rc;
1068 static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
1069 struct cudbg_error *cudbg_err,
1070 u8 mem_type, unsigned long *region_size)
1072 struct adapter *padap = pdbg_init->adap;
1073 struct cudbg_meminfo mem_info;
1077 memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
1078 rc = cudbg_fill_meminfo(padap, &mem_info);
1080 cudbg_err->sys_err = rc;
1084 cudbg_t4_fwcache(pdbg_init, cudbg_err);
1085 rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
1087 cudbg_err->sys_err = rc;
1092 *region_size = mem_info.avail[mc_idx].limit -
1093 mem_info.avail[mc_idx].base;
1098 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
1099 struct cudbg_buffer *dbg_buff,
1100 struct cudbg_error *cudbg_err,
1103 unsigned long size = 0;
1106 rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
1110 return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
1114 int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
1115 struct cudbg_buffer *dbg_buff,
1116 struct cudbg_error *cudbg_err)
1118 return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1122 int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
1123 struct cudbg_buffer *dbg_buff,
1124 struct cudbg_error *cudbg_err)
1126 return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1130 int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
1131 struct cudbg_buffer *dbg_buff,
1132 struct cudbg_error *cudbg_err)
1134 return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1138 int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
1139 struct cudbg_buffer *dbg_buff,
1140 struct cudbg_error *cudbg_err)
1142 return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1146 int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
1147 struct cudbg_buffer *dbg_buff,
1148 struct cudbg_error *cudbg_err)
1150 return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1154 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
1155 struct cudbg_buffer *dbg_buff,
1156 struct cudbg_error *cudbg_err)
1158 struct adapter *padap = pdbg_init->adap;
1159 struct cudbg_buffer temp_buff = { 0 };
1162 nentries = t4_chip_rss_size(padap);
1163 rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
1168 rc = t4_read_rss(padap, (u16 *)temp_buff.data);
1170 cudbg_err->sys_err = rc;
1171 cudbg_put_buff(pdbg_init, &temp_buff);
1174 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1177 int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
1178 struct cudbg_buffer *dbg_buff,
1179 struct cudbg_error *cudbg_err)
1181 struct adapter *padap = pdbg_init->adap;
1182 struct cudbg_buffer temp_buff = { 0 };
1183 struct cudbg_rss_vf_conf *vfconf;
1184 int vf, rc, vf_count;
1186 vf_count = padap->params.arch.vfcount;
1187 rc = cudbg_get_buff(pdbg_init, dbg_buff,
1188 vf_count * sizeof(struct cudbg_rss_vf_conf),
1193 vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
1194 for (vf = 0; vf < vf_count; vf++)
1195 t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1196 &vfconf[vf].rss_vf_vfh, true);
1197 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1200 int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
1201 struct cudbg_buffer *dbg_buff,
1202 struct cudbg_error *cudbg_err)
1204 struct adapter *padap = pdbg_init->adap;
1205 struct cudbg_buffer temp_buff = { 0 };
1208 rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
1213 t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
1214 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1217 int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
1218 struct cudbg_buffer *dbg_buff,
1219 struct cudbg_error *cudbg_err)
1221 struct adapter *padap = pdbg_init->adap;
1222 struct cudbg_buffer temp_buff = { 0 };
1223 struct cudbg_pm_stats *pm_stats_buff;
1226 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
1231 pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
1232 t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1233 t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1234 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1237 int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
1238 struct cudbg_buffer *dbg_buff,
1239 struct cudbg_error *cudbg_err)
1241 struct adapter *padap = pdbg_init->adap;
1242 struct cudbg_buffer temp_buff = { 0 };
1243 struct cudbg_hw_sched *hw_sched_buff;
1246 if (!padap->params.vpd.cclk)
1247 return CUDBG_STATUS_CCLK_NOT_DEFINED;
1249 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
1251 hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
1252 hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
1253 hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
1254 t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1255 for (i = 0; i < NTX_SCHED; ++i)
1256 t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1257 &hw_sched_buff->ipg[i], true);
1258 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect the TP indirect registers into an array of ireg_buf entries:
 * three groups are dumped in sequence — TP PIO, TP TM PIO, and TP MIB
 * index registers — with the T5 vs T6 register tables selected from the
 * chip version.  Each ireg_buf records the address/data/offset/range
 * tuple used, followed by the values read.
 * NOTE(review): fragmentary listing — the "ch_tp_pio++" advance between
 * loop groups and rc checks are among the lines not shown.
 */
1261 int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
1262 struct cudbg_buffer *dbg_buff,
1263 struct cudbg_error *cudbg_err)
1265 struct adapter *padap = pdbg_init->adap;
1266 struct cudbg_buffer temp_buff = { 0 };
1267 struct ireg_buf *ch_tp_pio;
/* Total size covers all three register groups for this chip revision. */
1271 if (is_t5(padap->params.chip))
1272 n = sizeof(t5_tp_pio_array) +
1273 sizeof(t5_tp_tm_pio_array) +
1274 sizeof(t5_tp_mib_index_array);
1276 n = sizeof(t6_tp_pio_array) +
1277 sizeof(t6_tp_tm_pio_array) +
1278 sizeof(t6_tp_mib_index_array);
1280 n = n / (IREG_NUM_ELEM * sizeof(u32));
1281 size = sizeof(struct ireg_buf) * n;
1282 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1286 ch_tp_pio = (struct ireg_buf *)temp_buff.data;
/* Group 1: TP PIO registers. */
1289 if (is_t5(padap->params.chip))
1290 n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1291 else if (is_t6(padap->params.chip))
1292 n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1294 for (i = 0; i < n; i++) {
1295 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1296 u32 *buff = ch_tp_pio->outbuf;
1298 if (is_t5(padap->params.chip)) {
1299 tp_pio->ireg_addr = t5_tp_pio_array[i][0];
1300 tp_pio->ireg_data = t5_tp_pio_array[i][1];
1301 tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
1302 tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
1303 } else if (is_t6(padap->params.chip)) {
1304 tp_pio->ireg_addr = t6_tp_pio_array[i][0];
1305 tp_pio->ireg_data = t6_tp_pio_array[i][1];
1306 tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
1307 tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
1309 t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
1310 tp_pio->ireg_local_offset, true);
/* Group 2: TP TM PIO registers. */
1315 if (is_t5(padap->params.chip))
1316 n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1317 else if (is_t6(padap->params.chip))
1318 n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1320 for (i = 0; i < n; i++) {
1321 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1322 u32 *buff = ch_tp_pio->outbuf;
1324 if (is_t5(padap->params.chip)) {
1325 tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
1326 tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
1327 tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
1328 tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
1329 } else if (is_t6(padap->params.chip)) {
1330 tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
1331 tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
1332 tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
1333 tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
1335 t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
1336 tp_pio->ireg_local_offset, true);
/* Group 3: TP MIB index registers. */
1341 if (is_t5(padap->params.chip))
1342 n = sizeof(t5_tp_mib_index_array) /
1343 (IREG_NUM_ELEM * sizeof(u32));
1344 else if (is_t6(padap->params.chip))
1345 n = sizeof(t6_tp_mib_index_array) /
1346 (IREG_NUM_ELEM * sizeof(u32));
1348 for (i = 0; i < n ; i++) {
1349 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1350 u32 *buff = ch_tp_pio->outbuf;
1352 if (is_t5(padap->params.chip)) {
1353 tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
1354 tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
1355 tp_pio->ireg_local_offset =
1356 t5_tp_mib_index_array[i][2];
1357 tp_pio->ireg_offset_range =
1358 t5_tp_mib_index_array[i][3];
1359 } else if (is_t6(padap->params.chip)) {
1360 tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
1361 tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
1362 tp_pio->ireg_local_offset =
1363 t6_tp_mib_index_array[i][2];
1364 tp_pio->ireg_offset_range =
1365 t6_tp_mib_index_array[i][3];
1367 t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
1368 tp_pio->ireg_local_offset, true);
1371 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Read one function's SGE queue-base map via the SGE_QBASE indirect
 * register pair: select the PF/VF index in qbase->reg_addr, then read
 * all SGE_QBASE_DATA_REG_NUM data registers into the matching
 * pf_data_value[] or vf_data_value[] row.
 * NOTE(review): the is_pf branch header line is not shown in this
 * fragmentary listing.
 */
1374 static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
1375 struct sge_qbase_reg_field *qbase,
1376 u32 func, bool is_pf)
1381 buff = qbase->pf_data_value[func];
1383 buff = qbase->vf_data_value[func];
1384 /* In SGE_QBASE_INDEX,
1385 * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
/* Write the function index, then read back each data register. */
1390 t4_write_reg(padap, qbase->reg_addr, func);
1391 for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
1392 *buff = t4_read_reg(padap, qbase->reg_data[i]);
/* Collect SGE indirect debug registers (two ireg_buf groups from
 * t5_sge_dbg_index_array) and, on idle T6+ adapters, the per-PF/VF
 * SGE_QBASE queue-base maps appended after the ireg_buf data.
 * NOTE(review): fragmentary listing — the addr/data arguments of
 * t4_read_indirect, the break when a port is running, and rc checks
 * are among the lines not shown.
 */
1395 int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
1396 struct cudbg_buffer *dbg_buff,
1397 struct cudbg_error *cudbg_err)
1399 struct adapter *padap = pdbg_init->adap;
1400 struct cudbg_buffer temp_buff = { 0 };
1401 struct sge_qbase_reg_field *sge_qbase;
1402 struct ireg_buf *ch_sge_dbg;
1403 u8 padap_running = 0;
1407 /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
1408 * lead to SGE missing doorbells under heavy traffic. So, only
1409 * collect them when adapter is idle.
/* Probe every port; if any is up, skip the QBASE dump below. */
1411 for_each_port(padap, i) {
1412 padap_running = netif_running(padap->port[i]);
1417 size = sizeof(*ch_sge_dbg) * 2;
1419 size += sizeof(*sge_qbase);
1421 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1425 ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
1426 for (i = 0; i < 2; i++) {
1427 struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
1428 u32 *buff = ch_sge_dbg->outbuf;
1430 sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
1431 sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
1432 sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
1433 sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
1434 t4_read_indirect(padap,
1438 sge_pio->ireg_offset_range,
1439 sge_pio->ireg_local_offset);
/* QBASE maps: T6+ only, and only while the adapter is idle. */
1443 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
1445 sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
1446 /* 1 addr reg SGE_QBASE_INDEX and 4 data reg
1447 * SGE_QBASE_MAP[0-3]
1449 sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
1450 for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
1451 sge_qbase->reg_data[i] =
1452 t6_sge_qbase_index_array[i + 1];
/* Dump every PF, then every VF, queue-base map. */
1454 for (i = 0; i <= PCIE_FW_MASTER_M; i++)
1455 cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
1458 for (i = 0; i < padap->params.arch.vfcount; i++)
1459 cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
1462 sge_qbase->vfcount = padap->params.arch.vfcount;
1465 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect the ULP-RX logic-analyzer capture into a cudbg_ulprx_la
 * entity and record its fixed size (ULPRX_LA_SIZE entries).
 */
1468 int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
1469 struct cudbg_buffer *dbg_buff,
1470 struct cudbg_error *cudbg_err)
1472 struct adapter *padap = pdbg_init->adap;
1473 struct cudbg_buffer temp_buff = { 0 };
1474 struct cudbg_ulprx_la *ulprx_la_buff;
1477 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
1482 ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
1483 t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
1484 ulprx_la_buff->size = ULPRX_LA_SIZE;
1485 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect the TP logic-analyzer capture (TPLA_SIZE 64-bit words) plus
 * the current LA mode from TP_DBG_LA_CONFIG.
 */
1488 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
1489 struct cudbg_buffer *dbg_buff,
1490 struct cudbg_error *cudbg_err)
1492 struct adapter *padap = pdbg_init->adap;
1493 struct cudbg_buffer temp_buff = { 0 };
1494 struct cudbg_tp_la *tp_la_buff;
1497 size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
1498 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1502 tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
1503 tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
1504 t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1505 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect adapter memory-layout information: a versioned header
 * (signature/revision/size) followed by a cudbg_meminfo filled by
 * cudbg_fill_meminfo().  On fill failure the scratch buffer is
 * released and the error propagated via cudbg_err->sys_err.
 */
1508 int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
1509 struct cudbg_buffer *dbg_buff,
1510 struct cudbg_error *cudbg_err)
1512 struct adapter *padap = pdbg_init->adap;
1513 struct cudbg_buffer temp_buff = { 0 };
1514 struct cudbg_meminfo *meminfo_buff;
1515 struct cudbg_ver_hdr *ver_hdr;
1518 rc = cudbg_get_buff(pdbg_init, dbg_buff,
1519 sizeof(struct cudbg_ver_hdr) +
1520 sizeof(struct cudbg_meminfo),
/* Version header lets the decoder handle future meminfo revisions. */
1525 ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
1526 ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
1527 ver_hdr->revision = CUDBG_MEMINFO_REV;
1528 ver_hdr->size = sizeof(struct cudbg_meminfo);
1530 meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
1532 rc = cudbg_fill_meminfo(padap, meminfo_buff);
1534 cudbg_err->sys_err = rc;
1535 cudbg_put_buff(pdbg_init, &temp_buff);
1539 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect the CIM PIF logic-analyzer data: both capture halves
 * (2 * CIM_PIFLA_SIZE entries of 6 u32 words each) are read into one
 * buffer, the second half starting 6*CIM_PIFLA_SIZE words in.
 */
1542 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
1543 struct cudbg_buffer *dbg_buff,
1544 struct cudbg_error *cudbg_err)
1546 struct cudbg_cim_pif_la *cim_pif_la_buff;
1547 struct adapter *padap = pdbg_init->adap;
1548 struct cudbg_buffer temp_buff = { 0 };
1551 size = sizeof(struct cudbg_cim_pif_la) +
1552 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1553 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1557 cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
1558 cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1559 t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1560 (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1562 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect clock/timer information: derives the core-clock period in
 * picoseconds, the TP timer tick, and converts each TP timer register
 * (retransmit, persist, keepalive, SRTT, finwait2, delayed-ACK) into
 * microsecond-based values for the dump.
 */
1565 int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
1566 struct cudbg_buffer *dbg_buff,
1567 struct cudbg_error *cudbg_err)
1569 struct adapter *padap = pdbg_init->adap;
1570 struct cudbg_buffer temp_buff = { 0 };
1571 struct cudbg_clk_info *clk_info_buff;
/* Cannot convert ticks to time without a known core clock. */
1575 if (!padap->params.vpd.cclk)
1576 return CUDBG_STATUS_CCLK_NOT_DEFINED;
1578 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
1583 clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
1584 clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
1585 clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
1586 clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
1587 clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
/* TP tick in microseconds: core period << resolution, scaled from ps. */
1588 tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
1590 clk_info_buff->dack_timer =
1591 (clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
1592 t4_read_reg(padap, TP_DACK_TIMER_A);
1593 clk_info_buff->retransmit_min =
1594 tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
1595 clk_info_buff->retransmit_max =
1596 tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
1597 clk_info_buff->persist_timer_min =
1598 tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
1599 clk_info_buff->persist_timer_max =
1600 tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
1601 clk_info_buff->keepalive_idle_timer =
1602 tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
1603 clk_info_buff->keepalive_interval =
1604 tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
1605 clk_info_buff->initial_srtt =
1606 tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
1607 clk_info_buff->finwait2_timer =
1608 tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
1610 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect PCIe indirect registers: two groups — the PDBG and CDBG
 * register arrays — each dumped as ireg_buf entries sized for n
 * register tuples.  The buffer is allocated as n*2 ireg_bufs, which
 * assumes both arrays describe the same number of tuples.
 * NOTE(review): fragmentary listing — the ch_pcie++ advance and some
 * t4_read_indirect arguments are among the lines not shown.
 */
1613 int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
1614 struct cudbg_buffer *dbg_buff,
1615 struct cudbg_error *cudbg_err)
1617 struct adapter *padap = pdbg_init->adap;
1618 struct cudbg_buffer temp_buff = { 0 };
1619 struct ireg_buf *ch_pcie;
1623 n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
1624 size = sizeof(struct ireg_buf) * n * 2;
1625 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1629 ch_pcie = (struct ireg_buf *)temp_buff.data;
/* Group 1: PCIe PDBG registers. */
1631 for (i = 0; i < n; i++) {
1632 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
1633 u32 *buff = ch_pcie->outbuf;
1635 pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
1636 pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
1637 pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
1638 pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
1639 t4_read_indirect(padap,
1640 pcie_pio->ireg_addr,
1641 pcie_pio->ireg_data,
1643 pcie_pio->ireg_offset_range,
1644 pcie_pio->ireg_local_offset);
/* Group 2: PCIe CDBG registers. */
1649 n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
1650 for (i = 0; i < n; i++) {
1651 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
1652 u32 *buff = ch_pcie->outbuf;
1654 pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
1655 pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
1656 pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
1657 pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
1658 t4_read_indirect(padap,
1659 pcie_pio->ireg_addr,
1660 pcie_pio->ireg_data,
1662 pcie_pio->ireg_offset_range,
1663 pcie_pio->ireg_local_offset);
1666 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect PM (payload-memory) indirect registers: PM RX register array
 * followed by the PM TX array, each as ireg_buf entries.  Buffer sized
 * n*2 assumes RX and TX arrays have the same tuple count.
 * NOTE(review): fragmentary listing — the addr/data arguments to
 * t4_read_indirect and the ch_pm++ advance are among the lines not
 * shown.
 */
1669 int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
1670 struct cudbg_buffer *dbg_buff,
1671 struct cudbg_error *cudbg_err)
1673 struct adapter *padap = pdbg_init->adap;
1674 struct cudbg_buffer temp_buff = { 0 };
1675 struct ireg_buf *ch_pm;
1679 n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
1680 size = sizeof(struct ireg_buf) * n * 2;
1681 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1685 ch_pm = (struct ireg_buf *)temp_buff.data;
/* Group 1: PM RX registers. */
1687 for (i = 0; i < n; i++) {
1688 struct ireg_field *pm_pio = &ch_pm->tp_pio;
1689 u32 *buff = ch_pm->outbuf;
1691 pm_pio->ireg_addr = t5_pm_rx_array[i][0];
1692 pm_pio->ireg_data = t5_pm_rx_array[i][1];
1693 pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
1694 pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
1695 t4_read_indirect(padap,
1699 pm_pio->ireg_offset_range,
1700 pm_pio->ireg_local_offset);
/* Group 2: PM TX registers. */
1705 n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
1706 for (i = 0; i < n; i++) {
1707 struct ireg_field *pm_pio = &ch_pm->tp_pio;
1708 u32 *buff = ch_pm->outbuf;
1710 pm_pio->ireg_addr = t5_pm_tx_array[i][0];
1711 pm_pio->ireg_data = t5_pm_tx_array[i][1];
1712 pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
1713 pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
1714 t4_read_indirect(padap,
1718 pm_pio->ireg_offset_range,
1719 pm_pio->ireg_local_offset);
1722 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect TID (connection/filter ID) region information as a rev-1
 * versioned entity.  Firmware is queried for ETHOFLD and (T6)
 * HPFILTER ranges when attached; hardware registers and cached
 * adapter state (padap->tids.*) supply the rest.  On a failed
 * firmware query the scratch buffer is released and the error is
 * reported through cudbg_err->sys_err.
 * NOTE(review): fragmentary listing — the "tid = &tid1->tid;"
 * assignment, goto labels around the !is_fw_attached() path, and rc
 * checks are among the lines not shown.
 */
1725 int cudbg_collect_tid(struct cudbg_init *pdbg_init,
1726 struct cudbg_buffer *dbg_buff,
1727 struct cudbg_error *cudbg_err)
1729 struct adapter *padap = pdbg_init->adap;
1730 struct cudbg_tid_info_region_rev1 *tid1;
1731 struct cudbg_buffer temp_buff = { 0 };
1732 struct cudbg_tid_info_region *tid;
1733 u32 para[2], val[2];
1736 rc = cudbg_get_buff(pdbg_init, dbg_buff,
1737 sizeof(struct cudbg_tid_info_region_rev1),
1742 tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
/* Version header so decoders can distinguish rev1 payloads. */
1744 tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1745 tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
1746 tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
1747 sizeof(struct cudbg_ver_hdr);
1749 /* If firmware is not attached/alive, use backdoor register
1750 * access to collect dump.
1752 if (!is_fw_attached(pdbg_init))
1755 #define FW_PARAM_PFVF_A(param) \
1756 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
1757 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
1758 FW_PARAMS_PARAM_Y_V(0) | \
1759 FW_PARAMS_PARAM_Z_V(0))
/* Query firmware for the ETHOFLD TID range. */
1761 para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
1762 para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
1763 rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
1765 cudbg_err->sys_err = rc;
1766 cudbg_put_buff(pdbg_init, &temp_buff);
1769 tid->uotid_base = val[0];
1770 tid->nuotids = val[1] - val[0] + 1;
1772 if (is_t5(padap->params.chip)) {
1773 tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
1774 } else if (is_t6(padap->params.chip)) {
1776 t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
1777 tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);
/* T6 also exposes a high-priority filter region via firmware. */
1779 para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
1780 para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
1781 rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
1784 cudbg_err->sys_err = rc;
1785 cudbg_put_buff(pdbg_init, &temp_buff);
1788 tid->hpftid_base = val[0];
1789 tid->nhpftids = val[1] - val[0] + 1;
1792 #undef FW_PARAM_PFVF_A
/* Remaining fields come from cached driver state and LE registers. */
1795 tid->ntids = padap->tids.ntids;
1796 tid->nstids = padap->tids.nstids;
1797 tid->stid_base = padap->tids.stid_base;
1798 tid->hash_base = padap->tids.hash_base;
1800 tid->natids = padap->tids.natids;
1801 tid->nftids = padap->tids.nftids;
1802 tid->ftid_base = padap->tids.ftid_base;
1803 tid->aftid_base = padap->tids.aftid_base;
1804 tid->aftid_end = padap->tids.aftid_end;
1806 tid->sftid_base = padap->tids.sftid_base;
1807 tid->nsftids = padap->tids.nsftids;
1809 tid->flags = padap->flags;
1810 tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
1811 tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
1812 tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
1814 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect the PCIe configuration space: walks the [start, end] address
 * pairs in t5_pcie_config_array, reading 4 bytes at a time into a flat
 * u32 array of CUDBG_NUM_PCIE_CONFIG_REGS entries.
 * NOTE(review): the value++ advance inside the inner loop is among the
 * lines not shown in this fragmentary listing.
 */
1817 int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
1818 struct cudbg_buffer *dbg_buff,
1819 struct cudbg_error *cudbg_err)
1821 struct adapter *padap = pdbg_init->adap;
1822 struct cudbg_buffer temp_buff = { 0 };
1823 u32 size, *value, j;
1826 size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
1827 n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
1828 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1832 value = (u32 *)temp_buff.data;
1833 for (i = 0; i < n; i++) {
1834 for (j = t5_pcie_config_array[i][0];
1835 j <= t5_pcie_config_array[i][1]; j += 4) {
1836 t4_hw_pci_read_cfg4(padap, j, value);
1840 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Check whether a raw SGE context of the given type is valid by testing
 * its type-specific valid bit within the context words.
 * NOTE(review): the switch/case lines that set bit_pos (and bit) per
 * context type are not shown in this fragmentary listing.
 */
1843 static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
1845 int index, bit, bit_pos = 0;
1858 index = bit_pos / 32;
/* Nonzero return means the valid bit is set for this context. */
1860 return buf[index] & (1U << bit);
/* Determine the memory region (start/end/existence) for each SGE
 * context type: EGRESS and INGRESS regions are located by scanning the
 * meminfo memory map; FLM and CNM limits are derived from SGE_FLM_CFG.
 * NOTE(review): fragmentary listing — the region-name lookup used in
 * the inner loop and the mem_type[] output are among the lines not
 * shown.
 */
1863 static int cudbg_get_ctxt_region_info(struct adapter *padap,
1864 struct cudbg_region_info *ctx_info,
1867 struct cudbg_mem_desc mem_desc;
1868 struct cudbg_meminfo meminfo;
1869 u32 i, j, value, found;
1873 rc = cudbg_fill_meminfo(padap, &meminfo);
1877 /* Get EGRESS and INGRESS context region size */
1878 for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
1880 memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
/* Scan each available memory bank for this context's region. */
1881 for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
1882 rc = cudbg_get_mem_region(padap, &meminfo, j,
1887 rc = cudbg_get_mem_relative(padap, &meminfo, j,
1891 ctx_info[i].exist = false;
1894 ctx_info[i].exist = true;
1895 ctx_info[i].start = mem_desc.base;
1896 ctx_info[i].end = mem_desc.limit;
1902 ctx_info[i].exist = false;
1905 /* Get FLM and CNM max qid. */
1906 value = t4_read_reg(padap, SGE_FLM_CFG_A);
1908 /* Get number of data freelist queues */
1909 flq = HDRSTARTFLQ_G(value);
1910 ctx_info[CTXT_FLM].exist = true;
1911 ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
1913 /* The number of CONM contexts are same as number of freelist
1916 ctx_info[CTXT_CNM].exist = true;
1917 ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
/* Compute the buffer size needed for a full SGE context dump: sums the
 * queue-ID counts of every context region (falling back to
 * CUDBG_LOWMEM_MAX_CTXT_QIDS for missing EGRESS/INGRESS regions) and
 * multiplies by the per-context record size.
 */
1922 int cudbg_dump_context_size(struct adapter *padap)
1924 struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
1925 u8 mem_type[CTXT_INGRESS + 1] = { 0 };
1929 /* Get max valid qid for each type of queue */
1930 rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
1934 for (i = 0; i < CTXT_CNM; i++) {
1935 if (!region_info[i].exist) {
1936 if (i == CTXT_EGRESS || i == CTXT_INGRESS)
1937 size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
1942 size += (region_info[i].end - region_info[i].start + 1) /
1945 return size * sizeof(struct cudbg_ch_cntxt);
/* Read one SGE queue context: prefers the firmware mailbox path (to
 * avoid contending with firmware's own context accesses) and falls
 * back to direct backdoor register reads when firmware is absent or
 * the mailbox read fails.
 */
1948 static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
1949 enum ctxt_type ctype, u32 *data)
1951 struct adapter *padap = pdbg_init->adap;
1954 /* Under heavy traffic, the SGE Queue contexts registers will be
1955 * frequently accessed by firmware.
1957 * To avoid conflicts with firmware, always ask firmware to fetch
1958 * the SGE Queue contexts via mailbox. On failure, fallback to
1959 * accessing hardware registers directly.
1961 if (is_fw_attached(pdbg_init))
1962 rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
1964 t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
/* Fetch contexts [0, max_qid) of the given type via firmware/backdoor
 * reads, appending only valid ones to *out_buff.  For CTXT_FLM, the
 * paired CNM context for the same qid is fetched immediately after,
 * since FLM and CONM contexts are 1-to-1 mapped.
 * NOTE(review): the buff++ advances and validity "continue" path are
 * among the lines not shown in this fragmentary listing.
 */
1967 static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
1969 struct cudbg_ch_cntxt **out_buff)
1971 struct cudbg_ch_cntxt *buff = *out_buff;
1975 for (j = 0; j < max_qid; j++) {
1976 cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
1977 rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
1981 buff->cntxt_type = ctxt_type;
1984 if (ctxt_type == CTXT_FLM) {
1985 cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
1986 buff->cntxt_type = CTXT_CNM;
/* Collect all SGE queue contexts.  EGRESS/INGRESS regions are read in
 * bulk from adapter memory (after flushing cached contexts via
 * firmware) with a per-context byte-swap to big endian; any failure,
 * or a detached firmware, falls back to per-qid firmware/backdoor
 * reads.  FLM/CNM contexts are always fetched via the per-qid path.
 * NOTE(review): fragmentary listing — the kvfree of ctx_buf, qid
 * continuation logic, and several rc checks are not shown.
 */
1995 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
1996 struct cudbg_buffer *dbg_buff,
1997 struct cudbg_error *cudbg_err)
1999 struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
2000 struct adapter *padap = pdbg_init->adap;
2001 u32 j, size, max_ctx_size, max_ctx_qid;
2002 u8 mem_type[CTXT_INGRESS + 1] = { 0 };
2003 struct cudbg_buffer temp_buff = { 0 };
2004 struct cudbg_ch_cntxt *buff;
2009 /* Get max valid qid for each type of queue */
2010 rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
2014 rc = cudbg_dump_context_size(padap);
2016 return CUDBG_STATUS_ENTITY_NOT_FOUND;
2019 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2023 /* Get buffer with enough space to read the biggest context
/* Scratch buffer must hold the larger of the two bulk regions. */
2026 max_ctx_size = max(region_info[CTXT_EGRESS].end -
2027 region_info[CTXT_EGRESS].start + 1,
2028 region_info[CTXT_INGRESS].end -
2029 region_info[CTXT_INGRESS].start + 1);
2031 ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
2033 cudbg_put_buff(pdbg_init, &temp_buff);
2037 buff = (struct cudbg_ch_cntxt *)temp_buff.data;
2039 /* Collect EGRESS and INGRESS context data.
2040 * In case of failures, fallback to collecting via FW or
2043 for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
2044 if (!region_info[i].exist) {
2045 max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
2046 cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
2051 max_ctx_size = region_info[i].end - region_info[i].start + 1;
2052 max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
2054 /* If firmware is not attached/alive, use backdoor register
2055 * access to collect dump.
2057 if (is_fw_attached(pdbg_init)) {
/* Flush cached contexts so the memory read sees current state. */
2058 t4_sge_ctxt_flush(padap, padap->mbox, i);
2060 rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
2061 region_info[i].start, max_ctx_size,
2062 (__be32 *)ctx_buf, 1);
2065 if (rc || !is_fw_attached(pdbg_init)) {
2066 max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
2067 cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
2072 for (j = 0; j < max_ctx_qid; j++) {
2076 src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
2077 dst_off = (__be64 *)buff->data;
2079 /* The data is stored in 64-bit cpu order. Convert it
2080 * to big endian before parsing.
2082 for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
2083 dst_off[k] = cpu_to_be64(src_off[k]);
2085 rc = cudbg_sge_ctxt_check_valid(buff->data, i);
2089 buff->cntxt_type = i;
2097 /* Collect FREELIST and CONGESTION MANAGER contexts */
2098 max_ctx_size = region_info[CTXT_FLM].end -
2099 region_info[CTXT_FLM].start + 1;
2100 max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
2101 /* Since FLM and CONM are 1-to-1 mapped, the below function
2102 * will fetch both FLM and CONM contexts.
2104 cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
2106 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Convert a TCAM (x, y) pair into a MAC address and mask: the y value
 * is byte-swapped to big endian and its low 6 bytes copied out as the
 * Ethernet address.
 * NOTE(review): the mask computation line(s) between the signature and
 * the swap are not shown in this fragmentary listing.
 */
2109 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
2112 y = (__force u64)cpu_to_be64(y);
2113 memcpy(addr, (char *)&y + 2, ETH_ALEN);
/* Backdoor (register-based) read of the MPS replication map, used when
 * firmware is unavailable.  The upper four 32-bit words come from
 * chip-specific map registers (MAP0-3 on T5, MAP4-7 otherwise); the
 * lower four words use MAP0-3 on all chips.  Values are stored
 * big-endian to match the firmware mailbox reply layout.
 */
2116 static void cudbg_mps_rpl_backdoor(struct adapter *padap,
2117 struct fw_ldst_mps_rplc *mps_rplc)
2119 if (is_t5(padap->params.chip)) {
2120 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
2121 MPS_VF_RPLCT_MAP3_A));
2122 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
2123 MPS_VF_RPLCT_MAP2_A));
2124 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
2125 MPS_VF_RPLCT_MAP1_A));
2126 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
2127 MPS_VF_RPLCT_MAP0_A));
2129 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
2130 MPS_VF_RPLCT_MAP7_A));
2131 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
2132 MPS_VF_RPLCT_MAP6_A));
2133 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
2134 MPS_VF_RPLCT_MAP5_A));
2135 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
2136 MPS_VF_RPLCT_MAP4_A));
/* Low 128 bits use MAP0-3 on every chip revision. */
2138 mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
2139 mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
2140 mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
2141 mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
/* Collect one MPS TCAM entry at index idx into *tcam.  On T6+ the
 * entry's Y and X halves are read via the indirect DATA2_CTL control
 * register (with inner/outer lookup-type decoding of the overloaded
 * [71:48] bits); on older chips via direct TCAM_Y_L/X_L reads.  The
 * classification SRAM, replication map (firmware mailbox with
 * register backdoor fallback), and the derived address/mask complete
 * the entry.
 * NOTE(review): fragmentary listing — the empty-entry early return,
 * the "if (cls_lo & REPLICATE)" guard around the LDST block, and
 * several closing braces are among the lines not shown.
 */
2144 static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
2145 struct cudbg_mps_tcam *tcam, u32 idx)
2147 struct adapter *padap = pdbg_init->adap;
2148 u64 tcamy, tcamx, val;
2152 if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
2153 /* CtlReqID - 1: use Host Driver Requester ID
2154 * CtlCmdType - 0: Read, 1: Write
2155 * CtlTcamSel - 0: TCAM0, 1: TCAM1
2156 * CtlXYBitSel- 0: Y bit, 1: X bit
2160 ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
/* Indices 0-255 live in TCAM0; 256+ map into TCAM1. */
2162 ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
2164 ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);
2166 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
2167 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
2168 tcamy = DMACH_G(val) << 32;
2169 tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
2170 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
2171 tcam->lookup_type = DATALKPTYPE_G(data2);
2173 /* 0 - Outer header, 1 - Inner header
2174 * [71:48] bit locations are overloaded for
2175 * outer vs. inner lookup types.
2177 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
2178 /* Inner header VNI */
2179 tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
2180 tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
2181 tcam->dip_hit = data2 & DATADIPHIT_F;
2183 tcam->vlan_vld = data2 & DATAVIDH2_F;
2184 tcam->ivlan = VIDL_G(val);
2187 tcam->port_num = DATAPORTNUM_G(data2);
2189 /* Read tcamx. Change the control param */
2190 ctl |= CTLXYBITSEL_V(1);
2191 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
2192 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
2193 tcamx = DMACH_G(val) << 32;
2194 tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
2195 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
2196 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
2197 /* Inner header VNI mask */
2198 tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
2199 tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
/* Pre-T6: plain 64-bit register reads for both halves. */
2202 tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
2203 tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
2206 /* If no entry, return */
2210 tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
2211 tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));
2213 if (is_t5(padap->params.chip))
2214 tcam->repli = (tcam->cls_lo & REPLICATE_F);
2215 else if (is_t6(padap->params.chip))
2216 tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);
2219 struct fw_ldst_cmd ldst_cmd;
2220 struct fw_ldst_mps_rplc mps_rplc;
2222 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
2223 ldst_cmd.op_to_addrspace =
2224 htonl(FW_CMD_OP_V(FW_LDST_CMD) |
2225 FW_CMD_REQUEST_F | FW_CMD_READ_F |
2226 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
2227 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
2228 ldst_cmd.u.mps.rplc.fid_idx =
2229 htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
2230 FW_LDST_CMD_IDX_V(idx));
2232 /* If firmware is not attached/alive, use backdoor register
2233 * access to collect dump.
2235 if (is_fw_attached(pdbg_init))
2236 rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
2237 sizeof(ldst_cmd), &ldst_cmd);
2239 if (rc || !is_fw_attached(pdbg_init)) {
2240 cudbg_mps_rpl_backdoor(padap, &mps_rplc);
2241 /* Ignore error since we collected directly from
2242 * reading registers.
2246 mps_rplc = ldst_cmd.u.mps.rplc;
2249 tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
2250 tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
2251 tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
2252 tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
/* Wider replication maps carry four extra words. */
2253 if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
2254 tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
2255 tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
2256 tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
2257 tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
2260 cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
2262 tcam->rplc_size = padap->params.arch.mps_rplc_size;
/* Collect every MPS TCAM entry: iterates all mps_tcam_size indices,
 * collecting each via cudbg_collect_tcam_index().  Releases the
 * buffer and reports through cudbg_err on per-index failure or if the
 * accumulated size would overrun the allocation.
 * NOTE(review): the tcam++ advance and the total_size-vs-size bounds
 * comparison header are among the lines not shown.
 */
2266 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
2267 struct cudbg_buffer *dbg_buff,
2268 struct cudbg_error *cudbg_err)
2270 struct adapter *padap = pdbg_init->adap;
2271 struct cudbg_buffer temp_buff = { 0 };
2272 u32 size = 0, i, n, total_size = 0;
2273 struct cudbg_mps_tcam *tcam;
2276 n = padap->params.arch.mps_tcam_size;
2277 size = sizeof(struct cudbg_mps_tcam) * n;
2278 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2282 tcam = (struct cudbg_mps_tcam *)temp_buff.data;
2283 for (i = 0; i < n; i++) {
2284 rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
2286 cudbg_err->sys_err = rc;
2287 cudbg_put_buff(pdbg_init, &temp_buff);
2290 total_size += sizeof(struct cudbg_mps_tcam);
2295 rc = CUDBG_SYSTEM_ERROR;
2296 cudbg_err->sys_err = rc;
2297 cudbg_put_buff(pdbg_init, &temp_buff);
2300 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Collect VPD (vital product data): raw VPD params and firmware
 * version, plus the serial-config and VPD version strings read from
 * EEPROM.  EEPROM access is temporarily widened with
 * pci_set_vpd_size() to reach the serial-config version beyond the
 * PF's normal VPD window, then restored.
 * NOTE(review): rc checks after several calls are among the lines not
 * shown in this fragmentary listing; "ret" at 2330 vs "rc" elsewhere
 * suggests a separate best-effort status for the SCFG read — confirm
 * against the full source.
 */
2303 int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
2304 struct cudbg_buffer *dbg_buff,
2305 struct cudbg_error *cudbg_err)
2307 struct adapter *padap = pdbg_init->adap;
2308 struct cudbg_buffer temp_buff = { 0 };
2309 char vpd_str[CUDBG_VPD_VER_LEN + 1];
2310 u32 scfg_vers, vpd_vers, fw_vers;
2311 struct cudbg_vpd_data *vpd_data;
2312 struct vpd_params vpd = { 0 };
2315 rc = t4_get_raw_vpd_params(padap, &vpd);
2319 rc = t4_get_fw_version(padap, &fw_vers);
2323 /* Serial Configuration Version is located beyond the PF's vpd size.
2324 * Temporarily give access to entire EEPROM to get it.
2326 rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
2330 ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
2333 /* Restore back to original PF's vpd size */
2334 rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
2341 rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
/* VPD version arrives as a string; parse it into an integer. */
2346 vpd_str[CUDBG_VPD_VER_LEN] = '\0';
2347 rc = kstrtouint(vpd_str, 0, &vpd_vers);
2351 rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
2356 vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
2357 memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
2358 memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
2359 memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
2360 memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
2361 vpd_data->scfg_vers = scfg_vers;
2362 vpd_data->vpd_vers = vpd_vers;
2363 vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
2364 vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
2365 vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
2366 vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
2367 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/* Read one LE (lookup engine) TID entry via the DBGI interface:
 * zeroes the request data registers, issues a DBGI read command for
 * the TID, polls the busy bit (up to cmd_retry attempts), validates
 * the response status, then copies out all response data words.
 * Returns CUDBG_SYSTEM_ERROR on timeout or bad response status.
 * NOTE(review): the retry-loop header and delay between polls are
 * among the lines not shown in this fragmentary listing.
 */
2370 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
2371 struct cudbg_tid_data *tid_data)
2373 struct adapter *padap = pdbg_init->adap;
2374 int i, cmd_retry = 8;
2377 /* Fill REQ_DATA regs with 0's */
2378 for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
2379 t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);
2381 /* Write DBIG command */
2382 val = DBGICMD_V(4) | DBGITID_V(tid);
2383 t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
2384 tid_data->dbig_cmd = val;
2386 val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
2387 t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
2388 tid_data->dbig_conf = val;
2390 /* Poll the DBGICMDBUSY bit */
2393 val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
2394 val = val & DBGICMDBUSY_F;
2397 return CUDBG_SYSTEM_ERROR;
2400 /* Check RESP status */
2401 val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
2402 tid_data->dbig_rsp_stat = val;
2404 return CUDBG_SYSTEM_ERROR;
2406 /* Read RESP data */
2407 for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
2408 tid_data->data[i] = t4_read_reg(padap,
2409 LE_DB_DBGI_RSP_DATA_A +
2411 tid_data->tid = tid;
/* Classify a TID into its LE region type by comparing it against the
 * ascending region boundaries recorded in tcam_region (active conns,
 * servers, filters, CLIP, routing, then hash).  TIDs at or beyond
 * max_tid are LE_ET_INVALID_TID.
 */
2415 static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
2417 int type = LE_ET_UNKNOWN;
2419 if (tid < tcam_region.server_start)
2420 type = LE_ET_TCAM_CON;
2421 else if (tid < tcam_region.filter_start)
2422 type = LE_ET_TCAM_SERVER;
2423 else if (tid < tcam_region.clip_start)
2424 type = LE_ET_TCAM_FILTER;
2425 else if (tid < tcam_region.routing_start)
2426 type = LE_ET_TCAM_CLIP;
2427 else if (tid < tcam_region.tid_hash_base)
2428 type = LE_ET_TCAM_ROUTING;
2429 else if (tid < tcam_region.max_tid)
2430 type = LE_ET_HASH_CON;
2432 type = LE_ET_INVALID_TID;
/* Determine whether a TID entry is IPv6.  Odd TIDs are handled by the
 * early path at 2444 (body line not shown).  For hash and active-TCAM
 * connections the IPv6 flag is bit 15 of data word 16; other region
 * types compare data word 9 against the 0x00C00000 marker.
 */
2437 static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
2438 struct cudbg_tcam tcam_region)
2443 le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
2444 if (tid_data->tid & 1)
2447 if (le_type == LE_ET_HASH_CON) {
2448 ipv6 = tid_data->data[16] & 0x8000;
2449 } else if (le_type == LE_ET_TCAM_CON) {
2450 ipv6 = tid_data->data[16] & 0x8000;
2452 ipv6 = tid_data->data[9] == 0x00C00000;
/*
 * cudbg_fill_le_tcam_info() - Discover the LE TCAM region layout from
 * hardware registers and fill @tcam_region.
 *
 * Reads the start index of each region (hash base, routing, CLIP, filter,
 * server) from the LE_DB_* registers, then computes max_tid depending on
 * whether hashing is enabled (LE_DB_CONFIG HASHEN bit) and on the chip
 * generation. On T6 the CLIP TCAM is a separate TCAM, so its base comes
 * from LE_DB_CLCAM_TID_BASE and CUDBG_T6_CLIP extra entries are added to
 * max_tid at the end.
 */
2459 void cudbg_fill_le_tcam_info(struct adapter *padap,
2460 struct cudbg_tcam *tcam_region)
2464 /* Get the LE regions */
2465 value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
2466 tcam_region->tid_hash_base = value;
2468 /* Get routing table index */
2469 value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
2470 tcam_region->routing_start = value;
2472 /* Get clip table index. For T6 there is separate CLIP TCAM */
2473 if (is_t6(padap->params.chip))
2474 value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
2476 value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
2477 tcam_region->clip_start = value;
2479 /* Get filter table index */
2480 value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
2481 tcam_region->filter_start = value;
2483 /* Get server table index */
2484 value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
2485 tcam_region->server_start = value;
2487 /* Check whether hash is enabled and calculate the max tids */
2488 value = t4_read_reg(padap, LE_DB_CONFIG_A);
2489 if ((value >> HASHEN_S) & 1) {
2490 value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
2491 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
/* T6+: low 20 bits of HASH_CONFIG give the hash region size */
2492 tcam_region->max_tid = (value & 0xFFFFF) +
2493 tcam_region->tid_hash_base;
/* Pre-T6: size is encoded in the HASHTIDSIZE field */
2495 value = HASHTIDSIZE_G(value);
2497 tcam_region->max_tid = value +
2498 tcam_region->tid_hash_base;
2500 } else { /* hash not enabled */
2501 if (is_t6(padap->params.chip))
2502 tcam_region->max_tid = (value & ASLIPCOMPEN_F) ?
2503 CUDBG_MAX_TID_COMP_EN :
2504 CUDBG_MAX_TID_COMP_DIS;
2506 tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
/* T6 keeps its CLIP entries in a separate TCAM; account for them too */
2509 if (is_t6(padap->params.chip))
2510 tcam_region->max_tid += CUDBG_T6_CLIP;
/*
 * cudbg_collect_le_tcam() - Dump the whole LE TCAM, one TID entry at a
 * time, into the debug buffer.
 *
 * Layout of the dump: a struct cudbg_tcam header describing the region
 * boundaries, followed by one struct cudbg_tid_data per TID read via
 * cudbg_read_tid(). On a read failure the header's max_tid is rewritten
 * to the number of entries actually collected and the dump is flagged
 * as partial.
 *
 * NOTE(review): the loop-advance statements (how 'i' is incremented for
 * IPv4 vs IPv6 entries, and the tid_data pointer advance) are missing
 * from this extract — IPv6 entries occupy 4 TIDs in the T6 CLIP TCAM
 * and 2 in the main TCAM per the comments below; visible lines are kept
 * byte-identical.
 */
2513 int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
2514 struct cudbg_buffer *dbg_buff,
2515 struct cudbg_error *cudbg_err)
2517 struct adapter *padap = pdbg_init->adap;
2518 struct cudbg_buffer temp_buff = { 0 };
2519 struct cudbg_tcam tcam_region = { 0 };
2520 struct cudbg_tid_data *tid_data;
2525 cudbg_fill_le_tcam_info(padap, &tcam_region);
2527 size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
2528 size += sizeof(struct cudbg_tcam);
2529 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
/* Dump starts with the region-layout header */
2533 memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
2534 bytes = sizeof(struct cudbg_tcam);
2535 tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
2537 for (i = 0; i < tcam_region.max_tid; ) {
2538 rc = cudbg_read_tid(pdbg_init, i, tid_data);
2540 cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
2541 /* Update tcam header and exit */
2542 tcam_region.max_tid = i;
2543 memcpy(temp_buff.data, &tcam_region,
2544 sizeof(struct cudbg_tcam));
2548 if (cudbg_is_ipv6_entry(tid_data, tcam_region)) {
2549 /* T6 CLIP TCAM: ipv6 takes 4 entries */
2550 if (is_t6(padap->params.chip) &&
2551 i >= tcam_region.clip_start &&
2552 i < tcam_region.clip_start + CUDBG_T6_CLIP)
2554 else /* Main TCAM: ipv6 takes two tids */
2561 bytes += sizeof(struct cudbg_tid_data);
2565 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/*
 * cudbg_collect_cctrl() - Dump the congestion-control table.
 *
 * Allocates room for NMTUS * NCCTRL_WIN u16 entries, fills it via
 * t4_read_cong_tbl() and hands the buffer to the common write/compress
 * path.
 */
2568 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
2569 struct cudbg_buffer *dbg_buff,
2570 struct cudbg_error *cudbg_err)
2572 struct adapter *padap = pdbg_init->adap;
2573 struct cudbg_buffer temp_buff = { 0 };
2577 size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2578 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2582 t4_read_cong_tbl(padap, (void *)temp_buff.data);
2583 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/*
 * cudbg_collect_ma_indirect() - Dump the MA (Memory Arbiter) indirect
 * registers. T6-only; earlier chips return ENTITY_NOT_FOUND.
 *
 * Two passes: t6_ma_ireg_array entries are read as one contiguous range
 * per entry via t4_read_indirect(); t6_ma_ireg_array2 entries are read
 * one register at a time with the local offset stepped by 0x20 between
 * reads. Each pass writes one struct ireg_buf per table row.
 *
 * NOTE(review): the 'ma_indr++' pointer advance between rows is not
 * visible in this extract; visible lines are kept byte-identical.
 */
2586 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
2587 struct cudbg_buffer *dbg_buff,
2588 struct cudbg_error *cudbg_err)
2590 struct adapter *padap = pdbg_init->adap;
2591 struct cudbg_buffer temp_buff = { 0 };
2592 struct ireg_buf *ma_indr;
2596 if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
2597 return CUDBG_STATUS_ENTITY_NOT_FOUND;
/* Buffer sized for both register tables (hence the * 2) */
2599 n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
2600 size = sizeof(struct ireg_buf) * n * 2;
2601 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2605 ma_indr = (struct ireg_buf *)temp_buff.data;
2606 for (i = 0; i < n; i++) {
2607 struct ireg_field *ma_fli = &ma_indr->tp_pio;
2608 u32 *buff = ma_indr->outbuf;
2610 ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
2611 ma_fli->ireg_data = t6_ma_ireg_array[i][1];
2612 ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
2613 ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
2614 t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
2615 buff, ma_fli->ireg_offset_range,
2616 ma_fli->ireg_local_offset);
/* Second table: registers must be read singly, offset stride 0x20 */
2620 n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
2621 for (i = 0; i < n; i++) {
2622 struct ireg_field *ma_fli = &ma_indr->tp_pio;
2623 u32 *buff = ma_indr->outbuf;
2625 ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
2626 ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
2627 ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
2628 for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
2629 t4_read_indirect(padap, ma_fli->ireg_addr,
2630 ma_fli->ireg_data, buff, 1,
2631 ma_fli->ireg_local_offset);
2633 ma_fli->ireg_local_offset += 0x20;
2637 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/*
 * cudbg_collect_ulptx_la() - Dump the ULP-TX logic analyzer state.
 *
 * Output is a versioned entity: a struct cudbg_ver_hdr (signature,
 * revision CUDBG_ULPTX_LA_REV, payload size) followed by a
 * struct cudbg_ulptx_la. For each of the CUDBG_NUM_ULPTX engines the
 * read/write pointers and read data are captured; repeated reads of
 * ULP_TX_LA_RDDATA pop successive LA words (presumably FIFO semantics —
 * confirm against the chip databook). A second loop captures the ASIC
 * debug registers after kicking ULP_TX_ASIC_DEBUG_CTRL.
 */
2640 int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
2641 struct cudbg_buffer *dbg_buff,
2642 struct cudbg_error *cudbg_err)
2644 struct adapter *padap = pdbg_init->adap;
2645 struct cudbg_buffer temp_buff = { 0 };
2646 struct cudbg_ulptx_la *ulptx_la_buff;
2647 struct cudbg_ver_hdr *ver_hdr;
2651 rc = cudbg_get_buff(pdbg_init, dbg_buff,
2652 sizeof(struct cudbg_ver_hdr) +
2653 sizeof(struct cudbg_ulptx_la),
/* Version header lets the parser detect the payload revision */
2658 ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
2659 ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
2660 ver_hdr->revision = CUDBG_ULPTX_LA_REV;
2661 ver_hdr->size = sizeof(struct cudbg_ulptx_la);
2663 ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
2665 for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
2666 ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
2667 ULP_TX_LA_RDPTR_0_A +
2669 ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
2670 ULP_TX_LA_WRPTR_0_A +
2672 ulptx_la_buff->rddata[i] = t4_read_reg(padap,
2673 ULP_TX_LA_RDDATA_0_A +
2675 for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
2676 ulptx_la_buff->rd_data[i][j] =
2678 ULP_TX_LA_RDDATA_0_A + 0x10 * i);
2681 for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
/* Writing 1 to the debug control latches the next debug snapshot */
2682 t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
2683 ulptx_la_buff->rdptr_asic[i] =
2684 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
2685 ulptx_la_buff->rddata_asic[i][0] =
2686 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
2687 ulptx_la_buff->rddata_asic[i][1] =
2688 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
2689 ulptx_la_buff->rddata_asic[i][2] =
2690 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
2691 ulptx_la_buff->rddata_asic[i][3] =
2692 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
2693 ulptx_la_buff->rddata_asic[i][4] =
2694 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
/* NOTE(review): slot [5] reads PM_RX_BASE_ADDR, not a ULP_TX register —
 * looks intentional upstream but worth confirming against the databook. */
2695 ulptx_la_buff->rddata_asic[i][5] =
2696 t4_read_reg(padap, PM_RX_BASE_ADDR);
2699 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/*
 * cudbg_collect_up_cim_indirect() - Dump the UP CIM indirect registers
 * for T5 (t5_up_cim_reg_array) or T6 (t6_up_cim_reg_array); other chips
 * get CUDBG_STATUS_NOT_IMPLEMENTED.
 *
 * Each table row describes an indirect register window; most rows are
 * read as a single contiguous range, but the TSCH channel/class
 * instances are iterated with a per-instance offset stride (0x120 or
 * 0x10) via repeated t4_cim_read() calls. On a read failure the
 * temporary buffer is released and the error propagated.
 *
 * NOTE(review): the switch statement header on 'instance', its default
 * arm (iter = 1), the error assignment after t4_cim_read() and the
 * 'up_cim++' advance are not visible in this extract; visible lines are
 * kept byte-identical.
 */
2702 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
2703 struct cudbg_buffer *dbg_buff,
2704 struct cudbg_error *cudbg_err)
2706 struct adapter *padap = pdbg_init->adap;
2707 struct cudbg_buffer temp_buff = { 0 };
2708 u32 local_offset, local_range;
2709 struct ireg_buf *up_cim;
2714 if (is_t5(padap->params.chip))
2715 n = sizeof(t5_up_cim_reg_array) /
2716 ((IREG_NUM_ELEM + 1) * sizeof(u32));
2717 else if (is_t6(padap->params.chip))
2718 n = sizeof(t6_up_cim_reg_array) /
2719 ((IREG_NUM_ELEM + 1) * sizeof(u32));
2721 return CUDBG_STATUS_NOT_IMPLEMENTED;
2723 size = sizeof(struct ireg_buf) * n;
2724 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2728 up_cim = (struct ireg_buf *)temp_buff.data;
2729 for (i = 0; i < n; i++) {
2730 struct ireg_field *up_cim_reg = &up_cim->tp_pio;
2731 u32 *buff = up_cim->outbuf;
2733 if (is_t5(padap->params.chip)) {
2734 up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
2735 up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
2736 up_cim_reg->ireg_local_offset =
2737 t5_up_cim_reg_array[i][2];
2738 up_cim_reg->ireg_offset_range =
2739 t5_up_cim_reg_array[i][3];
/* 5th column selects per-row iteration behavior (see switch below) */
2740 instance = t5_up_cim_reg_array[i][4];
2741 } else if (is_t6(padap->params.chip)) {
2742 up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
2743 up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
2744 up_cim_reg->ireg_local_offset =
2745 t6_up_cim_reg_array[i][2];
2746 up_cim_reg->ireg_offset_range =
2747 t6_up_cim_reg_array[i][3];
2748 instance = t6_up_cim_reg_array[i][4];
/* TSCH channel rows: one read per instance, 0x120 apart */
2752 case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
2753 iter = up_cim_reg->ireg_offset_range;
2754 local_offset = 0x120;
/* TSCH class rows: one read per instance, 0x10 apart */
2757 case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
2758 iter = up_cim_reg->ireg_offset_range;
2759 local_offset = 0x10;
2765 local_range = up_cim_reg->ireg_offset_range;
2769 for (j = 0; j < iter; j++, buff++) {
2770 rc = t4_cim_read(padap,
2771 up_cim_reg->ireg_local_offset +
2772 (j * local_offset), local_range, buff);
2774 cudbg_put_buff(pdbg_init, &temp_buff);
2780 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/*
 * cudbg_collect_pbt_tables() - Dump the PBT (Payload Buffer Table)
 * dynamic, static, LRF and data entries via CIM reads.
 *
 * Four sequential loops read one 32-bit word per entry with
 * t4_cim_read(); on any failure the error is recorded in @cudbg_err,
 * the temporary buffer is released and the error propagated.
 *
 * NOTE(review): the 'if (rc)' guards before each error path, the
 * 'return rc;' statements and the destination of the final data-entry
 * read (line 2841/2842) are not visible in this extract; visible lines
 * are kept byte-identical.
 */
2783 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
2784 struct cudbg_buffer *dbg_buff,
2785 struct cudbg_error *cudbg_err)
2787 struct adapter *padap = pdbg_init->adap;
2788 struct cudbg_buffer temp_buff = { 0 };
2789 struct cudbg_pbt_tables *pbt;
2793 rc = cudbg_get_buff(pdbg_init, dbg_buff,
2794 sizeof(struct cudbg_pbt_tables),
2799 pbt = (struct cudbg_pbt_tables *)temp_buff.data;
2800 /* PBT dynamic entries */
2801 addr = CUDBG_CHAC_PBT_ADDR;
2802 for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
2803 rc = t4_cim_read(padap, addr + (i * 4), 1,
2804 &pbt->pbt_dynamic[i]);
2806 cudbg_err->sys_err = rc;
2807 cudbg_put_buff(pdbg_init, &temp_buff);
2812 /* PBT static entries */
2813 /* static entries start when bit 6 is set */
2814 addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
2815 for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
2816 rc = t4_cim_read(padap, addr + (i * 4), 1,
2817 &pbt->pbt_static[i]);
2819 cudbg_err->sys_err = rc;
2820 cudbg_put_buff(pdbg_init, &temp_buff);
/* LRF entries */
2826 addr = CUDBG_CHAC_PBT_LRF;
2827 for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
2828 rc = t4_cim_read(padap, addr + (i * 4), 1,
2829 &pbt->lrf_table[i]);
2831 cudbg_err->sys_err = rc;
2832 cudbg_put_buff(pdbg_init, &temp_buff);
2837 /* PBT data entries */
2838 addr = CUDBG_CHAC_PBT_DATA;
2839 for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
2840 rc = t4_cim_read(padap, addr + (i * 4), 1,
2843 cudbg_err->sys_err = rc;
2844 cudbg_put_buff(pdbg_init, &temp_buff);
2848 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/*
 * cudbg_collect_mbox_log() - Dump the firmware mailbox command log.
 *
 * Walks the adapter's circular mbox_cmd_log starting at its cursor,
 * wrapping modulo log->size. Unused slots (timestamp == 0) are skipped.
 * Each used entry is copied into a struct cudbg_mbox_log and its 64-bit
 * command flits are split into hi/lo 32-bit halves for the dump format.
 *
 * NOTE(review): the 'continue' for unused entries and the 'mboxlog++'
 * advance between entries are not visible in this extract; visible
 * lines are kept byte-identical.
 */
2851 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
2852 struct cudbg_buffer *dbg_buff,
2853 struct cudbg_error *cudbg_err)
2855 struct adapter *padap = pdbg_init->adap;
2856 struct cudbg_mbox_log *mboxlog = NULL;
2857 struct cudbg_buffer temp_buff = { 0 };
2858 struct mbox_cmd_log *log = NULL;
2859 struct mbox_cmd *entry;
2860 unsigned int entry_idx;
2866 log = padap->mbox_log;
2867 mbox_cmds = padap->mbox_log->size;
2868 size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
2869 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2873 mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
2874 for (k = 0; k < mbox_cmds; k++) {
/* Oldest entry first: start at the cursor and wrap around */
2875 entry_idx = log->cursor + k;
2876 if (entry_idx >= log->size)
2877 entry_idx -= log->size;
2879 entry = mbox_cmd_log_entry(log, entry_idx);
2880 /* skip over unused entries */
2881 if (entry->timestamp == 0)
2884 memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
2885 for (i = 0; i < MBOX_LEN / 8; i++) {
2886 flit = entry->cmd[i];
2887 mboxlog->hi[i] = (u32)(flit >> 32);
2888 mboxlog->lo[i] = (u32)flit;
2892 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
/*
 * cudbg_collect_hma_indirect() - Dump the HMA (Host Memory Access)
 * indirect registers. T6-only; earlier chips return ENTITY_NOT_FOUND.
 *
 * One struct ireg_buf per row of t6_hma_ireg_array, each filled with a
 * contiguous indirect-register range via t4_read_indirect(). Mirrors
 * the first pass of cudbg_collect_ma_indirect().
 *
 * NOTE(review): the 'hma_indr++' advance between rows is not visible in
 * this extract; visible lines are kept byte-identical.
 */
2895 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
2896 struct cudbg_buffer *dbg_buff,
2897 struct cudbg_error *cudbg_err)
2899 struct adapter *padap = pdbg_init->adap;
2900 struct cudbg_buffer temp_buff = { 0 };
2901 struct ireg_buf *hma_indr;
2905 if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
2906 return CUDBG_STATUS_ENTITY_NOT_FOUND;
2908 n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
2909 size = sizeof(struct ireg_buf) * n;
2910 rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2914 hma_indr = (struct ireg_buf *)temp_buff.data;
2915 for (i = 0; i < n; i++) {
2916 struct ireg_field *hma_fli = &hma_indr->tp_pio;
2917 u32 *buff = hma_indr->outbuf;
2919 hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
2920 hma_fli->ireg_data = t6_hma_ireg_array[i][1];
2921 hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
2922 hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
2923 t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
2924 buff, hma_fli->ireg_offset_range,
2925 hma_fli->ireg_local_offset);
2928 return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);