// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
 */

#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
#include "cudbg_zlib.h"

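/* Memory region entities collected when CXGB4_ETH_DUMP_MEM is requested */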
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
        { CUDBG_EDC0, cudbg_collect_edc0_meminfo },
        { CUDBG_EDC1, cudbg_collect_edc1_meminfo },
        { CUDBG_MC0, cudbg_collect_mc0_meminfo },
        { CUDBG_MC1, cudbg_collect_mc1_meminfo },
        { CUDBG_HMA, cudbg_collect_hma_meminfo },
};

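/* Hardware state entities collected when CXGB4_ETH_DUMP_HW is requested */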
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
        { CUDBG_MBOX_LOG, cudbg_collect_mbox_log },
        { CUDBG_QDESC, cudbg_collect_qdesc },
        { CUDBG_DEV_LOG, cudbg_collect_fw_devlog },
        { CUDBG_REG_DUMP, cudbg_collect_reg_dump },
        { CUDBG_CIM_LA, cudbg_collect_cim_la },
        { CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la },
        { CUDBG_CIM_QCFG, cudbg_collect_cim_qcfg },
        { CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 },
        { CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 },
        { CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp },
        { CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 },
        { CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 },
        { CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi },
        { CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 },
        { CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 },
        { CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 },
        { CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 },
        { CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge },
        { CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi },
        { CUDBG_RSS, cudbg_collect_rss },
        { CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config },
        { CUDBG_PATH_MTU, cudbg_collect_path_mtu },
        { CUDBG_PM_STATS, cudbg_collect_pm_stats },
        { CUDBG_HW_SCHED, cudbg_collect_hw_sched },
        { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect },
        { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
        { CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
        { CUDBG_TP_LA, cudbg_collect_tp_la },
        { CUDBG_MEMINFO, cudbg_collect_meminfo },
        { CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
        { CUDBG_CLK, cudbg_collect_clk_info },
        { CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
        { CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 },
        { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
        { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
        { CUDBG_TID_INFO, cudbg_collect_tid },
        { CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
        { CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
        { CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
        { CUDBG_VPD_DATA, cudbg_collect_vpd_data },
        { CUDBG_LE_TCAM, cudbg_collect_le_tcam },
        { CUDBG_CCTRL, cudbg_collect_cctrl },
        { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect },
        { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la },
        { CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect },
        { CUDBG_PBT_TABLE, cudbg_collect_pbt_tables },
        { CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
};

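/* Flash contents collected when CXGB4_ETH_DUMP_FLASH is requested */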
static const struct cxgb4_collect_entity cxgb4_collect_flash_dump[] = {
        { CUDBG_FLASH, cudbg_collect_flash },
};

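/* Return the buffer size needed to dump the entity groups selected by @flag.
 * When zlib compression is available, a smaller destination buffer suffices,
 * so the length is capped at CUDBG_DUMP_BUFF_SIZE.
 */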
u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
        u32 i, entity;
        u32 len = 0;
        u32 wsize;

        if (flag & CXGB4_ETH_DUMP_HW) {
                for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
                        entity = cxgb4_collect_hw_dump[i].entity;
                        len += cudbg_get_entity_length(adap, entity);
                }
        }

        if (flag & CXGB4_ETH_DUMP_MEM) {
                for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) {
                        entity = cxgb4_collect_mem_dump[i].entity;
                        len += cudbg_get_entity_length(adap, entity);
                }
        }

        if (flag & CXGB4_ETH_DUMP_FLASH)
                len += adap->params.sf_size;

        /* If compression is enabled, a smaller destination buffer is enough */
        wsize = cudbg_get_workspace_size();
        if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
                len = CUDBG_DUMP_BUFF_SIZE;

        return len;
}

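/* Run the collect callback of every entity in @e_arr and fill in the
 * corresponding entity header in @buf.  An entity that fails to collect is
 * recorded in its header and skipped; collection continues with the next
 * entity.  The combined size of the collected data is added to @tot_size.
 */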
static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
                                       struct cudbg_buffer *dbg_buff,
                                       const struct cxgb4_collect_entity *e_arr,
                                       u32 arr_size, void *buf, u32 *tot_size)
{
        struct cudbg_error cudbg_err = { 0 };
        struct cudbg_entity_hdr *entity_hdr;
        u32 i, total_size = 0;
        int ret;

        for (i = 0; i < arr_size; i++) {
                const struct cxgb4_collect_entity *e = &e_arr[i];

                entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
                entity_hdr->entity_type = e->entity;
                entity_hdr->start_offset = dbg_buff->offset;
                memset(&cudbg_err, 0, sizeof(struct cudbg_error));
                ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err);
                if (ret) {
                        entity_hdr->size = 0;
                        dbg_buff->offset = entity_hdr->start_offset;
                } else {
                        cudbg_align_debug_buffer(dbg_buff, entity_hdr);
                }

                /* Log error and continue with next entity */
                if (cudbg_err.sys_err)
                        ret = CUDBG_SYSTEM_ERROR;

                entity_hdr->hdr_flags = ret;
                entity_hdr->sys_err = cudbg_err.sys_err;
                entity_hdr->sys_warn = cudbg_err.sys_warn;
                total_size += entity_hdr->size;
        }

        *tot_size += total_size;
}

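/* Allocate a single region holding both the intermediate compression buffer
 * and the zlib workspace used while deflating entity data.
 */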
static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
{
        u32 workspace_size;

        workspace_size = cudbg_get_workspace_size();
        pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
                                           workspace_size);
        if (!pdbg_init->compress_buff)
                return -ENOMEM;

        pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
        pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
                               CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
        return 0;
}

static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
{
        vfree(pdbg_init->compress_buff);
}

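/* Collect the debug entities selected by @flag into @buf.  The dump starts
 * with a cudbg_hdr followed by one cudbg_entity_hdr per entity, with the
 * entity data placed after the headers.  On success, *buf_size is updated
 * with the amount of data written (the full buffer size when compression
 * was used).
 */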
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
                        u32 flag)
{
        struct cudbg_buffer dbg_buff = { 0 };
        u32 size, min_size, total_size = 0;
        struct cudbg_init cudbg_init;
        struct cudbg_hdr *cudbg_hdr;
        int rc;

        size = *buf_size;

        memset(&cudbg_init, 0, sizeof(struct cudbg_init));
        cudbg_init.adap = adap;
        cudbg_init.outbuf = buf;
        cudbg_init.outbuf_size = size;

        dbg_buff.data = buf;
        dbg_buff.size = size;
        dbg_buff.offset = 0;

        cudbg_hdr = (struct cudbg_hdr *)buf;
        cudbg_hdr->signature = CUDBG_SIGNATURE;
        cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr);
        cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION;
        cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION;
        cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
        cudbg_hdr->chip_ver = adap->params.chip;
        cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;

        min_size = sizeof(struct cudbg_hdr) +
                   sizeof(struct cudbg_entity_hdr) *
                   cudbg_hdr->max_entities;
        if (size < min_size)
                return -ENOMEM;

        rc = cudbg_get_workspace_size();
        if (rc) {
                /* zlib is available, so use zlib deflate */
                cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
                rc = cudbg_alloc_compress_buff(&cudbg_init);
                if (rc) {
                        /* Ignore the error and continue without compression. */
                        dev_warn(adap->pdev_dev,
                                 "Failed to allocate compression buffer (ret: %d). Continuing without compression.\n",
                                 rc);
                        cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
                        rc = 0;
                }
        } else {
                cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
        }

        cudbg_hdr->compress_type = cudbg_init.compress_type;
        dbg_buff.offset += min_size;
        total_size = dbg_buff.offset;

        if (flag & CXGB4_ETH_DUMP_HW)
                cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
                                           cxgb4_collect_hw_dump,
                                           ARRAY_SIZE(cxgb4_collect_hw_dump),
                                           buf,
                                           &total_size);

        if (flag & CXGB4_ETH_DUMP_MEM)
                cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
                                           cxgb4_collect_mem_dump,
                                           ARRAY_SIZE(cxgb4_collect_mem_dump),
                                           buf,
                                           &total_size);

        if (flag & CXGB4_ETH_DUMP_FLASH)
                cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
                                           cxgb4_collect_flash_dump,
                                           ARRAY_SIZE(cxgb4_collect_flash_dump),
                                           buf,
                                           &total_size);

        cudbg_free_compress_buff(&cudbg_init);
        cudbg_hdr->data_len = total_size;
        if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
                *buf_size = size;
        else
                *buf_size = total_size;
        return 0;
}

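/* Reset the ethtool dump state: no entity groups selected, zero length, and
 * the dump version set to the running firmware version.
 */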
void cxgb4_init_ethtool_dump(struct adapter *adapter)
{
        adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE;
        adapter->eth_dump.version = adapter->params.fw_vers;
        adapter->eth_dump.len = 0;
}

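/* vmcore device dump callback: collect all entity groups into the buffer
 * reserved for this device dump.
 */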
static int cxgb4_cudbg_vmcoredd_collect(struct vmcoredd_data *data, void *buf)
{
        struct adapter *adap = container_of(data, struct adapter, vmcoredd);
        u32 len = data->size;

        return cxgb4_cudbg_collect(adap, buf, &len, CXGB4_ETH_DUMP_ALL);
}

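/* Register a device dump with the vmcore infrastructure so a full cudbg dump
 * is included in the crash dump.  The reserved size covers the cudbg headers
 * plus the bounded dump buffer.
 */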
int cxgb4_cudbg_vmcore_add_dump(struct adapter *adap)
{
        struct vmcoredd_data *data = &adap->vmcoredd;
        u32 len;

        len = sizeof(struct cudbg_hdr) +
              sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY;
        len += CUDBG_DUMP_BUFF_SIZE;

        data->size = len;
        snprintf(data->dump_name, sizeof(data->dump_name), "%s_%s",
                 cxgb4_driver_name, adap->name);
        data->vmcoredd_callback = cxgb4_cudbg_vmcoredd_collect;

        return vmcore_add_device_dump(data);
}