/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and /or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17
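
/* Driver <-> MFW mailbox flow, in brief: the driver writes an optional
 * payload into the "union_data" section of its SHMEM mailbox, writes the
 * command together with an incremented sequence number into drv_mb_header,
 * and then polls fw_mb_header until the MFW echoes that sequence number
 * back along with a response code and an optional response payload.
 */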
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;

	return true;
}
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; need to force it to CPU order */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}
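
/* A pending mailbox command is tracked as a list element so that the
 * response path can match an MFW completion (by sequence number) back to
 * the parameters of the command that produced it.
 */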
struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		goto out;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}
/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem, p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);
	p_hwfn->mcp_info = NULL;

	return 0;
}
/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
#define QED_MCP_SHMEM_RDY_ITER_MS	50
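
/* Read the SHMEM offsets of the public sections and the current mailbox
 * sequence numbers. Returns a negative errno if the MCP scratch-pad is not
 * configured or the SHMEM ready indication never shows up.
 */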
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base) {
		DP_NOTICE(p_hwfn,
			  "The address of the MCP scratch-pad is not configured\n");
		return -EINVAL;
	}

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
					    p_info->mfw_mb_addr +
					    offsetof(struct public_mfw_mb,
						     sup_msgs));

	/* The driver can notify that there was an MCP reset, and might read
	 * the SHMEM values before the MFW has completed initializing them.
	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
	 * data ready indication.
	 */
	while (!p_info->mfw_mb_length && --cnt) {
		msleep(msec);
		p_info->mfw_mb_length =
			(u16)qed_rd(p_hwfn, p_ptt,
				    p_info->mfw_mb_addr +
				    offsetof(struct public_mfw_mb, sup_msgs));
	}

	if (!cnt) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return -EBUSY;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}
static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return -EBUSY;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the MFW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}
/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}
/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}
/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}
static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}
static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
	u32 delay = QED_MCP_RESP_ITER_US;

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}
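
/* Issue a mailbox command and poll for its completion, in two phases:
 * first wait for any previously pending command to complete (the mailbox
 * holds a single outstanding command), then send the new command and wait
 * for the MFW to echo its sequence number. With the CAN_SLEEP flag the
 * polling interval is msleep()-based; otherwise udelay() is used, so the
 * function may also be called from atomic context.
 */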
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params,
		       u32 max_retries, u32 usecs)
{
	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u16 seq_num;
	int rc = 0;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn)) {
			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
			break;
		}

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc) {
			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
			break;
		} else if (rc != -EAGAIN) {
			goto err;
		}

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed) {
			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
			break;
		}

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc) {
			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
			break;
		} else if (rc != -EAGAIN) {
			goto err;
		}

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		qed_mcp_print_cpu_info(p_hwfn, p_ptt);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
			qed_mcp_cmd_set_blocking(p_hwfn, true);

		return -EAGAIN;
	}

	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
	u32 usecs = QED_MCP_RESP_ITER_US;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
		max_retries = DIV_ROUND_UP(max_retries, 1000);
		usecs *= 1000;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
				      usecs);
}
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}
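
/* Example usage (a minimal sketch): sending a command that carries no
 * union data and only returns a response code and a parameter, e.g.:
 *
 *	u32 resp = 0, param = 0;
 *	int rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 1000,
 *			     &resp, &param);
 *
 * qed_mcp_drain() later in this file follows exactly this pattern.
 */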
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}
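
/* Whether a force load is permitted is a policy decision: unless the
 * override says ALWAYS or NEVER, an OS driver may only displace a preboot
 * driver, and a kdump driver may only displace an OS driver.
 */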
static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}
static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}
#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)
static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

	return config_bitmap;
}
struct qed_load_req_in_params {
	u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};
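
/* Send a single LOAD_REQ mailbox command and decode the MFW's LOAD_RSP.
 * With the default HSI version the load_req_stc/load_rsp_stc union data is
 * exchanged; with HSI version 1 only the response code is valid.
 */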
static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}
static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				enum qed_drv_role drv_role,
				u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}
enum qed_load_req_force {
	QED_LOAD_REQ_FORCE_NONE,
	QED_LOAD_REQ_FORCE_PF,
	QED_LOAD_REQ_FORCE_ALL,
};
static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
				  enum qed_load_req_force force_cmd,
				  u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case QED_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case QED_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case QED_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params)
{
	struct qed_load_req_out_params out_params;
	struct qed_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = QED_VERSION;
	in_params.drv_ver_1 = qed_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	qed_get_mfw_force_cmd(p_hwfn,
			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	memset(&out_params, 0, sizeof(out_params));
	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
		memset(&out_params, 0, sizeof(out_params));
		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
		if (rc)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (qed_mcp_can_force_load(in_params.drv_role,
					   out_params.exist_drv_role,
					   p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			qed_get_mfw_force_cmd(p_hwfn,
					      QED_LOAD_REQ_FORCE_ALL,
					      &mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			memset(&out_params, 0, sizeof(out_params));
			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
						&out_params);
			if (rc)
				return rc;
		} else {
			DP_NOTICE(p_hwfn,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
			return -EBUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 */
			DP_NOTICE(p_hwfn, "PF is already loaded\n");
			return -EINVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return -EBUSY;
	}

	p_params->load_code = out_params.load_code;

	return 0;
}
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	u32 wol_param;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		/* Fall through */
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.param = wol_param;
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   (QED_MSG_SP | QED_MSG_IOV),
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}
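
/* Translate the port's eee_status word into the generic EEE fields of the
 * link state: whether EEE is active, and which speeds the local and link
 * partner sides advertise.
 */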
static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = qed_rd(p_hwfn,
			    p_ptt,
			    p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
		p_link->link_up = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
					    p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	qed_link_update(p_hwfn, p_ptt);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}
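
/* Push the link configuration from link_input into SHMEM and ask the MFW
 * to (re)init or reset the PHY. A link-change attention is mimicked at the
 * end since not every MFW generates one in these flows.
 */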
int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by qed, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & QED_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & QED_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}
static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
	    (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
		qed_sp_pf_update_stag(p_hwfn);
	}

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		    &resp, &param);
}
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			qed_mcp_update_stag(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* The MFW expects the answer in BE, so force the write in
		 * that format
		 */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	/* Read the address of the nvm_cfg */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read the offset of nvm_cfg1 */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		       offsetof(struct nvm_cfg1, glob) +
		       offsetof(struct nvm_cfg1_glob, mbi_version);
	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
			    mbi_ver_addr) &
		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

	return 0;
}
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_media_type)
{
	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (!p_ptt) {
		*p_media_type = MEDIA_UNSPECIFIED;
		return -EINVAL;
	}

	*p_media_type = qed_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port,
					media_type));

	return 0;
}
/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* There wasn't ever a legacy MFW that published iwarp.
	 * So at this point, this is either plain l2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}
static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for command; Returns %08x\n",
			   resp);
		return -EINVAL;
	}

	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		DP_NOTICE(p_hwfn,
			  "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);

	return 0;
}
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (!IS_ENABLED(CONFIG_QED_RDMA))
			*p_proto = QED_PCI_ETH;
		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_FCOE:
		*p_proto = QED_PCI_FCOE;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		/* Fall through */
	default:
		rc = -EINVAL;
	}

	return rc;
}
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}
struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}
int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}
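
/* The NVM flash size is encoded in MCP_REG_NVM_CFG4 as a power of two in
 * Mbit units; MCP_BYTES_PER_MBIT_SHIFT (17, i.e. 2^17 bytes per Mbit)
 * converts the result to bytes. For example, an encoded value of 3 yields
 * 1 << (3 + 17) = 1MB.
 */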
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}
static int
qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}
static int
qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 num)
{
	u32 resp = 0, param = num, rc_param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
			 param, &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VFs\n", num);
	}

	return rc;
}
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	if (QED_IS_BB(p_hwfn->cdev))
		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
	else
		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
}
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct qed_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	__be32 val;
	u32 i;
	int rc;

	memset(&drv_version, 0, sizeof(drv_version));
	drv_version.version = p_ver->version;
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
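
/* Halting the MCP blocks further mailbox commands (see
 * qed_mcp_cmd_set_blocking()); qed_mcp_resume() re-enables them once the
 * CPU is confirmed to be out of the soft-halted state.
 */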
/* A maximal 100 msec waiting time for the MCP to halt */
#define QED_MCP_HALT_SLEEP_MS		10
#define QED_MCP_HALT_MAX_RETRIES	10

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0, cpu_state, cnt = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	do {
		msleep(QED_MCP_HALT_SLEEP_MS);
		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
			break;
	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);

	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
		DP_NOTICE(p_hwfn,
			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
		return -EBUSY;
	}

	qed_mcp_cmd_set_blocking(p_hwfn, true);

	return 0;
}
#define QED_MCP_RESUME_SLEEP_MS	10

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
	msleep(QED_MCP_RESUME_SLEEP_MS);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
		DP_NOTICE(p_hwfn,
			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  cpu_mode, cpu_state);
		return -EBUSY;
	}

	qed_mcp_cmd_set_blocking(p_hwfn, false);

	return 0;
}
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	u32 mfw_mac[2];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);

	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
	 * in 32-bit granularity.
	 * So the MAC has to be set in native order [and not byte order],
	 * otherwise it would be read incorrectly by MFW after swap.
	 */
	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;

	mb_params.p_data_src = (u8 *)mfw_mac;
	mb_params.data_src_size = 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}
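
/* Worked example (illustrative): for MAC 00:11:22:33:44:55, the native-order
 * packing above yields mfw_mac[0] = 0x00112233 and mfw_mac[1] = 0x44550000,
 * so the MFW reads the bytes back in the right order after the 32-bit PCI
 * swap on little-endian hosts.
 */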

int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_SHIFT),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
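
/* Usage sketch (illustrative): reading a 1 KB blob from a hypothetical NVM
 * offset. The function acquires its own PTT, so the caller only supplies
 * the device, address and buffer.
 *
 *	u8 buf[1024];
 *
 *	if (qed_mcp_nvm_read(cdev, 0x1000, buf, sizeof(buf)))
 *		DP_NOTICE(cdev, "NVM read failed\n");
 */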

int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}
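
/* Sketch of an ethtool-style self-test flow built on the two BIST helpers
 * above (illustrative only; the real qede selftest wiring lives elsewhere):
 *
 *	if (qed_mcp_bist_register_test(p_hwfn, p_ptt) ||
 *	    qed_mcp_bist_clock_test(p_hwfn, p_ptt))
 *		DP_NOTICE(p_hwfn, "BIST self-test failed\n");
 */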

int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, num_images);
	if (rc)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = -EINVAL;

	return rc;
}

int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct bist_nvm_image_att *p_image_att,
					u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EINVAL;

	return rc;
}

static int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  enum qed_nvm_images image_id,
			  struct qed_nvm_image_att *p_image_att)
{
	struct bist_nvm_image_att mfw_image_att;
	enum nvm_image_type type;
	u32 num_images, i;
	int rc;

	/* Translate image_id into MFW definitions */
	switch (image_id) {
	case QED_NVM_IMAGE_ISCSI_CFG:
		type = NVM_TYPE_ISCSI_CFG;
		break;
	case QED_NVM_IMAGE_FCOE_CFG:
		type = NVM_TYPE_FCOE_CFG;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
			  image_id);
		return -EINVAL;
	}

	/* Learn number of images, then traverse and see if one fits */
	rc = qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images);
	if (rc || !num_images)
		return -EINVAL;

	for (i = 0; i < num_images; i++) {
		rc = qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
							 &mfw_image_att, i);
		if (rc)
			return rc;

		if (type == mfw_image_att.image_type)
			break;
	}
	if (i == num_images) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Failed to find nvram image of type %08x\n",
			   image_id);
		return -EINVAL;
	}

	p_image_att->start_addr = mfw_image_att.nvm_start_addr;
	p_image_att->length = mfw_image_att.len;

	return 0;
}

int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  enum qed_nvm_images image_id,
			  u8 *p_buffer, u32 buffer_len)
{
	struct qed_nvm_image_att image_att;
	int rc;

	memset(p_buffer, 0, buffer_len);

	rc = qed_mcp_get_nvm_image_att(p_hwfn, p_ptt, image_id, &image_att);
	if (rc)
		return rc;

	/* Validate sizes - both the image's and the supplied buffer's */
	if (image_att.length <= 4) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Image [%d] is too small - only %d bytes\n",
			   image_id, image_att.length);
		return -EINVAL;
	}

	/* Each NVM image is suffixed by CRC; Upper-layer has no need for it */
	image_att.length -= 4;

	if (image_att.length > buffer_len) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_STORAGE,
			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
			   image_id, image_att.length, buffer_len);
		return -ENOMEM;
	}

	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
				p_buffer, image_att.length);
}
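
/* Usage sketch (illustrative): fetching the iSCSI config image into a
 * caller-supplied buffer. The buffer size below is hypothetical; callers
 * must size it for the image they expect.
 *
 *	u8 cfg_buf[4096];
 *
 *	rc = qed_mcp_get_nvm_image(p_hwfn, p_ptt, QED_NVM_IMAGE_ISCSI_CFG,
 *				   cfg_buf, sizeof(cfg_buf));
 */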

static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case QED_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case QED_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case QED_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case QED_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case QED_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case QED_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case QED_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case QED_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case QED_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case QED_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define QED_RESC_ALLOC_VERSION_MAJOR	2
#define QED_RESC_ALLOC_VERSION_MINOR	0
#define QED_RESC_ALLOC_VERSION				     \
	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

struct qed_resc_alloc_in_params {
	u32 cmd;
	enum qed_resources res_id;
	u32 resc_max_val;
};

struct qed_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};

static int
qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct qed_resc_alloc_in_params *p_in_params,
			    struct qed_resc_alloc_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	int rc;

	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       qed_hw_get_resc_name(p_in_params->res_id));
		return -EINVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = QED_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd,
		   p_in_params->res_id,
		   qed_hw_get_resc_name(p_in_params->res_id),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num,
		   p_out_params->resc_start,
		   p_out_params->vf_resc_num,
		   p_out_params->vf_resc_start, p_out_params->flags);

	return 0;
}

int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return 0;
}

int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return 0;
}
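
/* Usage sketch (illustrative): querying how many L2 queues the MFW has
 * allocated to this PF, and where the range starts.
 *
 *	u32 mcp_resp, num, start;
 *
 *	if (!qed_mcp_get_resc_info(p_hwfn, p_ptt, QED_L2_QUEUE,
 *				   &mcp_resp, &num, &start) &&
 *	    mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		DP_VERBOSE(p_hwfn, QED_MSG_SP,
 *			   "L2 queues: num %u start %u\n", num, start);
 */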

int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			   &mcp_resp, &mcp_param);
}

static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			 p_mcp_resp, p_mcp_param);
	if (rc)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return -EINVAL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return -EINVAL;
	}

	return rc;
}

static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	switch (p_params->timeout) {
	case QED_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case QED_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	int rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
				    DIV_ROUND_UP(p_params->retry_interval,
						 1000);

				msleep(retry_interval_in_ms);
			} else {
				udelay(p_params->retry_interval);
			}
		}

		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return 0;
}

int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent)
{
	if (p_lock) {
		memset(p_lock, 0, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since it's
		 * unexpected another entity would release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock) {
		memset(p_unlock, 0, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}
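
/* Usage sketch (illustrative): taking and releasing a lock with the default
 * parameters. QED_RESC_LOCK_PTP_PORT0 is used here purely as an example
 * resource id.
 *
 *	struct qed_resc_unlock_params unlock_params;
 *	struct qed_resc_lock_params lock_params;
 *
 *	qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
 *				       QED_RESC_LOCK_PTP_PORT0, false);
 *	if (!qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) &&
 *	    lock_params.b_granted) {
 *		... critical section against other PFs/drivers ...
 *		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 *	}
 */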

int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (!rc)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			   features, &mcp_resp, &mcp_param);
}
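
/* After qed_mcp_get_capabilities() the cached bitmap can be tested before
 * relying on an optional MFW feature. A minimal sketch, assuming the EEE
 * capability bit FW_MB_PARAM_FEATURE_SUPPORT_EEE:
 *
 *	if (p_hwfn->mcp_info->capabilities &
 *	    FW_MB_PARAM_FEATURE_SUPPORT_EEE)
 *		... EEE-specific configuration ...
 */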