/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
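
/* For example, with a firmware version of 8.37.2.0 (illustrative numbers
 * only), FW_FILE_VERSION stringifies to "8.37.2.0" and QED_FW_FILE_NAME
 * expands to "qed/qed_init_values_zipped-8.37.2.0.bin".
 */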

static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);

	iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
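
/* qed_init_pci() above maps two PCI BARs: BAR 0 (regview) is the register
 * window, and BAR 2 (doorbells, mapped write-combined) carries fastpath
 * doorbell writes. A VF may legitimately lack a doorbell BAR, which is
 * why a zero db_size only fails for PFs.
 */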

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	/* For VFs, we should return with an error in case we didn't get the
	 * exact number of msix vectors as we requested.
	 * Not doing that will lead to a crash when starting queues for
	 * this VF.
	 */
	if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}
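
/* The handler index is interleaved across hwfns: on a two-hwfn device,
 * for example, index 5 maps to hwfn 1 (5 % 2), relative handler 2 (5 / 2).
 * This mirrors the sb_id -> hwfn spread used by qed_sb_init() below.
 */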

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int rc, i;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}
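
/* In INTA/MSI mode all hwfns share a single IRQ, so the fastpath limit is
 * a fixed 63 status blocks per hwfn; in MSI-X mode it is whatever remained
 * after the slowpath vectors were carved off in qed_slowpath_setup_int().
 */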

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
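
/* Worked example of the split above (illustrative numbers): with 2 hwfns,
 * 34 fastpath vectors and 16 L2 queues per hwfn (32 total), the leftover
 * vectors give rdma_msix_cnt = (34 - 32) / 2 = 1, rdma_msix_base =
 * fp_msix_base + 32, and fp_msix_cnt is trimmed to 32.
 */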

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
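
/* Note the return convention: on success this is the unzipped length in
 * dwords (total_out / 4), not bytes, while any zlib failure yields 0, so
 * callers presumably treat a zero return as an error.
 */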

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		flush_workqueue(cdev->hwfns[i].slowpath_wq);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
	}

	return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (link_caps.default_speed_autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.autoneg)
		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
	else
		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}

static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc = 0, j;
	u32 val;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	for (j = 0; j < nvm_image->length - 4; j += 4) {
		val = cpu_to_be32(*(u32 *)&buf[j]);
		*(u32 *)&buf[j] = val;
	}

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));

out:
	kfree(buf);

	return rc;
}
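
/* The byte swap above makes the checksum match the big-endian layout the
 * management FW expects: the image (minus its trailing 4 CRC bytes) is
 * converted to big-endian dwords before crc32(), and the result is then
 * bit-inverted.
 */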

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                      0x4 [command index]                          |
 * 4B  | image_type  |   Options   |   Number of register settings        |
 * 8B  |                          Value                                    |
 * 12B |                          Mask                                     |
 * 16B |                          Offset                                   |
 * \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type, crc = 0;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	*check_resp = !!(**data & BIT(0));
	(*data)++;
	if (is_crc) {
		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}

exit:
	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                      0x3 [command index]                          |
 * 4B  | b'0: check_response?            | b'1-31 reserved                 |
 * 8B  | File-type   |                    reserved                         |
 * \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", **data);
	rc = qed_mcp_nvm_put_file_begin(cdev, **data);
	*data += 4;

	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                      0x2 [command index]                          |
 * 4B  |                      Length in bytes                              |
 * 8B  | b'0: check_response?            | b'1-31 reserved                 |
 * 12B |                      Offset in bytes                              |
 * 16B |                      Data ...                                     |
 * \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}
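
/* Illustrative little-endian encoding of one such command (made-up
 * values): 02 00 00 00 (index 0x2), 00 10 00 00 (length 0x1000), a 4-byte
 * options word with bit 0 set to request a response check, the 4-byte
 * offset, and then 0x1000 bytes of payload.
 */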

/* Binary file format [General header] -
 * /----------------------------------------------------------------------\
 * 0B  |                      QED_NVM_SIGNATURE                            |
 * 4B  |                      Length in bytes                              |
 * 8B  | Highest command in this batchfile |          Reserved             |
 * \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}

static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}

static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.read_module_eeprom = &qed_read_module_eeprom,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}
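
/* The barrier pair above makes the flag update visible before the work is
 * queued; qed_slowpath_task() consumes it with test_and_clear_bit(), so a
 * request arriving while the task is already running is not lost.
 */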

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);