/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

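/* Most of the code inside CONFIG_BNXT_SRIOV below runs in the PF driver:
 * the .ndo_set_vf_* callbacks reached via rtnetlink
 * (e.g. "ip link set <pf> vf <n> ..."), VF resource provisioning, and
 * forwarding of HWRM commands that VFs tunnel through the PF.
 * bnxt_update_vf_mac() and bnxt_approve_mac() near the end run on the
 * VF side.
 */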
#ifdef CONFIG_BNXT_SRIOV
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_fwd_async_event_cmpl_input req = {0};
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
	if (vf)
		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req.encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
		goto fwd_async_event_cmpl_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_async_event_cmpl_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

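/* Validate the common preconditions for the .ndo_set_vf_* handlers below:
 * the PF must be open, SR-IOV must be active, and vf_id must be in range.
 */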
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called even though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called even though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

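/* .ndo_set_vf_spoofchk handler, typically reached via
 * "ip link set <pf> vf <n> spoofchk {on|off}".  Enables or disables the
 * firmware's source MAC check on the VF function and mirrors the result
 * in vf->flags.
 */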
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	if (setting)
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

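/* .ndo_get_vf_config handler: report the driver's cached per-VF settings
 * (MAC, VLAN, rates, spoof check, link state), e.g. for "ip link show".
 */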
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	if (vf->flags & BNXT_VF_QOS)
		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	else
		ivi->qos = 0;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

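/* .ndo_set_vf_mac handler: cache the administratively assigned MAC and
 * program it as the VF function's default MAC via HWRM_FUNC_CFG.
 */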
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* reject broadcast or multicast MAC addresses; a zero MAC address
	 * means the VF may use its own MAC address
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

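/* .ndo_set_vf_vlan handler: program a default VLAN for the VF via
 * HWRM_FUNC_CFG.  Only 802.1Q is supported, and a nonzero user priority
 * is rejected (see the TODO below).
 */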
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: implement proper handling of user priority; for now, fail
	 * the command if a valid priority is specified
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

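/* .ndo_set_vf_rate handler: validate the requested min/max rates against
 * the PF link speed, then program the VF's bandwidth limits via
 * HWRM_FUNC_CFG.
 */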
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

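/* .ndo_set_vf_link_state handler: "auto" tracks the PF's physical link,
 * while "enable"/"disable" force the VF link state.  The new state is
 * pushed to the VF by forwarding a LINK_STATUS_CHANGE async completion.
 */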
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	bp->pf.active_vfs = 0;
	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

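/* Allocate PF-side state for num_vfs VFs: a bnxt_vf_info array, DMA pages
 * backing each VF's forwarded HWRM request buffer (one
 * BNXT_HWRM_REQ_MAX_SIZE slot per VF, BNXT_HWRM_REQS_PER_PAGE slots per
 * page), and a 16-byte (128-bit) event bitmap, one bit per possible VF.
 */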
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

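/* Reserve firmware resources for each VF by splitting whatever the PF is
 * not using evenly across the VFs.  With illustrative numbers: if the
 * firmware caps allow 64 RX rings, the PF uses 8, and aggregation rings
 * are enabled, then 8 VFs get (64 - 8 * 2) / 8 = 6 RX rings apiece.
 */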
/* Only called by the PF, to reserve resources for VFs */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	u16 vf_ring_grps;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	int total_vf_tx_rings = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally amongst VFs for now */
	vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		pf->max_tx_rings -= total_vf_tx_rings;
		pf->max_rx_rings -= vf_rx_rings * num_vfs;
		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		pf->max_cp_rings -= vf_cp_rings * num_vfs;
		pf->max_rsscos_ctxs -= num_vfs;
		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		pf->max_vnics -= vf_vnics * num_vfs;
	}
	return rc;
}

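/* Work out how many VFs the spare PF resources can support (counting
 * down from the requested number until at least one RX ring, TX ring and
 * RSS context per VF fit), then allocate, reserve and register all VF
 * resources before calling pci_enable_sriov().
 */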
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable the requested number of VFs. At a minimum
	 * we require one RX and one TX ring for each VF. In this minimum
	 * config, features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
	avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by the PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
		goto err_out2;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	bnxt_ulp_sriov_cfg(bp, *num_vfs);

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VFs */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	mutex_lock(&bp->sriov_lock);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VFs */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	mutex_unlock(&bp->sriov_lock);

	bnxt_free_vf_resources(bp);

	/* Reclaim all resources for the PF. */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}

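/* .sriov_configure hook of the PCI driver, reached when the admin writes
 * a VF count to sysfs, e.g.:
 *
 *	echo 4 > /sys/bus/pci/devices/0000:3b:00.0/sriov_numvfs
 *
 * (illustrative device address).  A count of 0 disables SR-IOV; any
 * other value tears down existing VFs first, then enables the requested
 * number.
 */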
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Cannot enable SR-IOV when the interrupt mode is not MSI-X\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Rejecting SR-IOV config request since the interface is down\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SR-IOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if the enabled VF count is the same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previously existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

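/* VFs tunnel certain HWRM commands through the PF: the request lands in
 * the VF's PF-visible request buffer, and the PF answers in one of three
 * ways.  HWRM_FWD_RESP (below) returns a PF-crafted response,
 * HWRM_REJECT_FWD_RESP refuses the request, and HWRM_EXEC_FWD_RESP has
 * the firmware execute it unchanged.
 */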
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;

	if (!is_valid_ether_addr(vf->mac_addr) ||
	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	else
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
			(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO: validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

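/* Entry point from the PF's async event handling: scan vf_event_bmap for
 * VFs with pending requests and validate/forward each one.
 */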
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VFs and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

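/* VF-side: query our function's capabilities and adopt the PF-assigned
 * MAC address, if any; see the comment in the body for the zero-MAC case.
 */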
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware.  There are 2 cases:
	 * 1. MAC address is valid.  It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero.  The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to
	 *    change the random MAC address using ndo_set_mac_address() if
	 *    desired.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

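/* VF-side: ask the PF (via HWRM_FUNC_VF_CFG) to approve a MAC address
 * that the VF user wants to set.  Older firmware cannot forward the
 * request, so there the change is only blocked if the PF has already
 * assigned a valid MAC.
 */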
int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
	if (rc) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
	}
	return rc;
}

#else
void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SR-IOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
{
	return 0;
}
#endif