1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2016 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
29 /*********************notification routines***********************/
32 * i40e_vc_vf_broadcast
33 * @pf: pointer to the PF structure
34 * @opcode: operation code
35 * @retval: return value
36 * @msg: pointer to the msg buffer
39 * send a message to all VFs on a given PF
/* NOTE(review): this excerpt is truncated (gaps in the embedded original
 * line numbering) — verify any change against the complete source file.
 */
41 static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
42 enum virtchnl_ops v_opcode,
43 i40e_status v_retval, u8 *msg,
46 struct i40e_hw *hw = &pf->hw;
47 struct i40e_vf *vf = pf->vf;
/* Walk every allocated VF on this PF and send each the same message. */
50 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
51 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
52 /* Not all vfs are enabled so skip the ones that are not */
53 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
54 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
57 /* Ignore return value on purpose - a given VF may fail, but
58 * we need to keep going and send to all of them
60 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
66 * i40e_vc_notify_vf_link_state
67 * @vf: pointer to the VF structure
69 * send a link status message to a single VF
71 static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
73 struct virtchnl_pf_event pfe;
74 struct i40e_pf *pf = vf->pf;
75 struct i40e_hw *hw = &pf->hw;
76 struct i40e_link_status *ls = &pf->hw.phy.link_info;
77 int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
79 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
80 pfe.severity = PF_EVENT_SEVERITY_INFO;
/* If the host admin forced the VF's link state, report that forced
 * state (speed reported as 40G when up); otherwise mirror the PF's
 * actual link status and speed from the PHY link info.
 */
81 if (vf->link_forced) {
82 pfe.event_data.link_event.link_status = vf->link_up;
83 pfe.event_data.link_event.link_speed =
84 (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
86 pfe.event_data.link_event.link_status =
87 ls->link_info & I40E_AQ_LINK_UP;
88 pfe.event_data.link_event.link_speed =
89 (enum virtchnl_link_speed)ls->link_speed;
91 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
92 0, (u8 *)&pfe, sizeof(pfe), NULL);
96 * i40e_vc_notify_link_state
97 * @pf: pointer to the PF structure
99 * send a link status message to all VFs on a given PF
101 void i40e_vc_notify_link_state(struct i40e_pf *pf)
/* Fan the link-state notification out to every allocated VF. */
105 for (i = 0; i < pf->num_alloc_vfs; i++)
106 i40e_vc_notify_vf_link_state(&pf->vf[i]);
110 * i40e_vc_notify_reset
111 * @pf: pointer to the PF structure
113 * indicate a pending reset to all VFs on a given PF
115 void i40e_vc_notify_reset(struct i40e_pf *pf)
117 struct virtchnl_pf_event pfe;
/* Broadcast an impending-reset event to all VFs so their drivers can
 * quiesce before the hardware reset actually happens.
 */
119 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
120 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
121 i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
122 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
126 * i40e_vc_notify_vf_reset
127 * @vf: pointer to the VF structure
129 * indicate a pending reset to the given VF
131 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
133 struct virtchnl_pf_event pfe;
136 /* validate the request */
137 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
140 /* verify if the VF is in either init or active before proceeding */
141 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
142 !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
145 abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
/* Unlike i40e_vc_notify_reset(), this targets a single VF directly. */
147 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
148 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
149 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
151 sizeof(struct virtchnl_pf_event), NULL);
153 /***********************misc routines*****************************/
157 * @pf: pointer to the PF info
158 * @vf: pointer to the VF info
160 * Disable the VF through a SW reset
162 static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
/* Warn the VF first, then perform a software-initiated (non-VFLR) reset. */
164 i40e_vc_notify_vf_reset(vf);
165 i40e_reset_vf(vf, false);
169 * i40e_vc_isvalid_vsi_id
170 * @vf: pointer to the VF info
171 * @vsi_id: VF relative VSI id
173 * check for the valid VSI id
175 static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
177 struct i40e_pf *pf = vf->pf;
178 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
/* Valid only if the VSI exists and is owned by this particular VF. */
180 return (vsi && (vsi->vf_id == vf->vf_id));
184 * i40e_vc_isvalid_queue_id
185 * @vf: pointer to the VF info
187 * @qid: vsi relative queue id
189 * check for the valid queue id
191 static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
194 struct i40e_pf *pf = vf->pf;
195 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
/* Queue id is valid only when the VSI exists and qid is within the
 * number of queue pairs allocated to that VSI.
 */
197 return (vsi && (qid < vsi->alloc_queue_pairs));
201 * i40e_vc_isvalid_vector_id
202 * @vf: pointer to the VF info
203 * @vector_id: VF relative vector id
205 * check for the valid vector id
207 static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
209 struct i40e_pf *pf = vf->pf;
/* A VF-relative vector id must be below the per-VF MSI-X vector count. */
211 return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
214 /***********************vf resource mgmt routines*****************/
217 * i40e_vc_get_pf_queue_id
218 * @vf: pointer to the VF info
219 * @vsi_id: id of VSI as provided by the FW
220 * @vsi_queue_id: vsi relative queue id
222 * return PF relative queue id
224 static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
227 struct i40e_pf *pf = vf->pf;
228 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
229 u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;
/* Noncontiguous mapping: look the queue up in the per-queue table.
 * Contiguous mapping: PF queue id is base (queue_mapping[0]) plus offset.
 */
234 if (le16_to_cpu(vsi->info.mapping_flags) &
235 I40E_AQ_VSI_QUE_MAP_NONCONTIG)
237 le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
239 pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
246 * i40e_config_irq_link_list
247 * @vf: pointer to the VF info
248 * @vsi_id: id of VSI as given by the FW
249 * @vecmap: irq map info
251 * configure irq link list from the map
253 static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
254 struct virtchnl_vector_map *vecmap)
256 unsigned long linklistmap = 0, tempmap;
257 struct i40e_pf *pf = vf->pf;
258 struct i40e_hw *hw = &pf->hw;
259 u16 vsi_queue_id, pf_queue_id;
260 enum i40e_queue_type qtype;
261 u16 next_q, vector_id;
265 vector_id = vecmap->vector_id;
/* Vector 0 uses LNKLST0; all other vectors index into LNKLSTN, which is
 * laid out as (num_msix_vectors_vf - 1) entries per VF.
 */
268 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
270 reg_idx = I40E_VPINT_LNKLSTN(
271 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
274 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
275 /* Special case - No queues mapped on this vector */
276 wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
/* Build one combined bitmap with I40E_VIRTCHNL_SUPPORTED_QTYPES bits per
 * VSI queue, interleaving RX and TX so the link list can walk both.
 */
279 tempmap = vecmap->rxq_map;
280 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
281 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
285 tempmap = vecmap->txq_map;
286 for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
287 linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
/* The first queue in the list goes into the LNKLST head register. */
291 next_q = find_first_bit(&linklistmap,
293 I40E_VIRTCHNL_SUPPORTED_QTYPES));
294 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
295 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
296 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
297 reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
299 wr32(hw, reg_idx, reg);
/* Chain the remaining queues: each queue's RQCTL/TQCTL register holds
 * the type/index of the next queue in the interrupt link list.
 */
301 while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
303 case I40E_QUEUE_TYPE_RX:
304 reg_idx = I40E_QINT_RQCTL(pf_queue_id);
305 itr_idx = vecmap->rxitr_idx;
307 case I40E_QUEUE_TYPE_TX:
308 reg_idx = I40E_QINT_TQCTL(pf_queue_id);
309 itr_idx = vecmap->txitr_idx;
315 next_q = find_next_bit(&linklistmap,
317 I40E_VIRTCHNL_SUPPORTED_QTYPES),
320 (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
321 vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
322 qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
323 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
326 pf_queue_id = I40E_QUEUE_END_OF_LIST;
330 /* format for the RQCTL & TQCTL regs is same */
332 (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
333 (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
334 BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
335 (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
336 wr32(hw, reg_idx, reg);
339 /* if the vf is running in polling mode and using interrupt zero,
340 * need to disable auto-mask on enabling zero interrupt for VFs.
342 if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
344 reg = rd32(hw, I40E_GLINT_CTL);
345 if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
346 reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
347 wr32(hw, I40E_GLINT_CTL, reg);
356 * i40e_release_iwarp_qvlist
357 * @vf: pointer to the VF.
360 static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
362 struct i40e_pf *pf = vf->pf;
363 struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
/* Nothing to release if no iWARP queue/vector list was ever configured. */
367 if (!vf->qvlist_info)
370 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
371 for (i = 0; i < qvlist_info->num_vectors; i++) {
372 struct virtchnl_iwarp_qv_info *qv_info;
373 u32 next_q_index, next_q_type;
374 struct i40e_hw *hw = &pf->hw;
375 u32 v_idx, reg_idx, reg;
377 qv_info = &qvlist_info->qv_info[i];
380 v_idx = qv_info->v_idx;
381 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
382 /* Figure out the queue after CEQ and make that the
385 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
386 reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
387 next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
388 >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
389 next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
390 >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
/* Re-point the vector's link-list head at the queue that followed
 * the CEQ, effectively unlinking the CEQ from the interrupt chain.
 */
392 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
393 reg = (next_q_index &
394 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
396 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
398 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
/* free(NULL) would be harmless, but clearing the pointer guards the
 * !vf->qvlist_info early-out above on a later call.
 */
401 kfree(vf->qvlist_info);
402 vf->qvlist_info = NULL;
406 * i40e_config_iwarp_qvlist
407 * @vf: pointer to the VF info
408 * @qvlist_info: queue and vector list
410 * Return 0 on success or < 0 on error
412 static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
413 struct virtchnl_iwarp_qvlist_info *qvlist_info)
415 struct i40e_pf *pf = vf->pf;
416 struct i40e_hw *hw = &pf->hw;
417 struct virtchnl_iwarp_qv_info *qv_info;
418 u32 v_idx, i, reg_idx, reg;
419 u32 next_q_idx, next_q_type;
423 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
/* Reject VF requests that claim more vectors than the per-VF MSI-X
 * budget allows; qvlist_info comes from the (untrusted) VF.
 */
425 if (qvlist_info->num_vectors > msix_vf) {
426 dev_warn(&pf->pdev->dev,
427 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
428 qvlist_info->num_vectors,
/* struct has one qv_info element built in, hence num_vectors - 1. */
434 size = sizeof(struct virtchnl_iwarp_qvlist_info) +
435 (sizeof(struct virtchnl_iwarp_qv_info) *
436 (qvlist_info->num_vectors - 1));
437 kfree(vf->qvlist_info);
438 vf->qvlist_info = kzalloc(size, GFP_KERNEL);
439 if (!vf->qvlist_info) {
443 vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
445 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
446 for (i = 0; i < qvlist_info->num_vectors; i++) {
447 qv_info = &qvlist_info->qv_info[i];
450 v_idx = qv_info->v_idx;
452 /* Validate vector id belongs to this vf */
453 if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
458 vf->qvlist_info->qv_info[i] = *qv_info;
460 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
461 /* We might be sharing the interrupt, so get the first queue
462 * index and type, push it down the list by adding the new
463 * queue on top. Also link it with the new queue in CEQCTL.
465 reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
466 next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
467 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
468 next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
469 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
471 if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
472 reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
473 reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
474 (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
475 (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
476 (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
477 (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
478 wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
/* Make the CEQ the new head of this vector's queue link list. */
480 reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
481 reg = (qv_info->ceq_idx &
482 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
483 (I40E_QUEUE_TYPE_PE_CEQ <<
484 I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
485 wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
488 if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
489 reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
490 (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
491 (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
493 wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
/* Error path: drop the partially built copy so state stays consistent. */
499 kfree(vf->qvlist_info);
500 vf->qvlist_info = NULL;
506 * i40e_config_vsi_tx_queue
507 * @vf: pointer to the VF info
508 * @vsi_id: id of VSI as provided by the FW
509 * @vsi_queue_id: vsi relative queue index
510 * @info: config. info
514 static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
516 struct virtchnl_txq_info *info)
518 struct i40e_pf *pf = vf->pf;
519 struct i40e_hw *hw = &pf->hw;
520 struct i40e_hmc_obj_txq tx_ctx;
521 struct i40e_vsi *vsi;
/* info comes from the VF over virtchnl; validate its VSI id before use. */
526 if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
530 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
531 vsi = i40e_find_vsi_from_id(pf, vsi_id);
537 /* clear the context structure first */
538 memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq))
540 /* only set the required fields */
/* HMC base address is expressed in 128-byte units. */
541 tx_ctx.base = info->dma_ring_addr / 128;
542 tx_ctx.qlen = info->ring_len;
543 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
544 tx_ctx.rdylist_act = 0;
545 tx_ctx.head_wb_ena = info->headwb_enabled;
546 tx_ctx.head_wb_addr = info->dma_headwb_addr;
548 /* clear the context in the HMC */
549 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
551 dev_err(&pf->pdev->dev,
552 "Failed to clear VF LAN Tx queue context %d, error: %d\n",
558 /* set the context in the HMC */
559 ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
561 dev_err(&pf->pdev->dev,
562 "Failed to set VF LAN Tx queue context %d error: %d\n",
568 /* associate this queue with the PCI VF function */
569 qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
570 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
571 & I40E_QTX_CTL_PF_INDX_MASK);
572 qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
573 << I40E_QTX_CTL_VFVM_INDX_SHIFT)
574 & I40E_QTX_CTL_VFVM_INDX_MASK);
575 wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
583 * i40e_config_vsi_rx_queue
584 * @vf: pointer to the VF info
585 * @vsi_id: id of VSI as provided by the FW
586 * @vsi_queue_id: vsi relative queue index
587 * @info: config. info
591 static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
593 struct virtchnl_rxq_info *info)
595 struct i40e_pf *pf = vf->pf;
596 struct i40e_hw *hw = &pf->hw;
597 struct i40e_hmc_obj_rxq rx_ctx;
601 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
603 /* clear the context structure first */
604 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
606 /* only set the required fields */
/* HMC base address is expressed in 128-byte units. */
607 rx_ctx.base = info->dma_ring_addr / 128;
608 rx_ctx.qlen = info->ring_len;
610 if (info->splithdr_enabled) {
611 rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
613 I40E_RX_SPLIT_TCP_UDP |
615 /* header length validation */
/* VF-supplied sizes are range-checked before programming HW. */
616 if (info->hdr_size > ((2 * 1024) - 64)) {
620 rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
622 /* set split mode 10b */
623 rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
626 /* databuffer length validation */
627 if (info->databuffer_size > ((16 * 1024) - 128)) {
631 rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
633 /* max pkt. length validation */
634 if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
638 rx_ctx.rxmax = info->max_pkt_size;
640 /* enable 32bytes desc always */
644 rx_ctx.lrxqthresh = 2;
649 /* clear the context in the HMC */
650 ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
652 dev_err(&pf->pdev->dev,
653 "Failed to clear VF LAN Rx queue context %d, error: %d\n",
659 /* set the context in the HMC */
660 ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
662 dev_err(&pf->pdev->dev,
663 "Failed to set VF LAN Rx queue context %d error: %d\n",
675 * @vf: pointer to the VF info
676 * @type: type of VSI to allocate
678 * alloc VF vsi context & resources
680 static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
682 struct i40e_mac_filter *f = NULL;
683 struct i40e_pf *pf = vf->pf;
684 struct i40e_vsi *vsi;
/* The new VSI hangs off the PF's LAN VSI in the switch hierarchy. */
687 vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
690 dev_err(&pf->pdev->dev,
691 "add vsi failed for VF %d, aq_err %d\n",
692 vf->vf_id, pf->hw.aq.asq_last_status);
694 goto error_alloc_vsi_res;
696 if (type == I40E_VSI_SRIOV) {
697 u64 hena = i40e_pf_get_default_rss_hena(pf);
698 u8 broadcast[ETH_ALEN];
700 vf->lan_vsi_idx = vsi->idx;
701 vf->lan_vsi_id = vsi->id;
702 /* If the port VLAN has been configured and then the
703 * VF driver was removed then the VSI port VLAN
704 * configuration was destroyed. Check if there is
705 * a port VLAN and restore the VSI configuration if
708 if (vf->port_vlan_id)
709 i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
/* MAC filter hash must be held while adding unicast/broadcast filters. */
711 spin_lock_bh(&vsi->mac_filter_hash_lock);
712 if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
713 f = i40e_add_mac_filter(vsi,
714 vf->default_lan_addr.addr);
716 dev_info(&pf->pdev->dev,
717 "Could not add MAC filter %pM for VF %d\n",
718 vf->default_lan_addr.addr, vf->vf_id);
720 eth_broadcast_addr(broadcast);
721 f = i40e_add_mac_filter(vsi, broadcast);
723 dev_info(&pf->pdev->dev,
724 "Could not allocate VF broadcast filter\n");
725 spin_unlock_bh(&vsi->mac_filter_hash_lock);
/* Program the default RSS hash-enable bits for this VF (64 bits split
 * across two 32-bit registers).
 */
726 wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
727 wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
730 /* program mac filter */
731 ret = i40e_sync_vsi_filters(vsi);
733 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
735 /* Set VF bandwidth if specified */
/* BW limit register granularity is 50 Mbps, hence tx_rate / 50. */
737 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
738 vf->tx_rate / 50, 0, NULL);
740 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
749 * i40e_enable_vf_mappings
750 * @vf: pointer to the VF info
754 static void i40e_enable_vf_mappings(struct i40e_vf *vf)
756 struct i40e_pf *pf = vf->pf;
757 struct i40e_hw *hw = &pf->hw;
758 u32 reg, total_queue_pairs = 0;
761 /* Tell the hardware we're using noncontiguous mapping. HW requires
762 * that VF queues be mapped using this method, even when they are
763 * contiguous in real life
765 i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
766 I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
768 /* enable VF vplan_qtable mappings */
769 reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
770 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
772 /* map PF queues to VF queues */
773 for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
774 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
776 reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
777 wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
781 /* map PF queues to VSI */
/* Each VSILAN_QTABLE register holds two queue entries; 7 registers
 * cover the per-VSI table, unused slots are set to the 0x07FF sentinel.
 */
782 for (j = 0; j < 7; j++) {
783 if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
784 reg = 0x07FF07FF; /* unused */
786 u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
789 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
793 i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
801 * i40e_disable_vf_mappings
802 * @vf: pointer to the VF info
804 * disable VF mappings
806 static void i40e_disable_vf_mappings(struct i40e_vf *vf)
808 struct i40e_pf *pf = vf->pf;
809 struct i40e_hw *hw = &pf->hw;
812 /* disable qp mappings */
813 wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
/* Point every queue-table slot at the end-of-list sentinel so the VF
 * no longer maps to any PF queue.
 */
814 for (i = 0; i < I40E_MAX_VSI_QP; i++)
815 wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
816 I40E_QUEUE_END_OF_LIST);
822 * @vf: pointer to the VF info
826 static void i40e_free_vf_res(struct i40e_vf *vf)
828 struct i40e_pf *pf = vf->pf;
829 struct i40e_hw *hw = &pf->hw;
833 /* Start by disabling VF's configuration API to prevent the OS from
834 * accessing the VF's VSI after it's freed / invalidated.
836 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
838 /* free vsi & disconnect it from the parent uplink */
839 if (vf->lan_vsi_idx) {
840 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
845 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
847 /* disable interrupts so the VF starts in a known state */
848 for (i = 0; i < msix_vf; i++) {
849 /* format is same for both registers */
/* Vector 0 lives in DYN_CTL0; the rest index into the DYN_CTLN array. */
851 reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
853 reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
856 wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
860 /* clear the irq settings */
861 for (i = 0; i < msix_vf; i++) {
862 /* format is same for both registers */
864 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
866 reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
869 reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
870 I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
871 wr32(hw, reg_idx, reg);
874 /* reset some of the state variables keeping track of the resources */
875 vf->num_queue_pairs = 0;
881 * @vf: pointer to the VF info
883 * allocate VF resources
885 static int i40e_alloc_vf_res(struct i40e_vf *vf)
887 struct i40e_pf *pf = vf->pf;
888 int total_queue_pairs = 0;
891 /* allocate hw vsi context & associated resources */
892 ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
895 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
898 set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
900 clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
902 /* store the total qps number for the runtime
905 vf->num_queue_pairs = total_queue_pairs;
907 /* VF is now completely initialized */
908 set_bit(I40E_VF_STATE_INIT, &vf->vf_states);
/* Error path: undo any partial allocation before returning. */
912 i40e_free_vf_res(vf);
917 #define VF_DEVICE_STATUS 0xAA
918 #define VF_TRANS_PENDING_MASK 0x20
920 * i40e_quiesce_vf_pci
921 * @vf: pointer to the VF structure
923 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
924 * if the transactions never clear.
926 static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
928 struct i40e_pf *pf = vf->pf;
929 struct i40e_hw *hw = &pf->hw;
933 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
/* Request the VF's PCI device status via the CIAA/CIAD config-access
 * window, then poll (up to 100 iterations) until the transactions-
 * pending bit clears.
 */
935 wr32(hw, I40E_PF_PCI_CIAA,
936 VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
937 for (i = 0; i < 100; i++) {
938 reg = rd32(hw, I40E_PF_PCI_CIAD);
939 if ((reg & VF_TRANS_PENDING_MASK) == 0)
947 * i40e_trigger_vf_reset
948 * @vf: pointer to the VF structure
949 * @flr: VFLR was issued or not
951 * Trigger hardware to start a reset for a particular VF. Expects the caller
952 * to wait the proper amount of time to allow hardware to reset the VF before
953 * it cleans up and restores VF functionality.
955 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
957 struct i40e_pf *pf = vf->pf;
958 struct i40e_hw *hw = &pf->hw;
959 u32 reg, reg_idx, bit_idx;
/* Mark the VF inactive so the virtchnl layer stops servicing it. */
962 clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
964 /* Disable VF's configuration API during reset. The flag is re-enabled
965 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
966 * It's normally disabled in i40e_free_vf_res(), but it's safer
967 * to do it earlier to give some time to finish to any VF config
968 * functions that may still be running at this point.
970 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
972 /* In the case of a VFLR, the HW has already reset the VF and we
973 * just need to clean up, so don't hit the VFRTRIG register.
976 /* reset VF using VPGEN_VFRTRIG reg */
977 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
978 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
979 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
982 /* clear the VFLR bit in GLGEN_VFLRSTAT */
/* VFLRSTAT is a bit array indexed by absolute VF id: 32 VFs per register. */
983 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
984 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
985 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
988 if (i40e_quiesce_vf_pci(vf))
989 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
994 * i40e_cleanup_reset_vf
995 * @vf: pointer to the VF structure
997 * Cleanup a VF after the hardware reset is finished. Expects the caller to
998 * have verified whether the reset is finished properly, and ensure the
999 * minimum amount of wait time has passed.
1001 static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
1003 struct i40e_pf *pf = vf->pf;
1004 struct i40e_hw *hw = &pf->hw;
1007 /* free VF resources to begin resetting the VSI state */
1008 i40e_free_vf_res(vf);
1010 /* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
1011 * By doing this we allow HW to access VF memory at any point. If we
1012 * did it any sooner, HW could access memory while it was being freed
1013 * in i40e_free_vf_res(), causing an IOMMU fault.
1015 * On the other hand, this needs to be done ASAP, because the VF driver
1016 * is waiting for this to happen and may report a timeout. It's
1017 * harmless, but it gets logged into Guest OS kernel log, so best avoid
1020 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1021 reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1022 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1024 /* reallocate VF resources to finish resetting the VSI state */
1025 if (!i40e_alloc_vf_res(vf)) {
1026 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1027 i40e_enable_vf_mappings(vf);
1028 set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1029 clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1030 /* Do not notify the client during VF init */
1031 if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
1033 i40e_notify_client_of_vf_reset(pf, abs_vf_id);
1037 /* Tell the VF driver the reset is done. This needs to be done only
1038 * after VF has been fully initialized, because the VF driver may
1039 * request resources immediately after setting this flag.
1041 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1046 * @vf: pointer to the VF structure
1047 * @flr: VFLR was issued or not
1051 void i40e_reset_vf(struct i40e_vf *vf, bool flr)
1053 struct i40e_pf *pf = vf->pf;
1054 struct i40e_hw *hw = &pf->hw;
1059 /* If VFs have been disabled, there is no need to reset */
/* test_and_set also acts as a lock: only one reset runs at a time. */
1060 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1063 i40e_trigger_vf_reset(vf, flr);
1065 /* poll VPGEN_VFRSTAT reg to make sure
1066 * that reset is complete
1068 for (i = 0; i < 10; i++) {
1069 /* VF reset requires driver to first reset the VF and then
1070 * poll the status register to make sure that the reset
1071 * completed successfully. Due to internal HW FIFO flushes,
1072 * we must wait 10ms before the register will be valid.
1074 usleep_range(10000, 20000);
1075 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1076 if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
1083 usleep_range(10000, 20000);
1086 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1088 usleep_range(10000, 20000);
1090 /* On initial reset, we don't have any queues to disable */
1091 if (vf->lan_vsi_idx != 0)
1092 i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
1094 i40e_cleanup_reset_vf(vf);
1097 clear_bit(__I40E_VF_DISABLE, pf->state);
1101 * i40e_reset_all_vfs
1102 * @pf: pointer to the PF structure
1103 * @flr: VFLR was issued or not
1105 * Reset all allocated VFs in one go. First, tell the hardware to reset each
1106 * VF, then do all the waiting in one chunk, and finally finish restoring each
1107 * VF after the wait. This is useful during PF routines which need to reset
1108 * all VFs, as otherwise it must perform these resets in a serialized fashion.
1110 void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
1112 struct i40e_hw *hw = &pf->hw;
1117 /* If we don't have any VFs, then there is nothing to reset */
1118 if (!pf->num_alloc_vfs)
1121 /* If VFs have been disabled, there is no need to reset */
1122 if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1125 /* Begin reset on all VFs at once */
1126 for (v = 0; v < pf->num_alloc_vfs; v++)
1127 i40e_trigger_vf_reset(&pf->vf[v], flr);
1129 /* HW requires some time to make sure it can flush the FIFO for a VF
1130 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1131 * sequence to make sure that it has completed. We'll keep track of
1132 * the VFs using a simple iterator that increments once that VF has
1133 * finished resetting.
1135 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1136 usleep_range(10000, 20000);
1138 /* Check each VF in sequence, beginning with the VF to fail
1139 * the previous check.
1141 while (v < pf->num_alloc_vfs) {
1143 reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
1144 if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
1147 /* If the current VF has finished resetting, move on
1148 * to the next VF in sequence.
1155 usleep_range(10000, 20000);
1157 /* Display a warning if at least one VF didn't manage to reset in
1158 * time, but continue on with the operation.
1160 if (v < pf->num_alloc_vfs)
1161 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
1163 usleep_range(10000, 20000);
1165 /* Begin disabling all the rings associated with VFs, but do not wait
1168 for (v = 0; v < pf->num_alloc_vfs; v++) {
1169 /* On initial reset, we don't have any queues to disable */
1170 if (pf->vf[v].lan_vsi_idx == 0)
1173 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
1176 /* Now that we've notified HW to disable all of the VF rings, wait
1177 * until they finish.
1179 for (v = 0; v < pf->num_alloc_vfs; v++) {
1180 /* On initial reset, we don't have any queues to disable */
1181 if (pf->vf[v].lan_vsi_idx == 0)
1184 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
1187 /* Hw may need up to 50ms to finish disabling the RX queues. We
1188 * minimize the wait by delaying only once for all VFs.
1192 /* Finish the reset on each VF */
1193 for (v = 0; v < pf->num_alloc_vfs; v++)
1194 i40e_cleanup_reset_vf(&pf->vf[v]);
/* Release the __I40E_VF_DISABLE "lock" taken at function entry. */
1197 clear_bit(__I40E_VF_DISABLE, pf->state);
1202 * @pf: pointer to the PF structure
1206 void i40e_free_vfs(struct i40e_pf *pf)
1208 struct i40e_hw *hw = &pf->hw;
1209 u32 reg_idx, bit_idx;
/* Spin until we own the __I40E_VF_DISABLE bit; another reset/free may
 * be in flight.
 */
1214 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1215 usleep_range(1000, 2000);
1217 i40e_notify_client_of_vf_enable(pf, 0);
1219 /* Amortize wait time by stopping all VFs at the same time */
1220 for (i = 0; i < pf->num_alloc_vfs; i++) {
1221 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1224 i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
1227 for (i = 0; i < pf->num_alloc_vfs; i++) {
1228 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1231 i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
1234 /* Disable IOV before freeing resources. This lets any VF drivers
1235 * running in the host get themselves cleaned up before we yank
1236 * the carpet out from underneath their feet.
1238 if (!pci_vfs_assigned(pf->pdev))
1239 pci_disable_sriov(pf->pdev);
1241 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
1243 /* free up VF resources */
/* Zero num_alloc_vfs first so no other path iterates the array while
 * it is being torn down; tmp remembers the old count for the loops.
 */
1244 tmp = pf->num_alloc_vfs;
1245 pf->num_alloc_vfs = 0;
1246 for (i = 0; i < tmp; i++) {
1247 if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
1248 i40e_free_vf_res(&pf->vf[i]);
1249 /* disable qp mappings */
1250 i40e_disable_vf_mappings(&pf->vf[i]);
1256 /* This check is for when the driver is unloaded while VFs are
1257 * assigned. Setting the number of VFs to 0 through sysfs is caught
1258 * before this function ever gets called.
1260 if (!pci_vfs_assigned(pf->pdev)) {
1261 /* Acknowledge VFLR for all VFS. Without this, VFs will fail to
1262 * work correctly when SR-IOV gets re-enabled.
1264 for (vf_id = 0; vf_id < tmp; vf_id++) {
1265 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1266 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1267 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1270 clear_bit(__I40E_VF_DISABLE, pf->state);
1273 #ifdef CONFIG_PCI_IOV
1276 * @pf: pointer to the PF structure
1277 * @num_alloc_vfs: number of VFs to allocate
1279 * allocate VF resources
/* i40e_alloc_vfs - allocate and initialize @num_alloc_vfs VFs on @pf.
 * Enables SR-IOV if not already enabled for this count, allocates the
 * per-VF tracking array, applies default capabilities, then triggers a
 * reset of all VFs (which allocates their HW resources).
 * NOTE(review): error-handling paths, local declarations (i, ret) and
 * the return statement are missing from this extract.
 */
1281 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
1283 	struct i40e_vf *vfs;
1286 	/* Disable interrupt 0 so we don't try to handle the VFLR. */
1287 	i40e_irq_dynamic_disable_icr0(pf);
1289 	/* Check to see if we're just allocating resources for extant VFs */
1290 	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
1291 		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
/* on failure, fall back out of VEB mode and report zero VFs */
1293 			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1294 			pf->num_alloc_vfs = 0;
1298 	/* allocate memory */
1299 	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
1306 	/* apply default profile */
1307 	for (i = 0; i < num_alloc_vfs; i++) {
1309 		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
1312 		/* assign default capabilities */
1313 		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
1314 		vfs[i].spoofchk = true;
/* VF resources are not allocated yet; reset below completes enable */
1316 		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
1319 	pf->num_alloc_vfs = num_alloc_vfs;
1321 	/* VF resources get allocated during reset */
1322 	i40e_reset_all_vfs(pf, false);
1324 	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
1330 	/* Re-enable interrupt 0. */
1331 	i40e_irq_dynamic_enable_icr0(pf, false);
1337 * i40e_pci_sriov_enable
1338 * @pdev: pointer to a pci_dev structure
1339 * @num_vfs: number of VFs to allocate
1341 * Enable or change the number of VFs
/* i40e_pci_sriov_enable - sysfs-driven SR-IOV enable/resize entry point.
 * Rejects the request during diagnostic testing, handles the
 * pre-existing-VF cases, caps the count at the device limit, then
 * delegates to i40e_alloc_vfs().
 * NOTE(review): the bodies of the pre_existing_vfs branches and the
 * final return are missing from this extract.
 */
1343 static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
1345 #ifdef CONFIG_PCI_IOV
1346 	struct i40e_pf *pf = pci_get_drvdata(pdev);
1347 	int pre_existing_vfs = pci_num_vf(pdev);
1350 	if (test_bit(__I40E_TESTING, pf->state)) {
1351 		dev_warn(&pdev->dev,
1352 			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
/* count changed: existing VFs must be freed first; same count: nothing to do */
1357 	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1359 	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1362 	if (num_vfs > pf->num_req_vfs) {
1363 		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
1364 			 num_vfs, pf->num_req_vfs);
1369 	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
1370 	err = i40e_alloc_vfs(pf, num_vfs);
1372 		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
1386 * i40e_pci_sriov_configure
1387 * @pdev: pointer to a pci_dev structure
1388 * @num_vfs: number of VFs to allocate
1390 * Enable or change the number of VFs. Called when the user updates the number
/* i40e_pci_sriov_configure - PCI core callback for sriov_numvfs writes.
 * For a nonzero @num_vfs, switches the PF into VEB mode (with a safe PF
 * reset) if needed, then enables SR-IOV. For zero, frees the VFs and
 * leaves VEB mode — unless VFs are still assigned to VMs.
 * NOTE(review): the num_vfs==0 branch structure (i40e_free_vfs call and
 * returns) is partially missing from this extract.
 */
1393 int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1395 	struct i40e_pf *pf = pci_get_drvdata(pdev);
1398 	if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
1399 		pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
/* mode change requires a PF reset to take effect */
1400 		i40e_do_reset_safe(pf,
1401 				   BIT_ULL(__I40E_PF_RESET_REQUESTED));
1403 	return i40e_pci_sriov_enable(pdev, num_vfs);
1406 	if (!pci_vfs_assigned(pf->pdev)) {
1408 		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
1409 		i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
1411 		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
1417 /***********************virtual channel routines******************/
1420 * i40e_vc_send_msg_to_vf
1421 * @vf: pointer to the VF info
1422 * @v_opcode: virtual channel opcode
1423 * @v_retval: virtual channel return value
1424 * @msg: pointer to the msg buffer
1425 * @msglen: msg length
/* i40e_vc_send_msg_to_vf - send a virtchnl message to one VF via the AQ.
 * Central reply path: tracks per-VF valid/invalid message counters and
 * disables a VF that exceeds I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED
 * consecutive failures, then transmits via i40e_aq_send_msg_to_vf().
 * NOTE(review): declarations of pf/hw/abs_vf_id/aq_ret, the success/
 * failure branch keywords, and the return statements are missing from
 * this extract.
 */
1429 static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1430 				  u32 v_retval, u8 *msg, u16 msglen)
1437 	/* validate the request */
1438 	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
/* AQ addressing uses absolute VF id (PF-relative id + HW base) */
1443 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1445 	/* single place to detect unsuccessful return values */
1447 		vf->num_invalid_msgs++;
1448 		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
1449 			 vf->vf_id, v_opcode, v_retval);
1450 		if (vf->num_invalid_msgs >
1451 		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
1452 			dev_err(&pf->pdev->dev,
1453 				"Number of invalid messages exceeded for VF %d\n",
1455 			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
/* quarantine the misbehaving VF until the host re-enables it */
1456 			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
1459 		vf->num_valid_msgs++;
1460 		/* reset the invalid counter, if a valid message is received. */
1461 		vf->num_invalid_msgs = 0;
1464 	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1467 		dev_info(&pf->pdev->dev,
1468 			 "Unable to send the message to VF %d aq_err %d\n",
1469 			 vf->vf_id, pf->hw.aq.asq_last_status);
1477 * i40e_vc_send_resp_to_vf
1478 * @vf: pointer to the VF info
1479 * @opcode: operation code
1480 * @retval: return value
1482 * send resp msg to VF
/* i40e_vc_send_resp_to_vf - convenience wrapper for a payload-less reply:
 * forwards to i40e_vc_send_msg_to_vf() with a NULL buffer and zero length.
 * NOTE(review): the retval parameter line and braces are missing from
 * this extract.
 */
1484 static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
1485 				   enum virtchnl_ops opcode,
1488 	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
1492 * i40e_vc_get_version_msg
1493 * @vf: pointer to the VF info
1495 * called from the VF to request the API version used by the PF
/* i40e_vc_get_version_msg - handle VIRTCHNL_OP_VERSION from a VF.
 * Stores the VF's advertised API version and replies with the PF's
 * version, downgrading the reported minor for 1.0 VFs that cannot parse
 * the capability extensions.
 */
1497 static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
1499 	struct virtchnl_version_info info = {
1500 		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
/* remember what the VF speaks; later handlers branch on VF_IS_V10/V11 */
1503 	vf->vf_ver = *(struct virtchnl_version_info *)msg;
1504 	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1505 	if (VF_IS_V10(&vf->vf_ver))
1506 		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1507 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1508 				      I40E_SUCCESS, (u8 *)&info,
1509 				      sizeof(struct virtchnl_version_info));
1513 * i40e_vc_get_vf_resources_msg
1514 * @vf: pointer to the VF info
1515 * @msg: pointer to the msg buffer
1516 * @msglen: msg length
1518 * called from the VF to request its resources
/* i40e_vc_get_vf_resources_msg - handle VIRTCHNL_OP_GET_VF_RESOURCES.
 * Builds a virtchnl_vf_resource reply describing the VF's VSI, queue
 * pairs, MSI-X vectors, RSS table sizes and negotiated capability flags
 * (intersection of what the VF requested via driver_caps and what the PF
 * hardware supports), then marks the VF ACTIVE.
 * NOTE(review): declarations of num_vsis/len/ret, several braces/else
 * arms, the error label and kfree of vfres are missing from this extract.
 */
1520 static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
1522 	struct virtchnl_vf_resource *vfres = NULL;
1523 	struct i40e_pf *pf = vf->pf;
1524 	i40e_status aq_ret = 0;
1525 	struct i40e_vsi *vsi;
/* only an INIT-state VF may request resources */
1530 	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
1531 		aq_ret = I40E_ERR_PARAM;
/* reply carries one vsi_resource entry per VSI after the fixed header */
1535 	len = (sizeof(struct virtchnl_vf_resource) +
1536 	       sizeof(struct virtchnl_vsi_resource) * num_vsis);
1538 	vfres = kzalloc(len, GFP_KERNEL);
1540 		aq_ret = I40E_ERR_NO_MEMORY;
/* v1.1+ VFs pass their capability request in the message body;
 * older VFs get a fixed legacy set
 */
1544 	if (VF_IS_V11(&vf->vf_ver))
1545 		vf->driver_caps = *(u32 *)msg;
1547 		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1548 				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
1549 				  VIRTCHNL_VF_OFFLOAD_VLAN;
1551 	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1552 	vsi = pf->vsi[vf->lan_vsi_idx];
/* VLAN offload is only offered when no port VLAN is enforced */
1553 	if (!vsi->info.pvid)
1554 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
1556 	if (i40e_vf_client_capable(pf, vf->vf_id) &&
1557 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
1558 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
1559 		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
1562 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1563 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1565 		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
1566 		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
1567 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1569 			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1572 	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
1573 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1574 			vfres->vf_cap_flags |=
1575 				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1578 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1579 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1581 	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
1582 	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
1583 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
/* RX polling conflicts with multi-function-per-port operation */
1585 	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
1586 		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
1587 			dev_err(&pf->pdev->dev,
1588 				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
1590 			aq_ret = I40E_ERR_PARAM;
1593 		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1596 	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
1597 		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1598 			vfres->vf_cap_flags |=
1599 					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1602 	vfres->num_vsis = num_vsis;
1603 	vfres->num_queue_pairs = vf->num_queue_pairs;
1604 	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
1605 	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
1606 	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
1608 	if (vf->lan_vsi_idx) {
1609 		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
1610 		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1611 		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
1612 		/* VFs only use TC 0 */
1613 		vfres->vsi_res[0].qset_handle
1614 					  = le16_to_cpu(vsi->info.qs_handle[0]);
1615 		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1616 				vf->default_lan_addr.addr);
1618 	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1621 	/* send the response back to the VF */
1622 	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
1623 				     aq_ret, (u8 *)vfres, len);
1630 * i40e_vc_reset_vf_msg
1631 * @vf: pointer to the VF info
1632 * @msg: pointer to the msg buffer
1633 * @msglen: msg length
1635 * called from the VF to reset itself,
1636 * unlike other virtchnl messages, PF driver
1637 * doesn't send the response back to the VF
/* i40e_vc_reset_vf_msg - handle VIRTCHNL_OP_RESET_VF.
 * Resets the VF only if it is currently ACTIVE; per the surrounding
 * kernel-doc, no reply is sent for this opcode.
 */
1639 static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
1641 	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
1642 		i40e_reset_vf(vf, false);
1646 * i40e_getnum_vf_vsi_vlan_filters
1647 * @vsi: pointer to the vsi
1649 * called to get the number of VLANs offloaded on this VF
/* i40e_getnum_vf_vsi_vlan_filters - count MAC filters on @vsi that carry
 * a real VLAN id (0..I40E_MAX_VLANID); filters with a negative sentinel
 * vlan are skipped.
 * NOTE(review): the increment of num_vlans and the return statement are
 * missing from this extract. Caller must hold the mac filter hash lock —
 * TODO confirm against callers.
 */
1651 static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
1653 	struct i40e_mac_filter *f;
1654 	int num_vlans = 0, bkt;
1656 	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1657 		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
1665 * i40e_vc_config_promiscuous_mode_msg
1666 * @vf: pointer to the VF info
1667 * @msg: pointer to the msg buffer
1668 * @msglen: msg length
1670 * called from the VF to configure the promiscuous mode of
/* i40e_vc_config_promiscuous_mode_msg - handle
 * VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE.
 * Applies unicast/multicast promiscuous settings for the VF's VSI.
 * Unprivileged VFs are refused (but told success on purpose). When a
 * port VLAN or VLAN filters exist, promiscuity is applied per-VLAN;
 * otherwise VSI-wide. Tracks result in the UC/MC_PROMISC state bits.
 * NOTE(review): several declarations (aq_err, bkt, i), else arms,
 * braces and goto targets are missing from this extract.
 */
1673 static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
1674 					       u8 *msg, u16 msglen)
1676 	struct virtchnl_promisc_info *info =
1677 	    (struct virtchnl_promisc_info *)msg;
1678 	struct i40e_pf *pf = vf->pf;
1679 	struct i40e_hw *hw = &pf->hw;
1680 	struct i40e_mac_filter *f;
1681 	i40e_status aq_ret = 0;
1682 	bool allmulti = false;
1683 	struct i40e_vsi *vsi;
1684 	bool alluni = false;
1688 	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
1689 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
1690 	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
1692 		aq_ret = I40E_ERR_PARAM;
/* only trusted/privileged VFs may change promiscuity */
1695 	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
1696 		dev_err(&pf->pdev->dev,
1697 			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
1699 		/* Lie to the VF on purpose. */
1703 	/* Multicast promiscuous handling*/
1704 	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
1707 	if (vf->port_vlan_id) {
1708 		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
1712 	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
/* apply per-VLAN when VLAN filters are present on the VSI */
1713 		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1714 			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1716 			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
1721 				aq_err = pf->hw.aq.asq_last_status;
1723 				dev_err(&pf->pdev->dev,
1724 					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
1726 					i40e_stat_str(&pf->hw, aq_ret),
1727 					i40e_aq_str(&pf->hw, aq_err));
/* no port VLAN and no VLAN filters: set VSI-wide multicast promisc */
1732 		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
1734 		aq_err = pf->hw.aq.asq_last_status;
1736 			dev_err(&pf->pdev->dev,
1737 				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
1739 				i40e_stat_str(&pf->hw, aq_ret),
1740 				i40e_aq_str(&pf->hw, aq_err));
1746 		dev_info(&pf->pdev->dev,
1747 			 "VF %d successfully set multicast promiscuous mode\n",
1750 		set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
1752 		clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
/* Unicast promiscuous handling mirrors the multicast path above */
1755 	if (info->flags & FLAG_VF_UNICAST_PROMISC)
1757 	if (vf->port_vlan_id) {
1758 		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
1762 	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
1763 		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
1764 			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
1766 			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
1771 				aq_err = pf->hw.aq.asq_last_status;
1773 				dev_err(&pf->pdev->dev,
1774 					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
1776 					i40e_stat_str(&pf->hw, aq_ret),
1777 					i40e_aq_str(&pf->hw, aq_err));
1780 		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
1783 		aq_err = pf->hw.aq.asq_last_status;
1785 			dev_err(&pf->pdev->dev,
1786 				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
1787 				vf->vf_id, info->flags,
1788 				i40e_stat_str(&pf->hw, aq_ret),
1789 				i40e_aq_str(&pf->hw, aq_err));
1795 		dev_info(&pf->pdev->dev,
1796 			 "VF %d successfully set unicast promiscuous mode\n",
1799 		set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1801 		clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
1805 	/* send the response to the VF */
1806 	return i40e_vc_send_resp_to_vf(vf,
1807 				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
1812 * i40e_vc_config_queues_msg
1813 * @vf: pointer to the VF info
1814 * @msg: pointer to the msg buffer
1815 * @msglen: msg length
1817 * called from the VF to configure the rx/tx
/* i40e_vc_config_queues_msg - handle VIRTCHNL_OP_CONFIG_VSI_QUEUES.
 * Validates every queue-pair entry (matching vsi_id, matching tx/rx
 * queue id, valid queue index), programs each rx/tx queue, and records
 * the number of queue pairs the VF configured.
 * NOTE(review): the declaration of i, some braces and the error label
 * are missing from this extract.
 */
1820 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1822 	struct virtchnl_vsi_queue_config_info *qci =
1823 	    (struct virtchnl_vsi_queue_config_info *)msg;
1824 	struct virtchnl_queue_pair_info *qpi;
1825 	struct i40e_pf *pf = vf->pf;
1826 	u16 vsi_id, vsi_queue_id;
1827 	i40e_status aq_ret = 0;
1830 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
1831 		aq_ret = I40E_ERR_PARAM;
1835 	vsi_id = qci->vsi_id;
1836 	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1837 		aq_ret = I40E_ERR_PARAM;
1840 	for (i = 0; i < qci->num_queue_pairs; i++) {
1841 		qpi = &qci->qpair[i];
1842 		vsi_queue_id = qpi->txq.queue_id;
/* tx and rx halves of a pair must describe the same VSI queue */
1843 		if ((qpi->txq.vsi_id != vsi_id) ||
1844 		    (qpi->rxq.vsi_id != vsi_id) ||
1845 		    (qpi->rxq.queue_id != vsi_queue_id) ||
1846 		    !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
1847 			aq_ret = I40E_ERR_PARAM;
1851 		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
1853 		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
1855 			aq_ret = I40E_ERR_PARAM;
1859 	/* set vsi num_queue_pairs in use to num configured by VF */
1860 	pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
1863 	/* send the response to the VF */
1864 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
1869 * i40e_vc_config_irq_map_msg
1870 * @vf: pointer to the VF info
1871 * @msg: pointer to the msg buffer
1872 * @msglen: msg length
1874 * called from the VF to configure the irq to
/* i40e_vc_config_irq_map_msg - handle VIRTCHNL_OP_CONFIG_IRQ_MAP.
 * Validates each vector map (vector id, vsi id, every queue bit in the
 * rx/tx queue maps) and programs the interrupt linked list for the VF.
 * NOTE(review): the declaration of i, closing braces and the error
 * label are missing from this extract.
 */
1877 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1879 	struct virtchnl_irq_map_info *irqmap_info =
1880 	    (struct virtchnl_irq_map_info *)msg;
1881 	struct virtchnl_vector_map *map;
1882 	u16 vsi_id, vsi_queue_id, vector_id;
1883 	i40e_status aq_ret = 0;
1884 	unsigned long tempmap;
1887 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
1888 		aq_ret = I40E_ERR_PARAM;
1892 	for (i = 0; i < irqmap_info->num_vectors; i++) {
1893 		map = &irqmap_info->vecmap[i];
1895 		vector_id = map->vector_id;
1896 		vsi_id = map->vsi_id;
1897 		/* validate msg params */
1898 		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
1899 		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1900 			aq_ret = I40E_ERR_PARAM;
1904 		/* lookout for the invalid queue index */
/* walk each set bit of the rx queue bitmap and validate the index */
1905 		tempmap = map->rxq_map;
1906 		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1907 			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1909 				aq_ret = I40E_ERR_PARAM;
/* same validation for the tx queue bitmap */
1914 		tempmap = map->txq_map;
1915 		for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
1916 			if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
1918 				aq_ret = I40E_ERR_PARAM;
1923 		i40e_config_irq_link_list(vf, vsi_id, map);
1926 	/* send the response to the VF */
1927 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
1932 * i40e_vc_enable_queues_msg
1933 * @vf: pointer to the VF info
1934 * @msg: pointer to the msg buffer
1935 * @msglen: msg length
1937 * called from the VF to enable all or specific queue(s)
/* i40e_vc_enable_queues_msg - handle VIRTCHNL_OP_ENABLE_QUEUES.
 * Validates VF state, VSI id, and that at least one rx or tx queue is
 * selected, then starts the rings of the VF's LAN VSI.
 */
1939 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1941 	struct virtchnl_queue_select *vqs =
1942 	    (struct virtchnl_queue_select *)msg;
1943 	struct i40e_pf *pf = vf->pf;
1944 	u16 vsi_id = vqs->vsi_id;
1945 	i40e_status aq_ret = 0;
1947 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
1948 		aq_ret = I40E_ERR_PARAM;
1952 	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
1953 		aq_ret = I40E_ERR_PARAM;
/* an empty queue selection is a malformed request */
1957 	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1958 		aq_ret = I40E_ERR_PARAM;
1962 	if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
1963 		aq_ret = I40E_ERR_TIMEOUT;
1965 	/* send the response to the VF */
1966 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
1971 * i40e_vc_disable_queues_msg
1972 * @vf: pointer to the VF info
1973 * @msg: pointer to the msg buffer
1974 * @msglen: msg length
1976 * called from the VF to disable all or specific
/* i40e_vc_disable_queues_msg - handle VIRTCHNL_OP_DISABLE_QUEUES.
 * Mirror of the enable path: validates VF state, VSI id, and a
 * non-empty queue selection, then stops the rings of the VF's LAN VSI.
 */
1979 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
1981 	struct virtchnl_queue_select *vqs =
1982 	    (struct virtchnl_queue_select *)msg;
1983 	struct i40e_pf *pf = vf->pf;
1984 	i40e_status aq_ret = 0;
1986 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
1987 		aq_ret = I40E_ERR_PARAM;
1991 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1992 		aq_ret = I40E_ERR_PARAM;
1996 	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
1997 		aq_ret = I40E_ERR_PARAM;
2001 	i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);
2004 	/* send the response to the VF */
2005 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2010 * i40e_vc_get_stats_msg
2011 * @vf: pointer to the VF info
2012 * @msg: pointer to the msg buffer
2013 * @msglen: msg length
2015 * called from the VF to get vsi stats
/* i40e_vc_get_stats_msg - handle VIRTCHNL_OP_GET_STATS.
 * Refreshes the VF's LAN VSI ethernet statistics and sends a copy back;
 * on any validation failure a zeroed stats struct is returned alongside
 * the error code.
 */
2017 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2019 	struct virtchnl_queue_select *vqs =
2020 	    (struct virtchnl_queue_select *)msg;
2021 	struct i40e_pf *pf = vf->pf;
2022 	struct i40e_eth_stats stats;
2023 	i40e_status aq_ret = 0;
2024 	struct i40e_vsi *vsi;
/* zero so an early-error reply still carries well-defined stats */
2026 	memset(&stats, 0, sizeof(struct i40e_eth_stats));
2028 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2029 		aq_ret = I40E_ERR_PARAM;
2033 	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2034 		aq_ret = I40E_ERR_PARAM;
2038 	vsi = pf->vsi[vf->lan_vsi_idx];
2040 		aq_ret = I40E_ERR_PARAM;
2043 	i40e_update_eth_stats(vsi);
2044 	stats = vsi->eth_stats;
2047 	/* send the response back to the VF */
2048 	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2049 				      (u8 *)&stats, sizeof(stats));
2052 /* If the VF is not trusted restrict the number of MAC/VLAN it can program
2053  * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
/* limits enforced in i40e_check_vf_permission() and i40e_vc_add_vlan_msg() */
2055 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2056 #define I40E_VC_MAX_VLAN_PER_VF 8
2059 * i40e_check_vf_permission
2060 * @vf: pointer to the VF info
2061 * @macaddr: pointer to the MAC Address being checked
2063 * Check if the VF has permission to add or delete unicast MAC address
2064 * filters and return error code -EPERM if not. Then check if the
2065 * address filter requested is broadcast or zero and if so return
2066 * an invalid MAC address error code.
/* i40e_check_vf_permission - vet a MAC address the VF wants to add/delete.
 * Rejects broadcast/zero addresses, blocks unicast changes when the host
 * administrator pinned the VF MAC (unless it matches the assigned
 * address or the VF is privileged), and enforces the untrusted-VF filter
 * budget (I40E_VC_MAX_MAC_ADDR_PER_VF).
 * NOTE(review): assignments of the -EPERM return value and the final
 * return are missing from this extract.
 */
2068 static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
2070 	struct i40e_pf *pf = vf->pf;
2073 	if (is_broadcast_ether_addr(macaddr) ||
2074 	    is_zero_ether_addr(macaddr)) {
2075 		dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
2076 		ret = I40E_ERR_INVALID_MAC_ADDR;
2077 	} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
2078 		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2079 		   !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
2080 		/* If the host VMM administrator has set the VF MAC address
2081 		 * administratively via the ndo_set_vf_mac command then deny
2082 		 * permission to the VF to add or delete unicast MAC addresses.
2083 		 * Unless the VF is privileged and then it can do whatever.
2084 		 * The VF may request to set the MAC address filter already
2085 		 * assigned to it so do not return an error in that case.
2087 		dev_err(&pf->pdev->dev,
2088 			"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
2090 	} else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
2091 		   !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2092 		dev_err(&pf->pdev->dev,
2093 			"VF is not trusted, switch the VF to trusted to add more functionality\n");
2100 * i40e_vc_add_mac_addr_msg
2101 * @vf: pointer to the VF info
2102 * @msg: pointer to the msg buffer
2103 * @msglen: msg length
2105 * add guest mac address filter
/* i40e_vc_add_mac_addr_msg - handle VIRTCHNL_OP_ADD_ETH_ADDR.
 * Permission-checks every requested address, then adds the missing ones
 * to the VF's LAN VSI filter list under the filter hash lock and syncs
 * the list to hardware.
 * NOTE(review): the goto target, the !f check around i40e_find_mac, and
 * the num_mac accounting are missing from this extract.
 */
2107 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2109 	struct virtchnl_ether_addr_list *al =
2110 	    (struct virtchnl_ether_addr_list *)msg;
2111 	struct i40e_pf *pf = vf->pf;
2112 	struct i40e_vsi *vsi = NULL;
2113 	u16 vsi_id = al->vsi_id;
2114 	i40e_status ret = 0;
2117 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2118 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2119 		ret = I40E_ERR_PARAM;
/* reject the whole request if any address fails the permission check */
2123 	for (i = 0; i < al->num_elements; i++) {
2124 		ret = i40e_check_vf_permission(vf, al->list[i].addr);
2128 	vsi = pf->vsi[vf->lan_vsi_idx];
2130 	/* Lock once, because all function inside for loop accesses VSI's
2131 	 * MAC filter list which needs to be protected using same lock.
2133 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2135 	/* add new addresses to the list */
2136 	for (i = 0; i < al->num_elements; i++) {
2137 		struct i40e_mac_filter *f;
/* skip addresses that already have a filter; add the rest */
2139 		f = i40e_find_mac(vsi, al->list[i].addr);
2141 			f = i40e_add_mac_filter(vsi, al->list[i].addr);
2144 				dev_err(&pf->pdev->dev,
2145 					"Unable to add MAC filter %pM for VF %d\n",
2146 					al->list[i].addr, vf->vf_id);
2147 				ret = I40E_ERR_PARAM;
2148 				spin_unlock_bh(&vsi->mac_filter_hash_lock);
2154 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2156 	/* program the updated filter list */
2157 	ret = i40e_sync_vsi_filters(vsi);
2159 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2163 	/* send the response to the VF */
2164 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2169 * i40e_vc_del_mac_addr_msg
2170 * @vf: pointer to the VF info
2171 * @msg: pointer to the msg buffer
2172 * @msglen: msg length
2174 * remove guest mac address filter
/* i40e_vc_del_mac_addr_msg - handle VIRTCHNL_OP_DEL_ETH_ADDR.
 * Validates every address first (no broadcast/zero; the PF-assigned
 * default MAC may not be deleted), then removes the filters under the
 * hash lock and syncs the list to hardware.
 * NOTE(review): goto targets, the num_mac accounting and some braces
 * are missing from this extract.
 */
2176 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2178 	struct virtchnl_ether_addr_list *al =
2179 	    (struct virtchnl_ether_addr_list *)msg;
2180 	struct i40e_pf *pf = vf->pf;
2181 	struct i40e_vsi *vsi = NULL;
2182 	u16 vsi_id = al->vsi_id;
2183 	i40e_status ret = 0;
2186 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2187 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2188 		ret = I40E_ERR_PARAM;
2192 	for (i = 0; i < al->num_elements; i++) {
2193 		if (is_broadcast_ether_addr(al->list[i].addr) ||
2194 		    is_zero_ether_addr(al->list[i].addr)) {
2195 			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2196 				al->list[i].addr, vf->vf_id);
2197 			ret = I40E_ERR_INVALID_MAC_ADDR;
/* the administratively-set MAC can only change through a VF reset */
2201 		if (vf->pf_set_mac &&
2202 		    ether_addr_equal(al->list[i].addr,
2203 				     vf->default_lan_addr.addr)) {
2204 			dev_err(&pf->pdev->dev,
2205 				"MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
2206 				vf->default_lan_addr.addr, vf->vf_id);
2207 			ret = I40E_ERR_PARAM;
2211 	vsi = pf->vsi[vf->lan_vsi_idx];
2213 	spin_lock_bh(&vsi->mac_filter_hash_lock);
2214 	/* delete addresses from the list */
2215 	for (i = 0; i < al->num_elements; i++)
2216 		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2217 			ret = I40E_ERR_INVALID_MAC_ADDR;
2218 			spin_unlock_bh(&vsi->mac_filter_hash_lock);
2224 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
2226 	/* program the updated filter list */
2227 	ret = i40e_sync_vsi_filters(vsi);
2229 		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2233 	/* send the response to the VF */
2234 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
2239 * i40e_vc_add_vlan_msg
2240 * @vf: pointer to the VF info
2241 * @msg: pointer to the msg buffer
2242 * @msglen: msg length
2244 * program guest vlan id
/* i40e_vc_add_vlan_msg - handle VIRTCHNL_OP_ADD_VLAN.
 * Enforces the untrusted-VF VLAN budget, validates VF state / VSI /
 * VLAN ids, refuses when a port VLAN is set, enables VLAN stripping,
 * adds each filter and re-applies any active promiscuous mode on the
 * new VLAN.
 * NOTE(review): goto targets, the num_vlan accounting and closing
 * braces are missing from this extract.
 */
2246 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2248 	struct virtchnl_vlan_filter_list *vfl =
2249 	    (struct virtchnl_vlan_filter_list *)msg;
2250 	struct i40e_pf *pf = vf->pf;
2251 	struct i40e_vsi *vsi = NULL;
2252 	u16 vsi_id = vfl->vsi_id;
2253 	i40e_status aq_ret = 0;
2256 	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
2257 	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2258 		dev_err(&pf->pdev->dev,
2259 			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
2262 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2263 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2264 		aq_ret = I40E_ERR_PARAM;
2268 	for (i = 0; i < vfl->num_elements; i++) {
2269 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2270 			aq_ret = I40E_ERR_PARAM;
2271 			dev_err(&pf->pdev->dev,
2272 				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2276 	vsi = pf->vsi[vf->lan_vsi_idx];
/* guest VLANs are not allowed on top of a port VLAN */
2277 	if (vsi->info.pvid) {
2278 		aq_ret = I40E_ERR_PARAM;
2282 	i40e_vlan_stripping_enable(vsi);
2283 	for (i = 0; i < vfl->num_elements; i++) {
2284 		/* add new VLAN filter */
2285 		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
/* keep active promiscuous modes in sync with the new VLAN */
2289 		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2290 			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2294 		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2295 			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2301 			dev_err(&pf->pdev->dev,
2302 				"Unable to add VLAN filter %d for VF %d, error %d\n",
2303 				vfl->vlan_id[i], vf->vf_id, ret);
2307 	/* send the response to the VF */
2308 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
2312 * i40e_vc_remove_vlan_msg
2313 * @vf: pointer to the VF info
2314 * @msg: pointer to the msg buffer
2315 * @msglen: msg length
2317 * remove programmed guest vlan id
/* i40e_vc_remove_vlan_msg - handle VIRTCHNL_OP_DEL_VLAN.
 * Validates VF state / VSI / VLAN ids, refuses when a port VLAN is set,
 * kills each VLAN filter and drops any active per-VLAN promiscuous
 * settings for the removed VLANs.
 * NOTE(review): goto targets, the num_vlan accounting and closing
 * braces are missing from this extract.
 */
2319 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2321 	struct virtchnl_vlan_filter_list *vfl =
2322 	    (struct virtchnl_vlan_filter_list *)msg;
2323 	struct i40e_pf *pf = vf->pf;
2324 	struct i40e_vsi *vsi = NULL;
2325 	u16 vsi_id = vfl->vsi_id;
2326 	i40e_status aq_ret = 0;
2329 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2330 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
2331 		aq_ret = I40E_ERR_PARAM;
2335 	for (i = 0; i < vfl->num_elements; i++) {
2336 		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
2337 			aq_ret = I40E_ERR_PARAM;
2342 	vsi = pf->vsi[vf->lan_vsi_idx];
2343 	if (vsi->info.pvid) {
2344 		aq_ret = I40E_ERR_PARAM;
2348 	for (i = 0; i < vfl->num_elements; i++) {
2349 		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
/* withdraw per-VLAN promiscuous settings for the removed VLAN */
2352 		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
2353 			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
2357 		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
2358 			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
2365 	/* send the response to the VF */
2366 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
2371 * @vf: pointer to the VF info
2372 * @msg: pointer to the msg buffer
2373 * @msglen: msg length
2375 * called from the VF for the iwarp msgs
/* i40e_vc_iwarp_msg - handle VIRTCHNL_OP_IWARP.
 * Requires the VF to be ACTIVE with iWARP enabled, then forwards the
 * opaque message to the registered client driver for the PF's LAN VSI.
 */
2377 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
2379 	struct i40e_pf *pf = vf->pf;
2380 	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
2381 	i40e_status aq_ret = 0;
2383 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2384 	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2385 		aq_ret = I40E_ERR_PARAM;
2389 	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
2393 	/* send the response to the VF */
2394 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
2399 * i40e_vc_iwarp_qvmap_msg
2400 * @vf: pointer to the VF info
2401 * @msg: pointer to the msg buffer
2402 * @msglen: msg length
2403 * @config: config qvmap or release it
2405 * called from the VF for the iwarp msgs
/* i40e_vc_iwarp_qvmap_msg - handle iWARP queue-vector map requests.
 * @config selects between VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP (program the
 * qvlist) and VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP (tear it down); the
 * reply opcode is chosen to match.
 */
2407 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
2410 	struct virtchnl_iwarp_qvlist_info *qvlist_info =
2411 				(struct virtchnl_iwarp_qvlist_info *)msg;
2412 	i40e_status aq_ret = 0;
2414 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2415 	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
2416 		aq_ret = I40E_ERR_PARAM;
2421 		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
2422 			aq_ret = I40E_ERR_PARAM;
2424 		i40e_release_iwarp_qvlist(vf);
2428 	/* send the response to the VF */
2429 	return i40e_vc_send_resp_to_vf(vf,
2430 			       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
2431 			       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
2436 * i40e_vc_config_rss_key
2437 * @vf: pointer to the VF info
2438 * @msg: pointer to the msg buffer
2439 * @msglen: msg length
2441 * Configure the VF's RSS key
/* i40e_vc_config_rss_key - handle VIRTCHNL_OP_CONFIG_RSS_KEY.
 * Validates VF state, VSI id and an exact key length of
 * I40E_HKEY_ARRAY_SIZE, then programs the RSS hash key for the VF's
 * LAN VSI.
 */
2443 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
2445 	struct virtchnl_rss_key *vrk =
2446 		(struct virtchnl_rss_key *)msg;
2447 	struct i40e_pf *pf = vf->pf;
2448 	struct i40e_vsi *vsi = NULL;
2449 	u16 vsi_id = vrk->vsi_id;
2450 	i40e_status aq_ret = 0;
2452 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2453 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2454 	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
2455 		aq_ret = I40E_ERR_PARAM;
2459 	vsi = pf->vsi[vf->lan_vsi_idx];
/* key only — LUT argument is NULL/0 here */
2460 	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
2462 	/* send the response to the VF */
2463 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
2468 * i40e_vc_config_rss_lut
2469 * @vf: pointer to the VF info
2470 * @msg: pointer to the msg buffer
2471 * @msglen: msg length
2473 * Configure the VF's RSS LUT
/* i40e_vc_config_rss_lut - handle VIRTCHNL_OP_CONFIG_RSS_LUT.
 * Validates VF state, VSI id and an exact LUT size of
 * I40E_VF_HLUT_ARRAY_SIZE, then programs the RSS lookup table for the
 * VF's LAN VSI.
 */
2475 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
2477 	struct virtchnl_rss_lut *vrl =
2478 		(struct virtchnl_rss_lut *)msg;
2479 	struct i40e_pf *pf = vf->pf;
2480 	struct i40e_vsi *vsi = NULL;
2481 	u16 vsi_id = vrl->vsi_id;
2482 	i40e_status aq_ret = 0;
2484 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
2485 	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
2486 	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
2487 		aq_ret = I40E_ERR_PARAM;
2491 	vsi = pf->vsi[vf->lan_vsi_idx];
/* LUT only — key argument is NULL here */
2492 	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
2493 	/* send the response to the VF */
2495 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
2500 * i40e_vc_get_rss_hena
2501 * @vf: pointer to the VF info
2502 * @msg: pointer to the msg buffer
2503 * @msglen: msg length
2505 * Return the RSS HENA bits allowed by the hardware
/* i40e_vc_get_rss_hena - handle VIRTCHNL_OP_GET_RSS_HENA_CAPS.
 * Allocates a reply buffer, fills it with the PF's default RSS hash
 * enable (HENA) bits and sends it back to the VF.
 * NOTE(review): the declaration of len, the error label and the kfree
 * of vrh are missing from this extract.
 */
2507 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2509 	struct virtchnl_rss_hena *vrh = NULL;
2510 	struct i40e_pf *pf = vf->pf;
2511 	i40e_status aq_ret = 0;
2514 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2515 		aq_ret = I40E_ERR_PARAM;
2518 	len = sizeof(struct virtchnl_rss_hena);
2520 	vrh = kzalloc(len, GFP_KERNEL);
2522 		aq_ret = I40E_ERR_NO_MEMORY;
2526 	vrh->hena = i40e_pf_get_default_rss_hena(pf);
2528 	/* send the response back to the VF */
2529 	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
2530 					aq_ret, (u8 *)vrh, len);
2536 * i40e_vc_set_rss_hena
2537 * @vf: pointer to the VF info
2538 * @msg: pointer to the msg buffer
2539 * @msglen: msg length
2541 * Set the RSS HENA bits for the VF
/* i40e_vc_set_rss_hena - handle VIRTCHNL_OP_SET_RSS_HENA.
 * Writes the VF-requested 64-bit HENA value into the two 32-bit
 * per-VF VFQF_HENA1 registers (low word then high word).
 */
2543 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
2545 	struct virtchnl_rss_hena *vrh =
2546 		(struct virtchnl_rss_hena *)msg;
2547 	struct i40e_pf *pf = vf->pf;
2548 	struct i40e_hw *hw = &pf->hw;
2549 	i40e_status aq_ret = 0;
2551 	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2552 		aq_ret = I40E_ERR_PARAM;
2555 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
2556 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
2557 			  (u32)(vrh->hena >> 32));
2559 	/* send the response to the VF */
2561 	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
2565 * i40e_vc_enable_vlan_stripping
2566 * @vf: pointer to the VF info
2567 * @msg: pointer to the msg buffer
2568 * @msglen: msg length
2570 * Enable vlan header stripping for the VF
2572 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2575 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2576 i40e_status aq_ret = 0;
2578 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2579 aq_ret = I40E_ERR_PARAM;
2583 i40e_vlan_stripping_enable(vsi);
2585 /* send the response to the VF */
2587 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
2592 * i40e_vc_disable_vlan_stripping
2593 * @vf: pointer to the VF info
2594 * @msg: pointer to the msg buffer
2595 * @msglen: msg length
2597 * Disable vlan header stripping for the VF
2599 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
2602 struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
2603 i40e_status aq_ret = 0;
2605 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2606 aq_ret = I40E_ERR_PARAM;
2610 i40e_vlan_stripping_disable(vsi);
2612 /* send the response to the VF */
2614 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
2619 * i40e_vc_process_vf_msg
2620 * @pf: pointer to the PF structure
2621 * @vf_id: source VF id
2622 * @msg: pointer to the msg buffer
2623 * @msglen: msg length
2624 * @msghndl: msg handle
2626 * called from the common aeq/arq handler to
2627 * process request from VF
2629 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
2630 u32 v_retval, u8 *msg, u16 msglen)
2632 struct i40e_hw *hw = &pf->hw;
2633 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
2637 pf->vf_aq_requests++;
2638 if (local_vf_id >= pf->num_alloc_vfs)
2640 vf = &(pf->vf[local_vf_id]);
2642 /* Check if VF is disabled. */
2643 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
2644 return I40E_ERR_PARAM;
2646 /* perform basic checks on the msg */
2647 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
2649 /* perform additional checks specific to this driver */
2650 if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
2651 struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;
2653 if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
2655 } else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
2656 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2658 if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
2663 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
2664 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
2665 local_vf_id, v_opcode, msglen);
2667 case VIRTCHNL_ERR_PARAM:
2675 case VIRTCHNL_OP_VERSION:
2676 ret = i40e_vc_get_version_msg(vf, msg);
2678 case VIRTCHNL_OP_GET_VF_RESOURCES:
2679 ret = i40e_vc_get_vf_resources_msg(vf, msg);
2681 case VIRTCHNL_OP_RESET_VF:
2682 i40e_vc_reset_vf_msg(vf);
2685 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
2686 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
2688 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
2689 ret = i40e_vc_config_queues_msg(vf, msg, msglen);
2691 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
2692 ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
2694 case VIRTCHNL_OP_ENABLE_QUEUES:
2695 ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
2696 i40e_vc_notify_vf_link_state(vf);
2698 case VIRTCHNL_OP_DISABLE_QUEUES:
2699 ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
2701 case VIRTCHNL_OP_ADD_ETH_ADDR:
2702 ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
2704 case VIRTCHNL_OP_DEL_ETH_ADDR:
2705 ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
2707 case VIRTCHNL_OP_ADD_VLAN:
2708 ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
2710 case VIRTCHNL_OP_DEL_VLAN:
2711 ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
2713 case VIRTCHNL_OP_GET_STATS:
2714 ret = i40e_vc_get_stats_msg(vf, msg, msglen);
2716 case VIRTCHNL_OP_IWARP:
2717 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
2719 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
2720 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
2722 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
2723 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
2725 case VIRTCHNL_OP_CONFIG_RSS_KEY:
2726 ret = i40e_vc_config_rss_key(vf, msg, msglen);
2728 case VIRTCHNL_OP_CONFIG_RSS_LUT:
2729 ret = i40e_vc_config_rss_lut(vf, msg, msglen);
2731 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
2732 ret = i40e_vc_get_rss_hena(vf, msg, msglen);
2734 case VIRTCHNL_OP_SET_RSS_HENA:
2735 ret = i40e_vc_set_rss_hena(vf, msg, msglen);
2737 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
2738 ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
2740 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
2741 ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
2744 case VIRTCHNL_OP_UNKNOWN:
2746 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
2747 v_opcode, local_vf_id);
2748 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
2749 I40E_ERR_NOT_IMPLEMENTED);
2757 * i40e_vc_process_vflr_event
2758 * @pf: pointer to the PF structure
2760 * called from the vlfr irq handler to
2761 * free up VF resources and state variables
2763 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
2765 struct i40e_hw *hw = &pf->hw;
2766 u32 reg, reg_idx, bit_idx;
2770 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
2773 /* Re-enable the VFLR interrupt cause here, before looking for which
2774 * VF got reset. Otherwise, if another VF gets a reset while the
2775 * first one is being processed, that interrupt will be lost, and
2776 * that VF will be stuck in reset forever.
2778 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
2779 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
2780 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
2783 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
2784 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
2785 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
2786 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
2787 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
2788 vf = &pf->vf[vf_id];
2789 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
2790 if (reg & BIT(bit_idx))
2791 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
2792 i40e_reset_vf(vf, true);
2799 * i40e_ndo_set_vf_mac
2800 * @netdev: network interface device structure
2801 * @vf_id: VF identifier
2804 * program VF mac address
2806 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
2808 struct i40e_netdev_priv *np = netdev_priv(netdev);
2809 struct i40e_vsi *vsi = np->vsi;
2810 struct i40e_pf *pf = vsi->back;
2811 struct i40e_mac_filter *f;
2814 struct hlist_node *h;
2818 /* validate the request */
2819 if (vf_id >= pf->num_alloc_vfs) {
2820 dev_err(&pf->pdev->dev,
2821 "Invalid VF Identifier %d\n", vf_id);
2826 vf = &(pf->vf[vf_id]);
2827 vsi = pf->vsi[vf->lan_vsi_idx];
2829 /* When the VF is resetting wait until it is done.
2830 * It can take up to 200 milliseconds,
2831 * but wait for up to 300 milliseconds to be safe.
2833 for (i = 0; i < 15; i++) {
2834 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
2838 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
2839 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
2845 if (is_multicast_ether_addr(mac)) {
2846 dev_err(&pf->pdev->dev,
2847 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
2852 /* Lock once because below invoked function add/del_filter requires
2853 * mac_filter_hash_lock to be held
2855 spin_lock_bh(&vsi->mac_filter_hash_lock);
2857 /* delete the temporary mac address */
2858 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
2859 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
2861 /* Delete all the filters for this VSI - we're going to kill it
2864 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
2865 __i40e_del_filter(vsi, f);
2867 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2869 /* program mac filter */
2870 if (i40e_sync_vsi_filters(vsi)) {
2871 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
2875 ether_addr_copy(vf->default_lan_addr.addr, mac);
2877 if (is_zero_ether_addr(mac)) {
2878 vf->pf_set_mac = false;
2879 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
2881 vf->pf_set_mac = true;
2882 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
2886 /* Force the VF driver stop so it has to reload with new MAC address */
2887 i40e_vc_disable_vf(pf, vf);
2888 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
2895 * i40e_ndo_set_vf_port_vlan
2896 * @netdev: network interface device structure
2897 * @vf_id: VF identifier
2898 * @vlan_id: mac address
2899 * @qos: priority setting
2900 * @vlan_proto: vlan protocol
2902 * program VF vlan id and/or qos
2904 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
2905 u16 vlan_id, u8 qos, __be16 vlan_proto)
2907 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
2908 struct i40e_netdev_priv *np = netdev_priv(netdev);
2909 struct i40e_pf *pf = np->vsi->back;
2910 struct i40e_vsi *vsi;
2914 /* validate the request */
2915 if (vf_id >= pf->num_alloc_vfs) {
2916 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2921 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
2922 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
2927 if (vlan_proto != htons(ETH_P_8021Q)) {
2928 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
2929 ret = -EPROTONOSUPPORT;
2933 vf = &(pf->vf[vf_id]);
2934 vsi = pf->vsi[vf->lan_vsi_idx];
2935 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
2936 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
2942 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
2943 /* duplicate request, so just return success */
2946 /* Locked once because multiple functions below iterate list */
2947 spin_lock_bh(&vsi->mac_filter_hash_lock);
2949 if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) {
2950 dev_err(&pf->pdev->dev,
2951 "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
2953 /* Administrator Error - knock the VF offline until he does
2954 * the right thing by reconfiguring his network correctly
2955 * and then reloading the VF driver.
2957 i40e_vc_disable_vf(pf, vf);
2958 /* During reset the VF got a new VSI, so refresh the pointer. */
2959 vsi = pf->vsi[vf->lan_vsi_idx];
2962 /* Check for condition where there was already a port VLAN ID
2963 * filter set and now it is being deleted by setting it to zero.
2964 * Additionally check for the condition where there was a port
2965 * VLAN but now there is a new and different port VLAN being set.
2966 * Before deleting all the old VLAN filters we must add new ones
2967 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
2968 * MAC addresses deleted.
2970 if ((!(vlan_id || qos) ||
2971 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
2973 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
2975 dev_info(&vsi->back->pdev->dev,
2976 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
2977 vsi->back->hw.aq.asq_last_status);
2978 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2983 if (vsi->info.pvid) {
2984 /* remove all filters on the old VLAN */
2985 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
2989 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2991 ret = i40e_vsi_add_pvid(vsi, vlanprio);
2993 i40e_vsi_remove_pvid(vsi);
2994 spin_lock_bh(&vsi->mac_filter_hash_lock);
2997 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
2998 vlan_id, qos, vf_id);
3000 /* add new VLAN filter for each MAC */
3001 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
3003 dev_info(&vsi->back->pdev->dev,
3004 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
3005 vsi->back->hw.aq.asq_last_status);
3006 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3010 /* remove the previously added non-VLAN MAC filters */
3011 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
3014 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3016 /* Schedule the worker thread to take care of applying changes */
3017 i40e_service_event_schedule(vsi->back);
3020 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
3024 /* The Port VLAN needs to be saved across resets the same as the
3025 * default LAN MAC address.
3027 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
3034 #define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
3035 #define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
3037 * i40e_ndo_set_vf_bw
3038 * @netdev: network interface device structure
3039 * @vf_id: VF identifier
3042 * configure VF Tx rate
3044 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
3047 struct i40e_netdev_priv *np = netdev_priv(netdev);
3048 struct i40e_pf *pf = np->vsi->back;
3049 struct i40e_vsi *vsi;
3054 /* validate the request */
3055 if (vf_id >= pf->num_alloc_vfs) {
3056 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
3062 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
3063 min_tx_rate, vf_id);
3067 vf = &(pf->vf[vf_id]);
3068 vsi = pf->vsi[vf->lan_vsi_idx];
3069 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3070 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3076 switch (pf->hw.phy.link_info.link_speed) {
3077 case I40E_LINK_SPEED_40GB:
3080 case I40E_LINK_SPEED_25GB:
3083 case I40E_LINK_SPEED_20GB:
3086 case I40E_LINK_SPEED_10GB:
3089 case I40E_LINK_SPEED_1GB:
3096 if (max_tx_rate > speed) {
3097 dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n",
3098 max_tx_rate, vf->vf_id);
3103 if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
3104 dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
3108 /* Tx rate credits are in values of 50Mbps, 0 is disabled*/
3109 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
3110 max_tx_rate / I40E_BW_CREDIT_DIVISOR,
3111 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
3113 dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
3118 vf->tx_rate = max_tx_rate;
3124 * i40e_ndo_get_vf_config
3125 * @netdev: network interface device structure
3126 * @vf_id: VF identifier
3127 * @ivi: VF configuration structure
3129 * return VF configuration
3131 int i40e_ndo_get_vf_config(struct net_device *netdev,
3132 int vf_id, struct ifla_vf_info *ivi)
3134 struct i40e_netdev_priv *np = netdev_priv(netdev);
3135 struct i40e_vsi *vsi = np->vsi;
3136 struct i40e_pf *pf = vsi->back;
3140 /* validate the request */
3141 if (vf_id >= pf->num_alloc_vfs) {
3142 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3147 vf = &(pf->vf[vf_id]);
3148 /* first vsi is always the LAN vsi */
3149 vsi = pf->vsi[vf->lan_vsi_idx];
3150 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3151 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3159 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
3161 ivi->max_tx_rate = vf->tx_rate;
3162 ivi->min_tx_rate = 0;
3163 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
3164 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
3165 I40E_VLAN_PRIORITY_SHIFT;
3166 if (vf->link_forced == false)
3167 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3168 else if (vf->link_up == true)
3169 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3171 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3172 ivi->spoofchk = vf->spoofchk;
3173 ivi->trusted = vf->trusted;
3181 * i40e_ndo_set_vf_link_state
3182 * @netdev: network interface device structure
3183 * @vf_id: VF identifier
3184 * @link: required link state
3186 * Set the link state of a specified VF, regardless of physical link state
3188 int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
3190 struct i40e_netdev_priv *np = netdev_priv(netdev);
3191 struct i40e_pf *pf = np->vsi->back;
3192 struct virtchnl_pf_event pfe;
3193 struct i40e_hw *hw = &pf->hw;
3198 /* validate the request */
3199 if (vf_id >= pf->num_alloc_vfs) {
3200 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3205 vf = &pf->vf[vf_id];
3206 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
3208 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
3209 pfe.severity = PF_EVENT_SEVERITY_INFO;
3212 case IFLA_VF_LINK_STATE_AUTO:
3213 vf->link_forced = false;
3214 pfe.event_data.link_event.link_status =
3215 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
3216 pfe.event_data.link_event.link_speed =
3217 (enum virtchnl_link_speed)
3218 pf->hw.phy.link_info.link_speed;
3220 case IFLA_VF_LINK_STATE_ENABLE:
3221 vf->link_forced = true;
3223 pfe.event_data.link_event.link_status = true;
3224 pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
3226 case IFLA_VF_LINK_STATE_DISABLE:
3227 vf->link_forced = true;
3228 vf->link_up = false;
3229 pfe.event_data.link_event.link_status = false;
3230 pfe.event_data.link_event.link_speed = 0;
3236 /* Notify the VF of its new link state */
3237 i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
3238 0, (u8 *)&pfe, sizeof(pfe), NULL);
3245 * i40e_ndo_set_vf_spoofchk
3246 * @netdev: network interface device structure
3247 * @vf_id: VF identifier
3248 * @enable: flag to enable or disable feature
3250 * Enable or disable VF spoof checking
3252 int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
3254 struct i40e_netdev_priv *np = netdev_priv(netdev);
3255 struct i40e_vsi *vsi = np->vsi;
3256 struct i40e_pf *pf = vsi->back;
3257 struct i40e_vsi_context ctxt;
3258 struct i40e_hw *hw = &pf->hw;
3262 /* validate the request */
3263 if (vf_id >= pf->num_alloc_vfs) {
3264 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3269 vf = &(pf->vf[vf_id]);
3270 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
3271 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
3277 if (enable == vf->spoofchk)
3280 vf->spoofchk = enable;
3281 memset(&ctxt, 0, sizeof(ctxt));
3282 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
3283 ctxt.pf_num = pf->hw.pf_id;
3284 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
3286 ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
3287 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
3288 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
3290 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
3299 * i40e_ndo_set_vf_trust
3300 * @netdev: network interface device structure of the pf
3301 * @vf_id: VF identifier
3302 * @setting: trust setting
3304 * Enable or disable VF trust setting
3306 int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
3308 struct i40e_netdev_priv *np = netdev_priv(netdev);
3309 struct i40e_pf *pf = np->vsi->back;
3313 /* validate the request */
3314 if (vf_id >= pf->num_alloc_vfs) {
3315 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
3319 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
3320 dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
3324 vf = &pf->vf[vf_id];
3328 if (setting == vf->trusted)
3331 vf->trusted = setting;
3332 i40e_vc_notify_vf_reset(vf);
3333 i40e_reset_vf(vf, false);
3334 dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
3335 vf_id, setting ? "" : "un");