// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
		/* Not all vfs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}
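
/* Note: the admin queue addresses VFs by absolute id, i.e. the VF number
 * offset by this function's vf_base_id (purely as an illustration, a
 * base id of 64 would place VF 3 at absolute id 67).
 */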

/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}

/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}

/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}

/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}

/***********************misc routines*****************************/

/**
 * i40e_vc_disable_vf
 * @vf: pointer to the VF info
 *
 * Disable the VF through a SW reset.
 **/
static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset occurs initiated after this
	 * function was called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		/* If PF is in VFs releasing state reset VF is impossible,
		 * so leave it.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	dev_warn(&vf->pf->pdev->dev,
		 "Failed to initiate reset for VF %d after 200 milliseconds\n",
		 vf->vf_id);
}

/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}

/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}

/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}

/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u8 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
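
/* Worked example (illustrative values): with a contiguous mapping whose
 * queue_mapping[0] is PF queue 64, VSI-relative queue 3 resolves to PF
 * queue 64 + 3 = 67; a noncontiguous mapping instead looks up its own
 * queue_mapping[] entry for each VSI queue.
 */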

/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although VF considers all the queues(can be 1 to 16) as its
		 * own but they may actually belong to different VSIs(up to 4).
		 * We need to find which queues belongs to which VSI.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}
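
/* Worked example (illustrative values): with ADq and num_qps = 4 in each
 * channel, VF-relative queue 9 walks past ch[0] (9 - 4 = 5) and ch[1]
 * (5 - 4 = 1), so it resolves to queue 1 of the TC2 VSI.
 */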

/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (0 == vector_id)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);

	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		    BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
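
/* linklistmap layout used above: each VSI queue pair owns two adjacent
 * bits - RX at (I40E_VIRTCHNL_SUPPORTED_QTYPES * qid) and TX at the bit
 * after it (the supported qtype count is 2) - so walking the set bits
 * in order yields the interleaved RX/TX chain that was just programmed.
 */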

/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i = 0;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}

/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf, size;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err;
	}

	size = sizeof(struct virtchnl_iwarp_qvlist_info) +
	       (sizeof(struct virtchnl_iwarp_qv_info) *
	       (qvlist_info->num_vectors - 1));
	kfree(vf->qvlist_info);
	vf->qvlist_info = kzalloc(size, GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
			ret = -EINVAL;
			goto err;
		}

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
				I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			       (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
	return ret;
}
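
/* Sizing note for the allocation above: virtchnl_iwarp_qvlist_info ends
 * in a one-element qv_info[] array, hence the "num_vectors - 1" term -
 * the first element is already counted by the sizeof() of the struct.
 */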

/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
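
/* The HMC Tx context stores the ring base in 128-byte units, which is
 * why dma_ring_addr is divided by 128 above; the address supplied by
 * the VF is consequently expected to be 128-byte aligned.
 */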

/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	u16 pf_queue_id;
	int ret = 0;

	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
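
/* The Rx buffer sizes above are likewise stored in shifted units
 * (64-byte granularity for hbuff, 128-byte for dbuff, per the
 * I40E_RXQ_CTX_*_SHIFT definitions), which is what the size limits
 * checked in this function guard against overflowing.
 */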

/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed.  Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* storing VSI index and id for ADq and don't apply the mac filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}

/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id =  vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}
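
/* VSILAN_QTABLE packs two PF queue ids per 32-bit register (low and
 * high 16 bits), so register j covers VSI queues j * 2 and j * 2 + 1,
 * with 0x07FF serving as the end-of-list marker in each half.
 */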

/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id =  vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}

/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}

/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}

/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requeuested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (0 == i)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id))
						     + (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}
	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}

/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs.  If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * HW.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}

#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}
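
/* CIAA/CIAD is a PF-side window into VF PCI config space: the CIAA
 * write selects config offset 0xAA (presumably the PCIe Device Status
 * register on these devices) for the chosen absolute VF, and bit 0x20
 * of the value read back through CIAD is Transactions Pending.
 */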

/**
 * i40e_trigger_vf_reset
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 **/
static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;

	/* warn the VF */
	clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
	 * It's normally disabled in i40e_free_vf_res(), but it's safer
	 * to do it earlier to give some time to finish to any VF config
	 * functions that may still be running at this point.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!flr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
		reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
		wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
	bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
	wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	i40e_flush(hw);

	if (i40e_quiesce_vf_pci(vf))
		dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
			vf->vf_id);
}

/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is in reset, resets successfully, or resets
 * are disabled and false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
		return true;

	/* If the VFs have been disabled, this means something else is
	 * resetting the VF, so we shouldn't continue.
	 */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return true;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
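
/* Worst case, the polling loop above waits 10 * 10-20 ms plus the
 * trailing sleeps, i.e. roughly a quarter of a second, before the
 * "reset check timeout" message is printed.
 */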

/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int i, v;
	u32 reg;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_trigger_vf_reset(&pf->vf[v], flr);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF to fail
		 * the previous check.
		 */
		while (v < pf->num_alloc_vfs) {
			vf = &pf->vf[v];
			reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
				break;

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			v++;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			pf->vf[v].vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (v = 0; v < pf->num_alloc_vfs; v++) {
		/* On initial reset, we don't have any queues to disable */
		if (pf->vf[v].lan_vsi_idx == 0)
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (v = 0; v < pf->num_alloc_vfs; v++)
		i40e_cleanup_reset_vf(&pf->vf[v]);

	i40e_flush(hw);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}

/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	set_bit(__I40E_VFS_RELEASING, pf->state);
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
	clear_bit(__I40E_VFS_RELEASING, pf->state);
}

#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);

	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}

/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}

/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
		}
		return i40e_pci_sriov_enable(pdev, num_vfs);
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		return -EINVAL;
	}
	return 0;
}

/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* single place to detect unsuccessful return values */
	if (v_retval) {
		vf->num_invalid_msgs++;
		dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
			 vf->vf_id, v_opcode, v_retval);
		if (vf->num_invalid_msgs >
		    I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
			dev_err(&pf->pdev->dev,
				"Number of invalid messages exceeded for VF %d\n",
				vf->vf_id);
			dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	} else {
		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_invalid_msgs = 0;
	}

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}

/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it. We should however delete the rest of the VSIs created
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}

/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	int len = 0;
	int ret;

	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = (sizeof(struct virtchnl_vf_resource) +
	       sizeof(struct virtchnl_vsi_resource) * num_vsis);

	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}

/**
 * i40e_vc_reset_vf_msg
 * @vf: pointer to the VF info
 *
 * called from the VF to reset itself,
 * unlike other virtchnl messages, PF driver
 * doesn't send the response back to the VF
 **/
static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
{
	if (test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		i40e_reset_vf(vf, false);
}

/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	int num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}

/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
					       u8 *msg, u16 msglen)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_mac_filter *f;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	struct i40e_vsi *vsi;
	bool alluni = false;
	int aq_err = 0;
	int bkt;

	vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
	    !vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);
		/* Lie to the VF on purpose. */
		aq_ret = 0;
		goto error_param;
	}
	/* Multicast promiscuous handling*/
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
							    allmulti,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
								    vsi->seid,
								    allmulti,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret) {
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
				break;
			}
		}
	} else {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
							       allmulti, NULL);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set multicast promiscuous mode\n",
			 vf->vf_id);
		if (allmulti)
			set_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	}

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	if (vf->port_vlan_id) {
		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
							    alluni,
							    vf->port_vlan_id,
							    NULL);
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
			if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
				continue;
			aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
								    vsi->seid,
								    alluni,
								    f->vlan,
								    NULL);
			aq_err = pf->hw.aq.asq_last_status;
			if (aq_ret)
				dev_err(&pf->pdev->dev,
					"Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
					f->vlan,
					i40e_stat_str(&pf->hw, aq_ret),
					i40e_aq_str(&pf->hw, aq_err));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
							     alluni, NULL,
							     true);
		aq_err = pf->hw.aq.asq_last_status;
		if (aq_ret) {
			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
				vf->vf_id, info->flags,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
			goto error_param;
		}
	}

	if (!aq_ret) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully set unicast promiscuous mode\n",
			 vf->vf_id);
		if (alluni)
			set_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
		else
			clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}

/**
 * i40e_vc_config_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the rx/tx
 * queues
 **/
static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vsi_queue_config_info *qci =
	    (struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id, vsi_queue_id = 0;
	i40e_status aq_ret = 0;
	int i, j = 0, idx = 0;

	vsi_id = qci->vsi_id;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];

		if (!vf->adq_enabled) {
			vsi_queue_id = qpi->txq.queue_id;

			if (qpi->txq.vsi_id != qci->vsi_id ||
			    qpi->rxq.vsi_id != qci->vsi_id ||
			    qpi->rxq.queue_id != vsi_queue_id) {
				aq_ret = I40E_ERR_PARAM;
				goto error_param;
			}
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->rxq) ||
		    i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
					     &qpi->txq)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		/* For ADq there can be up to 4 VSIs with max 4 queues each.
		 * VF does not know about these additional VSIs and all
		 * it cares is about its own queues. PF configures these queues
		 * to its appropriate VSIs based on TC mapping
		 */
		if (vf->adq_enabled) {
			if (j == (vf->ch[idx].num_qps - 1)) {
				idx++;
				j = 0; /* resetting the queue count */
				vsi_queue_id = 0;
			} else {
				j++;
				vsi_queue_id++;
			}
			vsi_id = vf->ch[idx].vsi_id;
		}
	}
	/* set vsi num_queue_pairs in use to num configured by VF */
	if (!vf->adq_enabled) {
		pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
			qci->num_queue_pairs;
	} else {
		for (i = 0; i < vf->num_tc; i++)
			pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
			       vf->ch[i].num_qps;
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
				       aq_ret);
}

/**
 * i40e_validate_queue_map
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queuemap: Tx or Rx queue map
 *
 * check if Tx or Rx queue map is valid
 **/
static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
				   unsigned long queuemap)
{
	u16 vsi_queue_id, queue_id;

	for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
		if (vf->adq_enabled) {
			vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
			queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
		} else {
			queue_id = vsi_queue_id;
		}

		if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
			return -EINVAL;
	}

	return 0;
}
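
/* Worked example, assuming the usual I40E_MAX_VF_VSI == 4 and
 * I40E_DEFAULT_QUEUES_PER_VF == 4: with ADq enabled, bit 9 of the map
 * resolves to channel 9 / 4 = 2 and VSI-relative queue 9 % 4 = 1.
 */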

/**
 * i40e_vc_config_irq_map_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to configure the irq to
 * queue map
 **/
static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_irq_map_info *irqmap_info =
	    (struct virtchnl_irq_map_info *)msg;
	struct virtchnl_vector_map *map;
	u16 vsi_id, vector_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < irqmap_info->num_vectors; i++) {
		map = &irqmap_info->vecmap[i];
		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* validate msg params */
		if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
		    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}

		i40e_config_irq_link_list(vf, vsi_id, map);
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
				       aq_ret);
}

/**
 * i40e_ctrl_vf_tx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/
static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     vsi->base_queue + q_id,
					     false /*is xdp*/, enable);
		if (ret)
			break;
	}
	return ret;
}

/**
 * i40e_ctrl_vf_rx_rings
 * @vsi: the SRIOV VSI being configured
 * @q_map: bit map of the queues to be enabled
 * @enable: start or stop the queue
 **/
static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
				 bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int ret = 0;
	u16 q_id;

	for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
		ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
					     enable);
		if (ret)
			break;
	}
	return ret;
}

/**
 * i40e_vc_enable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to enable all or specific queue(s)
 **/
static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	u16 vsi_id = vqs->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  true)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  true)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}

	/* need to start the rings for additional ADq VSI's as well */
	if (vf->adq_enabled) {
		/* zero belongs to LAN VSI */
		for (i = 1; i < vf->num_tc; i++) {
			if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
				aq_ret = I40E_ERR_TIMEOUT;
		}
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
				       aq_ret);
}

/**
 * i40e_vc_disable_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to disable all or specific
 * queue(s)
 **/
static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	/* Use the queue bit map sent by the VF */
	if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
				  false)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}
	if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
				  false)) {
		aq_ret = I40E_ERR_TIMEOUT;
		goto error_param;
	}
error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
				       aq_ret);
}
/**
 * i40e_vc_request_queues_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queues and return result of sending VF a message.
 **/
static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
{
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	int req_pairs = vfres->num_queue_pairs;
	int cur_pairs = vf->num_queue_pairs;
	struct i40e_pf *pf = vf->pf;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return -EINVAL;

	if (req_pairs <= 0) {
		dev_err(&pf->pdev->dev,
			"VF %d tried to request %d queues. Ignoring.\n",
			vf->vf_id, req_pairs);
	} else if (req_pairs > I40E_MAX_VF_QUEUES) {
		dev_err(&pf->pdev->dev,
			"VF %d tried to request more than %d queues.\n",
			vf->vf_id,
			I40E_MAX_VF_QUEUES);
		vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
	} else if (req_pairs - cur_pairs > pf->queues_left) {
		dev_warn(&pf->pdev->dev,
			 "VF %d requested %d more queues, but only %d left.\n",
			 vf->vf_id,
			 req_pairs - cur_pairs,
			 pf->queues_left);
		vfres->num_queue_pairs = pf->queues_left + cur_pairs;
	} else {
		/* successful request */
		vf->num_req_queues = req_pairs;
		i40e_vc_notify_vf_reset(vf);
		i40e_reset_vf(vf, false);
		return 0;
	}

	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
				      (u8 *)vfres, sizeof(*vfres));
}

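/* VF-side view of this exchange (an illustrative sketch, not PF code):
 *
 *	struct virtchnl_vf_res_request req = {
 *		.num_queue_pairs = 8,	// e.g. ask for 8 pairs instead of 4
 *	};
 *	// sent to the PF as a VIRTCHNL_OP_REQUEST_QUEUES mailbox message
 *
 * On success the PF resets the VF instead of replying, and the VF must
 * re-negotiate its resources; on failure the VF receives this same
 * structure back with num_queue_pairs lowered to what is available.
 */
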
/**
 * i40e_vc_get_stats_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF to get vsi stats
 **/
static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_queue_select *vqs =
	    (struct virtchnl_queue_select *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_eth_stats stats;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;

	memset(&stats, 0, sizeof(struct i40e_eth_stats));

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}
	i40e_update_eth_stats(vsi);
	stats = vsi->eth_stats;

error_param:
	/* send the response back to the VF */
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
				      (u8 *)&stats, sizeof(stats));
}

/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
 * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast.
 */
#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 8

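/* Worked example of the cap above: an untrusted VF may hold at most
 * 16 multicast + 1 unicast (its own LAN address) + 1 broadcast
 * = 18 MAC filters in total, which is what (16 + 1 + 1) encodes.
 */
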
/**
 * i40e_check_vf_permission
 * @vf: pointer to the VF info
 * @al: MAC address list from virtchnl
 *
 * Check that the given list of MAC addresses is allowed. Will return -EPERM
 * if any address in the list is not valid. Checks the following conditions:
 *
 * 1) broadcast and zero addresses are never valid
 * 2) unicast addresses are not allowed if the VMM has administratively set
 *    the VF MAC address, unless the VF is marked as privileged.
 * 3) There is enough space to add all the addresses.
 *
 * Note that to guarantee consistency, it is expected this function be called
 * while holding the mac_filter_hash_lock, as otherwise the current number of
 * addresses might not be accurate.
 **/
static inline int i40e_check_vf_permission(struct i40e_vf *vf,
					   struct virtchnl_ether_addr_list *al)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* If this VF is not privileged, then we can't add more than a limited
	 * number of addresses. Check to make sure that the additions do not
	 * push us over the limit.
	 */
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
	    (vf->num_mac + al->num_elements) > I40E_VC_MAX_MAC_ADDR_PER_VF) {
		dev_err(&pf->pdev->dev,
			"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
		return -EPERM;
	}

	for (i = 0; i < al->num_elements; i++) {
		u8 *addr = al->list[i].addr;

		if (is_broadcast_ether_addr(addr) ||
		    is_zero_ether_addr(addr)) {
			dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
				addr);
			return I40E_ERR_INVALID_MAC_ADDR;
		}

		/* If the host VMM administrator has set the VF MAC address
		 * administratively via the ndo_set_vf_mac command then deny
		 * permission to the VF to add or delete unicast MAC addresses.
		 * Unless the VF is privileged and then it can do whatever.
		 * The VF may request to set the MAC address filter already
		 * assigned to it so do not return an error in that case.
		 */
		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
		    !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
		    !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
			dev_err(&pf->pdev->dev,
				"VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
			return -EPERM;
		}
	}

	return 0;
}

/**
 * i40e_vc_add_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * add guest mac address filter
 **/
static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];

	/* Lock once, because all function inside for loop accesses VSI's
	 * MAC filter list which needs to be protected using same lock.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	ret = i40e_check_vf_permission(vf, al);
	if (ret) {
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		goto error_param;
	}

	/* add new addresses to the list */
	for (i = 0; i < al->num_elements; i++) {
		struct i40e_mac_filter *f;

		f = i40e_find_mac(vsi, al->list[i].addr);
		if (!f) {
			f = i40e_add_mac_filter(vsi, al->list[i].addr);

			if (!f) {
				dev_err(&pf->pdev->dev,
					"Unable to add MAC filter %pM for VF %d\n",
					al->list[i].addr, vf->vf_id);
				ret = I40E_ERR_PARAM;
				spin_unlock_bh(&vsi->mac_filter_hash_lock);
				goto error_param;
			} else {
				vf->num_mac++;
			}
		}
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
				       ret);
}

/**
 * i40e_vc_del_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove guest mac address filter
 **/
static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_ether_addr_list *al =
	    (struct virtchnl_ether_addr_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = al->vsi_id;
	i40e_status ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < al->num_elements; i++) {
		if (is_broadcast_ether_addr(al->list[i].addr) ||
		    is_zero_ether_addr(al->list[i].addr)) {
			dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
				al->list[i].addr, vf->vf_id);
			ret = I40E_ERR_INVALID_MAC_ADDR;
			goto error_param;
		}

		if (vf->pf_set_mac &&
		    ether_addr_equal(al->list[i].addr,
				     vf->default_lan_addr.addr)) {
			dev_err(&pf->pdev->dev,
				"MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n",
				vf->default_lan_addr.addr, vf->vf_id);
			ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* delete addresses from the list */
	for (i = 0; i < al->num_elements; i++)
		if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
			ret = I40E_ERR_INVALID_MAC_ADDR;
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_param;
		} else {
			vf->num_mac--;
		}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program the updated filter list */
	ret = i40e_sync_vsi_filters(vsi);
	if (ret)
		dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
			vf->vf_id, ret);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
				       ret);
}

/**
 * i40e_vc_add_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * program guest vlan id
 **/
static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
	    !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
		goto error_param;
	}
	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			dev_err(&pf->pdev->dev,
				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
			goto error_param;
		}
	}
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_vlan_stripping_enable(vsi);
	for (i = 0; i < vfl->num_elements; i++) {
		/* add new VLAN filter */
		int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
		if (!ret)
			vf->num_vlan++;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   true,
							   vfl->vlan_id[i],
							   NULL);

		if (ret)
			dev_err(&pf->pdev->dev,
				"Unable to add VLAN filter %d for VF %d, error %d\n",
				vfl->vlan_id[i], vf->vf_id, ret);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
}

/**
 * i40e_vc_remove_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * remove programmed guest vlan id
 **/
static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_vlan_filter_list *vfl =
	    (struct virtchnl_vlan_filter_list *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vfl->vsi_id;
	i40e_status aq_ret = 0;
	int i;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
			aq_ret = I40E_ERR_PARAM;
			goto error_param;
		}
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	if (vsi->info.pvid) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	for (i = 0; i < vfl->num_elements; i++) {
		i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
		vf->num_vlan--;

		if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
		if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
			i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
							   false,
							   vfl->vlan_id[i],
							   NULL);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
}

/**
 * i40e_vc_iwarp_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the VF for the iwarp msgs
 **/
static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf = vf->pf;
	int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
				     msg, msglen);

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
				       aq_ret);
}

/**
 * i40e_vc_iwarp_qvmap_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @config: config qvmap or release it
 *
 * called from the VF for the iwarp msgs
 **/
static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
				   bool config)
{
	struct virtchnl_iwarp_qvlist_info *qvlist_info =
				(struct virtchnl_iwarp_qvlist_info *)msg;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto error_param;
	}

	if (config) {
		if (i40e_config_iwarp_qvlist(vf, qvlist_info))
			aq_ret = I40E_ERR_PARAM;
	} else {
		i40e_release_iwarp_qvlist(vf);
	}

error_param:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
			       config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
			       VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
			       aq_ret);
}

/**
 * i40e_vc_config_rss_key
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Configure the VF's RSS key
 **/
static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vrk->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
	    (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
err:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
				       aq_ret);
}

/**
 * i40e_vc_config_rss_lut
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Configure the VF's RSS LUT
 **/
static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_lut *vrl =
		(struct virtchnl_rss_lut *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	u16 vsi_id = vrl->vsi_id;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
	    !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
	    (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	vsi = pf->vsi[vf->lan_vsi_idx];
	aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
				       aq_ret);
}

/**
 * i40e_vc_get_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Return the RSS HENA bits allowed by the hardware
 **/
static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_hena *vrh = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	int len = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}
	len = sizeof(struct virtchnl_rss_hena);

	vrh = kzalloc(len, GFP_KERNEL);
	if (!vrh) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	vrh->hena = i40e_pf_get_default_rss_hena(pf);
err:
	/* send the response back to the VF */
	aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
					aq_ret, (u8 *)vrh, len);
	kfree(vrh);
	return aq_ret;
}

/**
 * i40e_vc_set_rss_hena
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Set the RSS HENA bits for the VF
 **/
static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
	struct virtchnl_rss_hena *vrh =
		(struct virtchnl_rss_hena *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
			  (u32)(vrh->hena >> 32));

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
}

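/* The 64-bit HENA value is split across two 32-bit registers above:
 * I40E_VFQF_HENA1(0, vf_id) receives the low word, (u32)vrh->hena, and
 * I40E_VFQF_HENA1(1, vf_id) the high word, (u32)(vrh->hena >> 32).
 * E.g. hena = 0x0000000100000002ULL writes 0x2 to word 0 and 0x1 to word 1.
 */
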
/**
 * i40e_vc_enable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Enable vlan header stripping for the VF
 **/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
					 u16 msglen)
{
	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	i40e_vlan_stripping_enable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_vc_disable_vlan_stripping
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * Disable vlan header stripping for the VF
 **/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg,
					  u16 msglen)
{
	struct i40e_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	i40e_vlan_stripping_disable(vsi);

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
				       aq_ret);
}

/**
 * i40e_validate_cloud_filter
 * @vf: pointer to the VF info
 * @tc_filter: pointer to the TC filter from virtchnl (mask and data)
 *
 * This function validates cloud filter programmed as TC filter for ADq
 **/
static int i40e_validate_cloud_filter(struct i40e_vf *vf,
				      struct virtchnl_filter *tc_filter)
{
	struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
	struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	if (!tc_filter->action) {
		dev_info(&pf->pdev->dev,
			 "VF %d: Currently ADq doesn't support Drop Action\n",
			 vf->vf_id);
		goto err;
	}

	/* action_meta is TC number here to which the filter is applied */
	if (!tc_filter->action_meta ||
	    tc_filter->action_meta > I40E_MAX_VF_VSI) {
		dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
			 vf->vf_id, tc_filter->action_meta);
		goto err;
	}

	/* Check filter if it's programmed for advanced mode or basic mode.
	 * There are two ADq modes (for VF only),
	 * 1. Basic mode: intended to allow as many filter options as possible
	 *		  to be added to a VF in Non-trusted mode. Main goal is
	 *		  to add filters to its own MAC and VLAN id.
	 * 2. Advanced mode: is for allowing filters to be applied other than
	 *		  its own MAC or VLAN. This mode requires the VF to be
	 *		  Trusted.
	 */
	if (mask.dst_mac[0] && !mask.dst_ip[0]) {
		vsi = pf->vsi[vf->lan_vsi_idx];
		f = i40e_find_mac(vsi, data.dst_mac);

		if (!f) {
			dev_info(&pf->pdev->dev,
				 "Destination MAC %pM doesn't belong to VF %d\n",
				 data.dst_mac, vf->vf_id);
			goto err;
		}

		if (mask.vlan_id) {
			hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
					   hlist) {
				if (f->vlan == ntohs(data.vlan_id)) {
					found = true;
					break;
				}
			}
			if (!found) {
				dev_info(&pf->pdev->dev,
					 "VF %d doesn't have any VLAN id %u\n",
					 vf->vf_id, ntohs(data.vlan_id));
				goto err;
			}
		}
	} else {
		/* Check if VF is trusted */
		if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
			dev_err(&pf->pdev->dev,
				"VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
				vf->vf_id);
			return I40E_ERR_CONFIG;
		}
	}

	if (mask.dst_mac[0] & data.dst_mac[0]) {
		if (is_broadcast_ether_addr(data.dst_mac) ||
		    is_zero_ether_addr(data.dst_mac)) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
				 vf->vf_id, data.dst_mac);
			goto err;
		}
	}

	if (mask.src_mac[0] & data.src_mac[0]) {
		if (is_broadcast_ether_addr(data.src_mac) ||
		    is_zero_ether_addr(data.src_mac)) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
				 vf->vf_id, data.src_mac);
			goto err;
		}
	}

	if (mask.dst_port & data.dst_port) {
		if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
				 vf->vf_id);
			goto err;
		}
	}

	if (mask.src_port & data.src_port) {
		if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) {
			dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
				 vf->vf_id);
			goto err;
		}
	}

	if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
	    tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
		dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
			 vf->vf_id);
		goto err;
	}

	if (mask.vlan_id & data.vlan_id) {
		if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
			dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
				 vf->vf_id);
			goto err;
		}
	}

	return I40E_SUCCESS;
err:
	return I40E_ERR_CONFIG;
}

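/* Decision rule used above, restated: a filter whose mask carries a
 * destination MAC but no destination IP (mask.dst_mac[0] && !mask.dst_ip[0])
 * is treated as basic mode and only has to match the VF's own MAC/VLAN;
 * any other combination is advanced mode and requires a trusted VF.
 */
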
/**
 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
 * @vf: pointer to the VF info
 * @seid: seid of the vsi it is searching for
 **/
static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	int i;

	for (i = 0; i < vf->num_tc ; i++) {
		vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
		if (vsi && vsi->seid == seid)
			return vsi;
	}
	return NULL;
}

/**
 * i40e_del_all_cloud_filters
 * @vf: pointer to the VF info
 *
 * This function deletes all cloud filters
 **/
static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
{
	struct i40e_cloud_filter *cfilter = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	struct hlist_node *node;
	int ret;

	hlist_for_each_entry_safe(cfilter, node,
				  &vf->cloud_filter_list, cloud_node) {
		vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);

		if (!vsi) {
			dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
				vf->vf_id, cfilter->seid);
			hlist_del(&cfilter->cloud_node);
			kfree(cfilter);
			continue;
		}

		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								false);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
		if (ret)
			dev_err(&pf->pdev->dev,
				"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
				vf->vf_id, i40e_stat_str(&pf->hw, ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));

		hlist_del(&cfilter->cloud_node);
		kfree(cfilter);
		vf->num_cloud_filters--;
	}
}

/**
 * i40e_vc_del_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * This function deletes a cloud filter programmed as TC filter for ADq
 **/
static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
	struct i40e_cloud_filter cfilter, *cf = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	struct hlist_node *node;
	i40e_status aq_ret = 0;
	int i, ret;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	if (!vf->adq_enabled) {
		dev_info(&pf->pdev->dev,
			 "VF %d: ADq not enabled, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	if (i40e_validate_cloud_filter(vf, vcf)) {
		dev_info(&pf->pdev->dev,
			 "VF %d: Invalid input, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	memset(&cfilter, 0, sizeof(cfilter));
	/* parse destination mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];

	/* parse source mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];

	cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
	cfilter.dst_port = mask.dst_port & tcf.dst_port;
	cfilter.src_port = mask.src_port & tcf.src_port;

	switch (vcf->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		cfilter.n_proto = ETH_P_IP;
		if (mask.dst_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		else if (mask.src_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		cfilter.n_proto = ETH_P_IPV6;
		if (mask.dst_ip[3] & tcf.dst_ip[3])
			memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
			       sizeof(cfilter.ip.v6.dst_ip6));
		if (mask.src_ip[3] & tcf.src_ip[3])
			memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
			       sizeof(cfilter.ip.v6.src_ip6));
		break;
	default:
		/* TC filter can be configured based on different combinations
		 * and in this case IP is not a part of filter config
		 */
		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
			 vf->vf_id);
	}

	/* get the vsi to which the tc belongs to */
	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
	cfilter.seid = vsi->seid;
	cfilter.flags = vcf->field_flags;

	/* Deleting TC filter */
	if (tcf.dst_port)
		ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
	else
		ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
			vf->vf_id, i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto err;
	}

	hlist_for_each_entry_safe(cf, node,
				  &vf->cloud_filter_list, cloud_node) {
		if (cf->seid != cfilter.seid)
			continue;
		if (mask.dst_port)
			if (cfilter.dst_port != cf->dst_port)
				continue;
		if (mask.dst_mac[0])
			if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
				continue;
		/* for ipv4 data to be valid, only first byte of mask is set */
		if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
			if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
				   ARRAY_SIZE(tcf.dst_ip)))
				continue;
		/* for ipv6, mask is set for all sixteen bytes (4 words) */
		if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
			if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
				   sizeof(cfilter.ip.v6.src_ip6)))
				continue;
		if (mask.vlan_id)
			if (cfilter.vlan_id != cf->vlan_id)
				continue;

		hlist_del(&cf->cloud_node);
		kfree(cf);
		vf->num_cloud_filters--;
	}

err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
				       aq_ret);
}

/**
 * i40e_vc_add_cloud_filter
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * This function adds a cloud filter programmed as TC filter for ADq
 **/
static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
	struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
	struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
	struct i40e_cloud_filter *cfilter = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = NULL;
	i40e_status aq_ret = 0;
	int i, ret;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	if (!vf->adq_enabled) {
		dev_info(&pf->pdev->dev,
			 "VF %d: ADq is not enabled, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	if (i40e_validate_cloud_filter(vf, vcf)) {
		dev_info(&pf->pdev->dev,
			 "VF %d: Invalid input/s, can't apply cloud filter\n",
			 vf->vf_id);
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
	if (!cfilter)
		return -ENOMEM;

	/* parse destination mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];

	/* parse source mac address */
	for (i = 0; i < ETH_ALEN; i++)
		cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];

	cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
	cfilter->dst_port = mask.dst_port & tcf.dst_port;
	cfilter->src_port = mask.src_port & tcf.src_port;

	switch (vcf->flow_type) {
	case VIRTCHNL_TCP_V4_FLOW:
		cfilter->n_proto = ETH_P_IP;
		if (mask.dst_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		else if (mask.src_ip[0] & tcf.dst_ip[0])
			memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
			       ARRAY_SIZE(tcf.dst_ip));
		break;
	case VIRTCHNL_TCP_V6_FLOW:
		cfilter->n_proto = ETH_P_IPV6;
		if (mask.dst_ip[3] & tcf.dst_ip[3])
			memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
			       sizeof(cfilter->ip.v6.dst_ip6));
		if (mask.src_ip[3] & tcf.src_ip[3])
			memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
			       sizeof(cfilter->ip.v6.src_ip6));
		break;
	default:
		/* TC filter can be configured based on different combinations
		 * and in this case IP is not a part of filter config
		 */
		dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
			 vf->vf_id);
	}

	/* get the VSI to which the TC belongs to */
	vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
	cfilter->seid = vsi->seid;
	cfilter->flags = vcf->field_flags;

	/* Adding cloud filter programmed as TC filter */
	if (tcf.dst_port)
		ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
	else
		ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
			vf->vf_id, i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto err;
	}

	INIT_HLIST_NODE(&cfilter->cloud_node);
	hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
	/* release the pointer passing it to the collection */
	cfilter = NULL;
	vf->num_cloud_filters++;
err:
	kfree(cfilter);
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
				       aq_ret);
}

/**
 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/
static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_tc_info *tci =
		(struct virtchnl_tc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int i, adq_request_qps = 0, speed = 0;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	/* ADq cannot be applied if spoof check is ON */
	if (vf->spoofchk) {
		dev_err(&pf->pdev->dev,
			"Spoof check is ON, turn it OFF to enable ADq\n");
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
		dev_err(&pf->pdev->dev,
			"VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
			vf->vf_id);
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	/* max number of traffic classes for VF currently capped at 4 */
	if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
		dev_err(&pf->pdev->dev,
			"VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n",
			vf->vf_id, tci->num_tc);
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	/* validate queues for each TC */
	for (i = 0; i < tci->num_tc; i++)
		if (!tci->list[i].count ||
		    tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
			dev_err(&pf->pdev->dev,
				"VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
				vf->vf_id, i, tci->list[i].count);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}

	/* need Max VF queues but already have default number of queues */
	adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;

	if (pf->queues_left < adq_request_qps) {
		dev_err(&pf->pdev->dev,
			"No queues left to allocate to VF %d\n",
			vf->vf_id);
		aq_ret = I40E_ERR_PARAM;
		goto err;
	} else {
		/* we need to allocate max VF queues to enable ADq so as to
		 * make sure ADq enabled VF always gets back queues when it
		 * goes through a reset.
		 */
		vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
	}

	/* get link speed in MB to validate rate limit */
	switch (ls->link_speed) {
	case VIRTCHNL_LINK_SPEED_100MB:
		speed = SPEED_100;
		break;
	case VIRTCHNL_LINK_SPEED_1GB:
		speed = SPEED_1000;
		break;
	case VIRTCHNL_LINK_SPEED_10GB:
		speed = SPEED_10000;
		break;
	case VIRTCHNL_LINK_SPEED_20GB:
		speed = SPEED_20000;
		break;
	case VIRTCHNL_LINK_SPEED_25GB:
		speed = SPEED_25000;
		break;
	case VIRTCHNL_LINK_SPEED_40GB:
		speed = SPEED_40000;
		break;
	default:
		dev_err(&pf->pdev->dev,
			"Cannot detect link speed\n");
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	/* parse data from the queue channel info */
	vf->num_tc = tci->num_tc;
	for (i = 0; i < vf->num_tc; i++) {
		if (tci->list[i].max_tx_rate) {
			if (tci->list[i].max_tx_rate > speed) {
				dev_err(&pf->pdev->dev,
					"Invalid max tx rate %llu specified for VF %d.",
					tci->list[i].max_tx_rate,
					vf->vf_id);
				aq_ret = I40E_ERR_PARAM;
				goto err;
			} else {
				vf->ch[i].max_tx_rate =
					tci->list[i].max_tx_rate;
			}
		}
		vf->ch[i].num_qps = tci->list[i].count;
	}

	/* set this flag only after making sure all inputs are sane */
	vf->adq_enabled = true;
	/* num_req_queues is set when user changes number of queues via ethtool
	 * and this causes issue for default VSI(which depends on this variable)
	 * when ADq is enabled, hence reset it.
	 */
	vf->num_req_queues = 0;

	/* reset the VF in order to allocate resources */
	i40e_vc_notify_vf_reset(vf);
	i40e_reset_vf(vf, false);

	return I40E_SUCCESS;

	/* send the response to the VF */
err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
				       aq_ret);
}

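/* Queue accounting for the ADq request above (assuming the driver's usual
 * limits of I40E_MAX_VF_QUEUES = 16 and I40E_DEFAULT_QUEUES_PER_VF = 4):
 * adq_request_qps = 16 - 4 = 12 extra queue pairs must still be available
 * in pf->queues_left, since an ADq-enabled VF is bumped to the full 16 so
 * it reliably gets its queues back after a reset.
 */
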
/**
 * i40e_vc_del_qch_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 **/
static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
{
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;

	if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	if (vf->adq_enabled) {
		i40e_del_all_cloud_filters(vf);
		i40e_del_qch(vf);
		vf->adq_enabled = false;
		vf->num_tc = 0;
		dev_info(&pf->pdev->dev,
			 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
			 vf->vf_id);
	} else {
		dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
			 vf->vf_id);
		aq_ret = I40E_ERR_PARAM;
	}

	/* reset the VF in order to allocate resources */
	i40e_vc_notify_vf_reset(vf);
	i40e_reset_vf(vf, false);

	return I40E_SUCCESS;

err:
	return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
				       aq_ret);
}

/**
 * i40e_vc_process_vf_msg
 * @pf: pointer to the PF structure
 * @vf_id: source VF id
 * @v_opcode: operation code
 * @v_retval: unused return value code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * called from the common aeq/arq handler to
 * process request from VF
 **/
int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
			   u32 __always_unused v_retval, u8 *msg, u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
	struct i40e_vf *vf;
	int ret;

	pf->vf_aq_requests++;
	if (local_vf_id >= pf->num_alloc_vfs)
		return -EINVAL;
	vf = &(pf->vf[local_vf_id]);

	/* Check if VF is disabled. */
	if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
		return I40E_ERR_PARAM;

	/* perform basic checks on the msg */
	ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);

	/* perform additional checks specific to this driver */
	if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_KEY) {
		struct virtchnl_rss_key *vrk = (struct virtchnl_rss_key *)msg;

		if (vrk->key_len != I40E_HKEY_ARRAY_SIZE)
			ret = -EINVAL;
	} else if (v_opcode == VIRTCHNL_OP_CONFIG_RSS_LUT) {
		struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;

		if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)
			ret = -EINVAL;
	}

	if (ret) {
		i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
		dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
			local_vf_id, v_opcode, msglen);
		switch (ret) {
		case VIRTCHNL_ERR_PARAM:
			return -EPERM;
		default:
			return -EINVAL;
		}
	}

	switch (v_opcode) {
	case VIRTCHNL_OP_VERSION:
		ret = i40e_vc_get_version_msg(vf, msg);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ret = i40e_vc_get_vf_resources_msg(vf, msg);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_RESET_VF:
		i40e_vc_reset_vf_msg(vf);
		ret = 0;
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ret = i40e_vc_config_promiscuous_mode_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ret = i40e_vc_config_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ret = i40e_vc_config_irq_map_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ret = i40e_vc_enable_queues_msg(vf, msg, msglen);
		i40e_vc_notify_vf_link_state(vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ret = i40e_vc_disable_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ret = i40e_vc_add_mac_addr_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ret = i40e_vc_del_mac_addr_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ret = i40e_vc_add_vlan_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ret = i40e_vc_remove_vlan_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ret = i40e_vc_get_stats_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_IWARP:
		ret = i40e_vc_iwarp_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
		break;
	case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
		ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ret = i40e_vc_config_rss_key(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ret = i40e_vc_config_rss_lut(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
		ret = i40e_vc_get_rss_hena(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ret = i40e_vc_set_rss_hena(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		ret = i40e_vc_enable_vlan_stripping(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_REQUEST_QUEUES:
		ret = i40e_vc_request_queues_msg(vf, msg, msglen);
		break;
	case VIRTCHNL_OP_ENABLE_CHANNELS:
		ret = i40e_vc_add_qch_msg(vf, msg);
		break;
	case VIRTCHNL_OP_DISABLE_CHANNELS:
		ret = i40e_vc_del_qch_msg(vf, msg);
		break;
	case VIRTCHNL_OP_ADD_CLOUD_FILTER:
		ret = i40e_vc_add_cloud_filter(vf, msg);
		break;
	case VIRTCHNL_OP_DEL_CLOUD_FILTER:
		ret = i40e_vc_del_cloud_filter(vf, msg);
		break;
	case VIRTCHNL_OP_UNKNOWN:
	default:
		dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
			v_opcode, local_vf_id);
		ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
					      I40E_ERR_NOT_IMPLEMENTED);
		break;
	}

	return ret;
}

/**
 * i40e_vc_process_vflr_event
 * @pf: pointer to the PF structure
 *
 * called from the VFLR irq handler to
 * free up VF resources and state variables
 **/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg, reg_idx, bit_idx;
	struct i40e_vf *vf;
	int vf_id;

	if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		return 0;

	/* Re-enable the VFLR interrupt cause here, before looking for which
	 * VF got reset. Otherwise, if another VF gets a reset while the
	 * first one is being processed, that interrupt will be lost, and
	 * that VF will be stuck in reset forever.
	 */
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);

	clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		vf = &pf->vf[vf_id];
		reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
			i40e_reset_vf(vf, true);
	}

	return 0;
}

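/* Index arithmetic used in the loop above, as a worked example: with a
 * hypothetical vf_base_id of 64 and vf_id of 3, the absolute VF number is
 * 67, so reg_idx = 67 / 32 = 2 and bit_idx = 67 % 32 = 3; that VF's FLR
 * status is bit 3 of I40E_GLGEN_VFLRSTAT(2).
 */
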
/**
 * i40e_ndo_set_vf_mac
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: mac address
 *
 * program VF mac address
 **/
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_mac_filter *f;
	struct i40e_vf *vf;
	int ret = 0;
	struct hlist_node *h;
	int bkt;
	u8 i;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev,
			"Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];

	/* When the VF is resetting wait until it is done.
	 * It can take up to 200 milliseconds,
	 * but wait for up to 300 milliseconds to be safe.
	 */
	for (i = 0; i < 15; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
			break;
		msleep(20);
	}
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	if (is_multicast_ether_addr(mac)) {
		dev_err(&pf->pdev->dev,
			"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	/* Lock once because below invoked function add/del_filter requires
	 * mac_filter_hash_lock to be held
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* delete the temporary mac address */
	if (!is_zero_ether_addr(vf->default_lan_addr.addr))
		i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);

	/* Delete all the filters for this VSI - we're going to kill it
	 * anyway.
	 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* program mac filter */
	if (i40e_sync_vsi_filters(vsi)) {
		dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
		ret = -EIO;
		goto error_param;
	}
	ether_addr_copy(vf->default_lan_addr.addr, mac);

	if (is_zero_ether_addr(mac)) {
		vf->pf_set_mac = false;
		dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
	} else {
		vf->pf_set_mac = true;
		dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
			 mac, vf_id);
	}

	/* Force the VF driver stop so it has to reload with new MAC address */
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");

error_param:
	return ret;
}

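/* Timing note for the reset wait above: the loop polls up to 15 times
 * with msleep(20) between attempts, i.e. roughly 15 * 20 = 300 ms total,
 * which covers the documented worst case of about 200 ms with margin.
 */
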
/**
 * i40e_vsi_has_vlans - True if VSI has configured VLANs
 * @vsi: pointer to the vsi
 *
 * Check if a VSI has configured any VLANs. False if we have a port VLAN or if
 * we have no configured VLANs. Do not call while holding the
 * mac_filter_hash_lock.
 */
static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi)
{
	bool have_vlans;

	/* If we have a port VLAN, then the VSI cannot have any VLANs
	 * configured, as all MAC/VLAN filters will be assigned to the PVID.
	 */
	if (vsi->info.pvid)
		return false;

	/* Since we don't have a PVID, we know that if the device is in VLAN
	 * mode it must be because of a VLAN filter configured on this VSI.
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	have_vlans = i40e_is_vsi_in_vlan(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return have_vlans;
}

/**
 * i40e_ndo_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID
 * @qos: priority setting
 * @vlan_proto: vlan protocol
 *
 * program VF vlan id and/or qos
 **/
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
			      u16 vlan_id, u8 qos, __be16 vlan_proto)
{
	u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_pvid;
	}

	if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
		dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
		ret = -EINVAL;
		goto error_pvid;
	}

	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
		ret = -EPROTONOSUPPORT;
		goto error_pvid;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_pvid;
	}

	if (le16_to_cpu(vsi->info.pvid) == vlanprio)
		/* duplicate request, so just return success */
		goto error_pvid;

	if (i40e_vsi_has_vlans(vsi)) {
		dev_err(&pf->pdev->dev,
			"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
			vf_id);
		/* Administrator Error - knock the VF offline until he does
		 * the right thing by reconfiguring his network correctly
		 * and then reloading the VF driver.
		 */
		i40e_vc_disable_vf(vf);
		/* During reset the VF got a new VSI, so refresh the pointer. */
		vsi = pf->vsi[vf->lan_vsi_idx];
	}

	/* Locked once because multiple functions below iterate list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* Check for condition where there was already a port VLAN ID
	 * filter set and now it is being deleted by setting it to zero.
	 * Additionally check for the condition where there was a port
	 * VLAN but now there is a new and different port VLAN being set.
	 * Before deleting all the old VLAN filters we must add new ones
	 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
	 * MAC addresses deleted.
	 */
	if ((!(vlan_id || qos) ||
	     vlanprio != le16_to_cpu(vsi->info.pvid)) &&
	    vsi->info.pvid) {
		ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}
	}

	if (vsi->info.pvid) {
		/* remove all filters on the old VLAN */
		i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
					   VLAN_VID_MASK));
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vlan_id || qos)
		ret = i40e_vsi_add_pvid(vsi, vlanprio);
	else
		i40e_vsi_remove_pvid(vsi);
	spin_lock_bh(&vsi->mac_filter_hash_lock);

	if (vlan_id) {
		dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
			 vlan_id, qos, vf_id);

		/* add new VLAN filter for each MAC */
		ret = i40e_add_vlan_all_mac(vsi, vlan_id);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
				 vsi->back->hw.aq.asq_last_status);
			spin_unlock_bh(&vsi->mac_filter_hash_lock);
			goto error_pvid;
		}

		/* remove the previously added non-VLAN MAC filters */
		i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
	}

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Schedule the worker thread to take care of applying changes */
	i40e_service_event_schedule(vsi->back);

	if (ret) {
		dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
		goto error_pvid;
	}

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);

error_pvid:
	return ret;
}

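/* Encoding used above: vlanprio packs the 802.1Q tag as
 * vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT), i.e. PCP in the top bits.
 * Example (assuming the usual 802.1Q layout with a shift of 13):
 * vlan_id = 100 (0x064) with qos = 5 yields 0x064 | (5 << 13) = 0xA064,
 * the value compared against vsi->info.pvid.
 */
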
/**
 * i40e_ndo_set_vf_bw
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate
 * @max_tx_rate: Maximum Tx rate
 *
 * configure VF Tx rate
 **/
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
		       int max_tx_rate)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
		ret = -EINVAL;
		goto error;
	}

	if (min_tx_rate) {
		dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
			min_tx_rate, vf_id);
		return -EINVAL;
	}

	vf = &(pf->vf[vf_id]);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error;
	}

	ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
	if (ret)
		goto error;

	vf->tx_rate = max_tx_rate;
error:
	return ret;
}

/**
 * i40e_ndo_get_vf_config
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 **/
int i40e_ndo_get_vf_config(struct net_device *netdev,
			   int vf_id, struct ifla_vf_info *ivi)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_param;
	}

	vf = &(pf->vf[vf_id]);
	/* first vsi is always the LAN vsi */
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto error_param;
	}

	ivi->vf = vf_id;

	ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);

	ivi->max_tx_rate = vf->tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
		   I40E_VLAN_PRIORITY_SHIFT;
	if (vf->link_forced == false)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up == true)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->spoofchk = vf->spoofchk;
	ivi->trusted = vf->trusted;
	ret = 0;

error_param:
	return ret;
}

/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct virtchnl_pf_event pfe;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int abs_vf_id;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto error_out;
	}

	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		pfe.event_data.link_event.link_status =
			pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			pf->hw.phy.link_info.link_speed;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		pfe.event_data.link_event.link_status = true;
		pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		pfe.event_data.link_event.link_status = false;
		pfe.event_data.link_event.link_speed = 0;
		break;
	default:
		ret = -EINVAL;
		goto error_out;
	}
	/* Notify the VF of its new link state */
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
	return ret;
}

/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_vsi_context ctxt;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		ret = -EINVAL;
		goto out;
	}

	vf = &(pf->vf[vf_id]);
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
		dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
			vf_id);
		ret = -EAGAIN;
		goto out;
	}

	if (enable == vf->spoofchk)
		goto out;

	vf->spoofchk = enable;
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	if (enable)
		ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
					I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
			ret);
		ret = -EIO;
	}
out:
	return ret;
}

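/* The update above toggles both anti-spoof checks in one shot: when
 * enabling, sec_flags gains I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK and
 * I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK; when disabling, both flags are
 * simply left cleared in the zeroed VSI context before the AQ update.
 */
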
/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the pf
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vf *vf;
	int ret = 0;

	/* validate the request */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
		return -EINVAL;
	}

	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
		return -EINVAL;
	}

	vf = &pf->vf[vf_id];

	if (setting == vf->trusted)
		goto out;

	vf->trusted = setting;
	i40e_vc_disable_vf(vf);
	dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
		 vf_id, setting ? "" : "un");

	if (vf->adq_enabled) {
		if (!vf->trusted) {
			dev_info(&pf->pdev->dev,
				 "VF %u no longer Trusted, deleting all cloud filters\n",
				 vf_id);
			i40e_del_all_cloud_filters(vf);