// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"

/*********************notification routines***********************/

/**
 * i40e_vc_vf_broadcast
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send a message to all VFs on a given PF
 **/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
				 enum virtchnl_ops v_opcode,
				 i40e_status v_retval, u8 *msg,
				 u16 msglen)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf = pf->vf;
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
		int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
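		/* Admin queue messaging addresses VFs by this absolute
		 * (device-wide) VF number: the PF-relative vf_id offset by
		 * vf_base_id from the HW function capabilities.
		 */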
		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
		    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
				       msg, msglen, NULL);
	}
}
/**
 * i40e_vc_notify_vf_link_state
 * @vf: pointer to the VF structure
 *
 * send a link status message to a single VF
 **/
static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *ls = &pf->hw.phy.link_info;
	int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;
	if (vf->link_forced) {
		pfe.event_data.link_event.link_status = vf->link_up;
		pfe.event_data.link_event.link_speed =
			(vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
	} else {
		pfe.event_data.link_event.link_status =
			ls->link_info & I40E_AQ_LINK_UP;
		pfe.event_data.link_event.link_speed =
			i40e_virtchnl_link_speed(ls->link_speed);
	}
	i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe, sizeof(pfe), NULL);
}
/**
 * i40e_vc_notify_link_state
 * @pf: pointer to the PF structure
 *
 * send a link status message to all VFs on a given PF
 **/
void i40e_vc_notify_link_state(struct i40e_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vc_notify_vf_link_state(&pf->vf[i]);
}
/**
 * i40e_vc_notify_reset
 * @pf: pointer to the PF structure
 *
 * indicate a pending reset to all VFs on a given PF
 **/
void i40e_vc_notify_reset(struct i40e_pf *pf)
{
	struct virtchnl_pf_event pfe;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, 0,
			     (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
#ifdef CONFIG_PCI_IOV
void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
	u16 vf_id;
	u16 pos;

	/* Continue only if this is a PF */
	if (!pdev->is_physfn)
		return;

	if (!pci_num_vf(pdev))
		return;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		struct pci_dev *vf_dev = NULL;

		pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
		while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) {
			if (vf_dev->is_virtfn && vf_dev->physfn == pdev)
				pci_restore_msi_state(vf_dev);
		}
	}
}
#endif /* CONFIG_PCI_IOV */
/**
 * i40e_vc_notify_vf_reset
 * @vf: pointer to the VF structure
 *
 * indicate a pending reset to the given VF
 **/
void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
{
	struct virtchnl_pf_event pfe;
	int abs_vf_id;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return;

	/* verify if the VF is in either init or active before proceeding */
	if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) &&
	    !test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
		return;

	abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;

	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, VIRTCHNL_OP_EVENT,
			       0, (u8 *)&pfe,
			       sizeof(struct virtchnl_pf_event), NULL);
}
/***********************misc routines*****************************/

/**
 * i40e_vc_reset_vf
 * @vf: pointer to the VF structure
 * @notify_vf: notify vf about reset or not
 * Reset VF handler.
 **/
static void i40e_vc_reset_vf(struct i40e_vf *vf, bool notify_vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	if (notify_vf)
		i40e_vc_notify_vf_reset(vf);

	/* We want to ensure that an actual reset is initiated after this
	 * function is called. However, we do not want to wait forever, so
	 * we'll give a reasonable time and print a message if we failed to
	 * ensure a reset.
	 */
	for (i = 0; i < 20; i++) {
		/* If PF is in VFs releasing state reset VF is impossible,
		 * so leave it.
		 */
		if (test_bit(__I40E_VFS_RELEASING, pf->state))
			return;
		if (i40e_reset_vf(vf, false))
			return;
		usleep_range(10000, 20000);
	}

	if (notify_vf)
		dev_warn(&vf->pf->pdev->dev,
			 "Failed to initiate reset for VF %d after 200 milliseconds\n",
			 vf->vf_id);
	else
		dev_dbg(&vf->pf->pdev->dev,
			"Failed to initiate reset for VF %d after 200 milliseconds\n",
			vf->vf_id);
}
/**
 * i40e_vc_isvalid_vsi_id
 * @vf: pointer to the VF info
 * @vsi_id: VF relative VSI id
 *
 * check for the valid VSI id
 **/
static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (vsi->vf_id == vf->vf_id));
}
/**
 * i40e_vc_isvalid_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @qid: vsi relative queue id
 *
 * check for the valid queue id
 **/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
					    u16 qid)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);

	return (vsi && (qid < vsi->alloc_queue_pairs));
}
/**
 * i40e_vc_isvalid_vector_id
 * @vf: pointer to the VF info
 * @vector_id: VF relative vector id
 *
 * check for the valid vector id
 **/
static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
	struct i40e_pf *pf = vf->pf;

	return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
}
/***********************vf resource mgmt routines*****************/

/**
 * i40e_vc_get_pf_queue_id
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue id
 *
 * return PF relative queue id
 **/
static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
				   u16 vsi_queue_id)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
	u16 pf_queue_id = I40E_QUEUE_END_OF_LIST;

	if (!vsi)
		return pf_queue_id;

	if (le16_to_cpu(vsi->info.mapping_flags) &
	    I40E_AQ_VSI_QUE_MAP_NONCONTIG)
		pf_queue_id =
			le16_to_cpu(vsi->info.queue_mapping[vsi_queue_id]);
	else
		pf_queue_id = le16_to_cpu(vsi->info.queue_mapping[0]) +
			      vsi_queue_id;

	return pf_queue_id;
}
/**
 * i40e_get_real_pf_qid
 * @vf: pointer to the VF info
 * @vsi_id: vsi id
 * @queue_id: queue number
 *
 * wrapper function to get pf_queue_id handling ADq code as well
 **/
static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
{
	int i;

	if (vf->adq_enabled) {
		/* Although the VF considers all the queues (which can be 1
		 * to 16) as its own, they may actually belong to different
		 * VSIs (up to 4). We need to find which VSI each queue
		 * belongs to.
		 */
		for (i = 0; i < vf->num_tc; i++) {
			if (queue_id < vf->ch[i].num_qps) {
				vsi_id = vf->ch[i].vsi_id;
				break;
			}
			/* find right queue id which is relative to a
			 * given VSI.
			 */
			queue_id -= vf->ch[i].num_qps;
		}
	}

	return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
}
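/* Worked example (illustrative, not from the original file): with ADq
 * enabled and two TCs of 4 queue pairs each, VF-relative queue_id 5 falls
 * past TC0's 4 queues, so the TC loop in i40e_get_real_pf_qid() above
 * subtracts 4 and resolves it to queue 1 of the TC1 channel VSI before the
 * PF-relative lookup.
 */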
/**
 * i40e_config_irq_link_list
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as given by the FW
 * @vecmap: irq map info
 *
 * configure irq link list from the map
 **/
static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
				      struct virtchnl_vector_map *vecmap)
{
	unsigned long linklistmap = 0, tempmap;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u16 vsi_queue_id, pf_queue_id;
	enum i40e_queue_type qtype;
	u16 next_q, vector_id, size;
	u32 reg, reg_idx;
	u16 itr_idx = 0;

	vector_id = vecmap->vector_id;
	/* setup the head */
	if (vector_id == 0)
		reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
	else
		reg_idx = I40E_VPINT_LNKLSTN(
		     ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
		     (vector_id - 1));

	if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
		/* Special case - No queues mapped on this vector */
		wr32(hw, reg_idx, I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
		goto irq_list_done;
	}
	tempmap = vecmap->rxq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id));
	}

	tempmap = vecmap->txq_map;
	for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
		linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
				    vsi_queue_id + 1));
	}

	size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
	next_q = find_first_bit(&linklistmap, size);
	if (unlikely(next_q == size))
		goto irq_list_done;

	vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
	qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
	pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
	reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);

	wr32(hw, reg_idx, reg);
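	/* Worked example (illustrative, not from the original file): with
	 * I40E_VIRTCHNL_SUPPORTED_QTYPES == 2, VSI queue N maps to bit 2*N
	 * of linklistmap for Rx and bit 2*N + 1 for Tx, so rxq_map = 0x3 and
	 * txq_map = 0x2 set bits {0, 2, 3}: Rx queue 0, then Rx and Tx of
	 * queue 1, which the loop below chains in that order.
	 */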
	while (next_q < size) {
		switch (qtype) {
		case I40E_QUEUE_TYPE_RX:
			reg_idx = I40E_QINT_RQCTL(pf_queue_id);
			itr_idx = vecmap->rxitr_idx;
			break;
		case I40E_QUEUE_TYPE_TX:
			reg_idx = I40E_QINT_TQCTL(pf_queue_id);
			itr_idx = vecmap->txitr_idx;
			break;
		default:
			break;
		}

		next_q = find_next_bit(&linklistmap, size, next_q + 1);
		if (next_q < size) {
			vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
			qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
			pf_queue_id = i40e_get_real_pf_qid(vf,
							   vsi_id,
							   vsi_queue_id);
		} else {
			pf_queue_id = I40E_QUEUE_END_OF_LIST;
			qtype = 0;
		}

		/* format for the RQCTL & TQCTL regs is same */
		reg = (vector_id) |
		      (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
		      (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
		      BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
		      (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
		wr32(hw, reg_idx, reg);
	}

	/* if the vf is running in polling mode and using interrupt zero,
	 * need to disable auto-mask on enabling zero interrupt for VFs.
	 */
	if ((vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) &&
	    (vector_id == 0)) {
		reg = rd32(hw, I40E_GLINT_CTL);
		if (!(reg & I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK)) {
			reg |= I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK;
			wr32(hw, I40E_GLINT_CTL, reg);
		}
	}

irq_list_done:
	i40e_flush(hw);
}
/**
 * i40e_release_iwarp_qvlist
 * @vf: pointer to the VF.
 *
 **/
static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
	u32 msix_vf;
	u32 i = 0;

	if (!vf->qvlist_info)
		return;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		struct virtchnl_iwarp_qv_info *qv_info;
		u32 next_q_index, next_q_type;
		struct i40e_hw *hw = &pf->hw;
		u32 v_idx, reg_idx, reg;

		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;
		v_idx = qv_info->v_idx;
		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			/* Figure out the queue after CEQ and make that the
			 * first queue.
			 */
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
			next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
			next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
					>> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (next_q_index &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			      (next_q_type <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}
	}
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
}
/**
 * i40e_config_iwarp_qvlist
 * @vf: pointer to the VF info
 * @qvlist_info: queue and vector list
 *
 * Return 0 on success or < 0 on error
 **/
static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
				    struct virtchnl_iwarp_qvlist_info *qvlist_info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct virtchnl_iwarp_qv_info *qv_info;
	u32 v_idx, i, reg_idx, reg;
	u32 next_q_idx, next_q_type;
	u32 msix_vf;
	int ret = 0;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	if (qvlist_info->num_vectors > msix_vf) {
		dev_warn(&pf->pdev->dev,
			 "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
			 qvlist_info->num_vectors,
			 msix_vf);
		ret = -EINVAL;
		goto err_out;
	}

	kfree(vf->qvlist_info);
	vf->qvlist_info = kzalloc(struct_size(vf->qvlist_info, qv_info,
					      qvlist_info->num_vectors - 1),
				  GFP_KERNEL);
	if (!vf->qvlist_info) {
		ret = -ENOMEM;
		goto err_out;
	}
	vf->qvlist_info->num_vectors = qvlist_info->num_vectors;

	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
	for (i = 0; i < qvlist_info->num_vectors; i++) {
		qv_info = &qvlist_info->qv_info[i];
		if (!qv_info)
			continue;

		/* Validate vector id belongs to this vf */
		if (!i40e_vc_isvalid_vector_id(vf, qv_info->v_idx)) {
			ret = -EINVAL;
			goto err_free;
		}

		v_idx = qv_info->v_idx;

		vf->qvlist_info->qv_info[i] = *qv_info;

		reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
		/* We might be sharing the interrupt, so get the first queue
		 * index and type, push it down the list by adding the new
		 * queue on top. Also link it with the new queue in CEQCTL.
		 */
		reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
		next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
			      I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
		next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);

		if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
			reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
			reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
			(v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
			(qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
			(next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
			(next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
			wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);

			reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
			reg = (qv_info->ceq_idx &
			       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
			      (I40E_QUEUE_TYPE_PE_CEQ <<
			       I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
			wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
		}

		if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
			reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
			      (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
			      (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));

			wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
		}
	}

	return 0;
err_free:
	kfree(vf->qvlist_info);
	vf->qvlist_info = NULL;
err_out:
	return ret;
}
/**
 * i40e_config_vsi_tx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure tx queue
 **/
static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_txq_info *info)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	struct i40e_vsi *vsi;
	u16 pf_queue_id;
	u32 qtx_ctl;
	int ret = 0;

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		ret = -ENOENT;
		goto error_context;
	}
	pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!vsi) {
		ret = -ENOENT;
		goto error_context;
	}

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(struct i40e_hmc_obj_txq));

	/* only set the required fields */
	tx_ctx.base = info->dma_ring_addr / 128;
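	/* The HMC context stores the ring base address in 128-byte units;
	 * e.g. a DMA address of 0x1000 is programmed as 0x20 (illustrative
	 * note, not from the original file).
	 */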
	tx_ctx.qlen = info->ring_len;
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
	tx_ctx.rdylist_act = 0;
	tx_ctx.head_wb_ena = info->headwb_enabled;
	tx_ctx.head_wb_addr = info->dma_headwb_addr;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Tx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_tx_queue_context(hw, pf_queue_id, &tx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Tx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_context;
	}

	/* associate this queue with the PCI VF function */
	qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
		    & I40E_QTX_CTL_PF_INDX_MASK);
	qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
		     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
		    & I40E_QTX_CTL_VFVM_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_queue_id), qtx_ctl);
	i40e_flush(hw);

error_context:
	return ret;
}
/**
 * i40e_config_vsi_rx_queue
 * @vf: pointer to the VF info
 * @vsi_id: id of VSI as provided by the FW
 * @vsi_queue_id: vsi relative queue index
 * @info: config. info
 *
 * configure rx queue
 **/
static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
				    u16 vsi_queue_id,
				    struct virtchnl_rxq_info *info)
{
	u16 pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
	struct i40e_hw *hw = &pf->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	int ret = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));

	/* only set the required fields */
	rx_ctx.base = info->dma_ring_addr / 128;
	rx_ctx.qlen = info->ring_len;

	if (info->splithdr_enabled) {
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
		/* header length validation */
		if (info->hdr_size > ((2 * 1024) - 64)) {
			ret = -EINVAL;
			goto error_param;
		}
		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		/* set split mode 10b */
		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
	}

	/* databuffer length validation */
	if (info->databuffer_size > ((16 * 1024) - 128)) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
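	/* Illustrative note (not from the original file): HBUFF and DBUFF
	 * are kept in 64-byte and 128-byte units respectively, hence the
	 * right shifts above; e.g. a 2048-byte data buffer is programmed
	 * as 2048 >> 7 = 16.
	 */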
	/* max pkt. length validation */
	if (info->max_pkt_size >= (16 * 1024) || info->max_pkt_size < 64) {
		ret = -EINVAL;
		goto error_param;
	}
	rx_ctx.rxmax = info->max_pkt_size;

	/* if port VLAN is configured increase the max packet size */
	if (vsi->info.pvid)
		rx_ctx.rxmax += VLAN_HLEN;

	/* enable 32bytes desc always */
	rx_ctx.dsize = 1;

	/* default values */
	rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.prefena = 1;
	rx_ctx.l2tsel = 1;

	/* clear the context in the HMC */
	ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to clear VF LAN Rx queue context %d, error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

	/* set the context in the HMC */
	ret = i40e_set_lan_rx_queue_context(hw, pf_queue_id, &rx_ctx);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to set VF LAN Rx queue context %d error: %d\n",
			pf_queue_id, ret);
		ret = -ENOENT;
		goto error_param;
	}

error_param:
	return ret;
}
/**
 * i40e_alloc_vsi_res
 * @vf: pointer to the VF info
 * @idx: VSI index, applies only for ADq mode, zero otherwise
 *
 * alloc VF vsi context & resources
 **/
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
	struct i40e_mac_filter *f = NULL;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u64 max_tx_rate = 0;
	int ret = 0;

	vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
			     vf->vf_id);

	if (!vsi) {
		dev_err(&pf->pdev->dev,
			"add vsi failed for VF %d, aq_err %d\n",
			vf->vf_id, pf->hw.aq.asq_last_status);
		ret = -ENOENT;
		goto error_alloc_vsi_res;
	}

	if (!idx) {
		u64 hena = i40e_pf_get_default_rss_hena(pf);
		u8 broadcast[ETH_ALEN];

		vf->lan_vsi_idx = vsi->idx;
		vf->lan_vsi_id = vsi->id;
		/* If the port VLAN has been configured and then the
		 * VF driver was removed then the VSI port VLAN
		 * configuration was destroyed. Check if there is
		 * a port VLAN and restore the VSI configuration if
		 * needed.
		 */
		if (vf->port_vlan_id)
			i40e_vsi_add_pvid(vsi, vf->port_vlan_id);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
			f = i40e_add_mac_filter(vsi,
						vf->default_lan_addr.addr);
			if (!f)
				dev_info(&pf->pdev->dev,
					 "Could not add MAC filter %pM for VF %d\n",
					 vf->default_lan_addr.addr, vf->vf_id);
		}
		eth_broadcast_addr(broadcast);
		f = i40e_add_mac_filter(vsi, broadcast);
		if (!f)
			dev_info(&pf->pdev->dev,
				 "Could not allocate VF broadcast filter\n");
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
		wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
		/* program mac filter only for VF VSI */
		ret = i40e_sync_vsi_filters(vsi);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
	}

	/* storing VSI index and id for ADq and don't apply the mac filter */
	if (vf->adq_enabled) {
		vf->ch[idx].vsi_idx = vsi->idx;
		vf->ch[idx].vsi_id = vsi->id;
	}

	/* Set VF bandwidth if specified */
	if (vf->tx_rate) {
		max_tx_rate = vf->tx_rate;
	} else if (vf->ch[idx].max_tx_rate) {
		max_tx_rate = vf->ch[idx].max_tx_rate;
	}

	if (max_tx_rate) {
		max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
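		/* Illustrative example (not from the original file): the
		 * firmware takes the limit in I40E_BW_CREDIT_DIVISOR (50
		 * Mbps) credits, so a requested 1000 Mbps becomes 20
		 * credits here.
		 */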
		ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
						  max_tx_rate, 0, NULL);
		if (ret)
			dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
				vf->vf_id, ret);
	}

error_alloc_vsi_res:
	return ret;
}
/**
 * i40e_map_pf_queues_to_vsi
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the first part, VSILAN_QTABLE, mapping pf queues to
 * VSI.
 **/
static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qps;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < 7; j++) {
			if (j * 2 >= qps) {
				/* end of list */
				reg = 0x07FF07FF;
			} else {
				u16 qid = i40e_vc_get_pf_queue_id(vf,
								  vsi_id,
								  j * 2);
				reg = qid;
				qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
							      (j * 2) + 1);
				reg |= qid << 16;
			}
			i40e_write_rx_ctl(hw,
					  I40E_VSILAN_QTABLE(j, vsi_id),
					  reg);
		}
	}
}
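/* Illustrative note (not from the original file): each VSILAN_QTABLE
 * register packs two PF queue ids, the even queue in bits 15:0 and the odd
 * queue in bits 31:16; slots past the last queue pair are written as
 * 0x07FF07FF, i.e. I40E_QUEUE_END_OF_LIST in both halves.
 */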
/**
 * i40e_map_pf_to_vf_queues
 * @vf: pointer to the VF info
 *
 * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
 * function takes care of the second part, VPLAN_QTABLE, and completes the VF
 * mappings.
 **/
static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg, total_qps = 0;
	u32 qps, num_tc = 1; /* VF has at least one traffic class */
	u16 vsi_id, qid;
	int i, j;

	if (vf->adq_enabled)
		num_tc = vf->num_tc;

	for (i = 0; i < num_tc; i++) {
		if (vf->adq_enabled) {
			qps = vf->ch[i].num_qps;
			vsi_id = vf->ch[i].vsi_id;
		} else {
			qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
			vsi_id = vf->lan_vsi_id;
		}

		for (j = 0; j < qps; j++) {
			qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);

			reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
			wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
			     reg);
			total_qps++;
		}
	}
}
/**
 * i40e_enable_vf_mappings
 * @vf: pointer to the VF info
 *
 * enable VF mappings
 **/
static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* Tell the hardware we're using noncontiguous mapping. HW requires
	 * that VF queues be mapped using this method, even when they are
	 * contiguous in real life
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
			  I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* enable VF vplan_qtable mappings */
	reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);

	i40e_map_pf_to_vf_queues(vf);
	i40e_map_pf_queues_to_vsi(vf);

	i40e_flush(hw);
}
/**
 * i40e_disable_vf_mappings
 * @vf: pointer to the VF info
 *
 * disable VF mappings
 **/
static void i40e_disable_vf_mappings(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* disable qp mappings */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), 0);
	for (i = 0; i < I40E_MAX_VSI_QP; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_id),
		     I40E_QUEUE_END_OF_LIST);
	i40e_flush(hw);
}
/**
 * i40e_free_vf_res
 * @vf: pointer to the VF info
 *
 * free VF resources
 **/
static void i40e_free_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, reg;
	int i, j, msix_vf;

	/* Start by disabling VF's configuration API to prevent the OS from
	 * accessing the VF's VSI after it's freed / invalidated.
	 */
	clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);

	/* It's possible the VF had requested more queues than the default so
	 * do the accounting here when we're about to free them.
	 */
	if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) {
		pf->queues_left += vf->num_queue_pairs -
				   I40E_DEFAULT_QUEUES_PER_VF;
	}

	/* free vsi & disconnect it from the parent uplink */
	if (vf->lan_vsi_idx) {
		i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]);
		vf->lan_vsi_idx = 0;
		vf->lan_vsi_id = 0;
	}

	/* do the accounting and remove additional ADq VSI's */
	if (vf->adq_enabled && vf->ch[0].vsi_idx) {
		for (j = 0; j < vf->num_tc; j++) {
			/* At this point VSI0 is already released so don't
			 * release it again and only clear their values in
			 * structure variables
			 */
			if (j)
				i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
			vf->ch[j].vsi_idx = 0;
			vf->ch[j].vsi_id = 0;
		}
	}
	msix_vf = pf->hw.func_caps.num_msix_vectors_vf;

	/* disable interrupts so the VF starts in a known state */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (i == 0)
			reg_idx = I40E_VFINT_DYN_CTL0(vf->vf_id);
		else
			reg_idx = I40E_VFINT_DYN_CTLN(((msix_vf - 1) *
						       (vf->vf_id)) +
						      (i - 1));
		wr32(hw, reg_idx, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
		i40e_flush(hw);
	}

	/* clear the irq settings */
	for (i = 0; i < msix_vf; i++) {
		/* format is same for both registers */
		if (i == 0)
			reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
		else
			reg_idx = I40E_VPINT_LNKLSTN(((msix_vf - 1) *
						      (vf->vf_id)) +
						     (i - 1));
		reg = (I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
		       I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
		wr32(hw, reg_idx, reg);
		i40e_flush(hw);
	}

	/* reset some of the state variables keeping track of the resources */
	vf->num_queue_pairs = 0;
	clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states);
	clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
/**
 * i40e_alloc_vf_res
 * @vf: pointer to the VF info
 *
 * allocate VF resources
 **/
static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int total_queue_pairs = 0;
	int ret, idx;

	if (vf->num_req_queues &&
	    vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
		pf->num_vf_qps = vf->num_req_queues;
	else
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;

	/* allocate hw vsi context & associated resources */
	ret = i40e_alloc_vsi_res(vf, 0);
	if (ret)
		goto error_alloc;
	total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;

	/* allocate additional VSIs based on tc information for ADq */
	if (vf->adq_enabled) {
		if (pf->queues_left >=
		    (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
			/* TC 0 always belongs to VF VSI */
			for (idx = 1; idx < vf->num_tc; idx++) {
				ret = i40e_alloc_vsi_res(vf, idx);
				if (ret)
					goto error_alloc;
			}
			/* send correct number of queues */
			total_queue_pairs = I40E_MAX_VF_QUEUES;
		} else {
			dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
				 vf->vf_id);
			vf->adq_enabled = false;
		}
	}

	/* We account for each VF to get a default number of queue pairs. If
	 * the VF has now requested more, we need to account for that to make
	 * certain we never request more queues than we actually have left in
	 * available resources.
	 */
	if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF)
		pf->queues_left -=
			total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF;

	if (vf->trusted)
		set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);

	/* store the total qps number for the runtime
	 * VF req validation
	 */
	vf->num_queue_pairs = total_queue_pairs;

	/* VF is now completely initialized */
	set_bit(I40E_VF_STATE_INIT, &vf->vf_states);

error_alloc:
	if (ret)
		i40e_free_vf_res(vf);

	return ret;
}
#define VF_DEVICE_STATUS 0xAA
#define VF_TRANS_PENDING_MASK 0x20
/**
 * i40e_quiesce_vf_pci
 * @vf: pointer to the VF structure
 *
 * Wait for VF PCI transactions to be cleared after reset. Returns -EIO
 * if the transactions never clear.
 **/
static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int vf_abs_id, i;
	u32 reg;

	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	wr32(hw, I40E_PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < 100; i++) {
		reg = rd32(hw, I40E_PF_PCI_CIAD);
		if ((reg & VF_TRANS_PENDING_MASK) == 0)
			return 0;
		udelay(1);
	}
	return -EIO;
}
/**
 * __i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * called to get the number of VLANs offloaded on this VF
 **/
static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	u16 num_vlans = 0, bkt;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
			num_vlans++;
	}

	return num_vlans;
}
/**
 * i40e_getnum_vf_vsi_vlan_filters
 * @vsi: pointer to the vsi
 *
 * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
 **/
static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
{
	int num_vlans;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	return num_vlans;
}
/**
 * i40e_get_vlan_list_sync
 * @vsi: pointer to the VSI
 * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
 * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
 *             This array is allocated here, but has to be freed in caller.
 *
 * Called to get number of VLANs and VLAN list present in mac_filter_hash.
 **/
static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
				    s16 **vlan_list)
{
	struct i40e_mac_filter *f;
	int i = 0;
	int bkt;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	*num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
	*vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
	if (!(*vlan_list))
		goto err;

	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
			continue;
		(*vlan_list)[i++] = f->vlan;
	}
err:
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
}
/**
 * i40e_set_vsi_promisc
 * @vf: pointer to the VF struct
 * @seid: VSI number
 * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
 *		  for a given VLAN
 * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
 *		    for a given VLAN
 * @vl: List of VLANs - apply filter for given VLANs
 * @num_vlans: Number of elements in @vl
 **/
static i40e_status
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
		     bool unicast_enable, s16 *vl, u16 num_vlans)
{
	i40e_status aq_ret, aq_tmp = 0;
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	int i;

	/* No VLAN to set promisc on, set on VSI */
	if (!num_vlans || !vl) {
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
							       multi_enable,
							       NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			return aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
							     unicast_enable,
							     NULL, true);

		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));
		}

		return aq_ret;
	}

	for (i = 0; i < num_vlans; i++) {
		aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
							    multi_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}

		aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
							    unicast_enable,
							    vl[i], NULL);
		if (aq_ret) {
			int aq_err = pf->hw.aq.asq_last_status;

			dev_err(&pf->pdev->dev,
				"VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
				vf->vf_id,
				i40e_stat_str(&pf->hw, aq_ret),
				i40e_aq_str(&pf->hw, aq_err));

			if (!aq_tmp)
				aq_tmp = aq_ret;
		}
	}

	if (aq_tmp)
		aq_ret = aq_tmp;

	return aq_ret;
}
/**
 * i40e_config_vf_promiscuous_mode
 * @vf: pointer to the VF info
 * @vsi_id: VSI id
 * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
 * @alluni: set MAC L2 layer unicast promiscuous enable/disable
 *
 * Called from the VF to configure the promiscuous mode of
 * VF vsis and from the VF reset path to reset promiscuous mode.
 **/
static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
						   u16 vsi_id,
						   bool allmulti,
						   bool alluni)
{
	i40e_status aq_ret = I40E_SUCCESS;
	struct i40e_pf *pf = vf->pf;
	struct i40e_vsi *vsi;
	u16 num_vlans;
	s16 *vl;

	vsi = i40e_find_vsi_from_id(pf, vsi_id);
	if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
		return I40E_ERR_PARAM;

	if (vf->port_vlan_id) {
		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
					      alluni, &vf->port_vlan_id, 1);
		return aq_ret;
	} else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
		i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);

		if (!vl)
			return I40E_ERR_NO_MEMORY;

		aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
					      vl, num_vlans);
		kfree(vl);
		return aq_ret;
	}

	/* no VLANs to set on, set on VSI */
	aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
				      NULL, 0);
	return aq_ret;
}
/**
 * i40e_sync_vfr_reset
 * @hw: pointer to hw struct
 * @vf_id: VF identifier
 *
 * Before triggering a hardware reset, we need to know that no other process
 * has reserved the hardware for any reset operations. This check is done by
 * examining the status of the RSTAT1 register used to signal the reset.
 **/
static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
{
	u32 reg;
	int i;

	for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
		reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
			   I40E_VFINT_ICR0_ADMINQ_MASK;
		if (reg)
			return 0;

		usleep_range(100, 200);
	}

	return -EAGAIN;
}
1387 static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
1389 struct i40e_pf *pf = vf->pf;
1390 struct i40e_hw *hw = &pf->hw;
1391 u32 reg, reg_idx, bit_idx;
1396 vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
1398 /* Disable VF's configuration API during reset. The flag is re-enabled
1399 * in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
1400 * It's normally disabled in i40e_free_vf_res(), but it's safer
1401 * to do it earlier to give some time to finish to any VF config
1402 * functions that may still be running at this point.
1404 clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
1406 /* In the case of a VFLR, the HW has already reset the VF and we
1407 * just need to clean up, so don't hit the VFRTRIG register.
1410 /* Sync VFR reset before trigger next one */
1411 radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
1412 I40E_VFINT_ICR0_ADMINQ_MASK;
1413 if (vf_active && !radq)
1414 /* waiting for finish reset by virtual driver */
1415 if (i40e_sync_vfr_reset(hw, vf->vf_id))
1416 dev_info(&pf->pdev->dev,
1417 "Reset VF %d never finished\n",
1420 /* Reset VF using VPGEN_VFRTRIG reg. It is also setting
1421 * in progress state in rstat1 register.
1423 reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
1424 reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
1425 wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
1428 /* clear the VFLR bit in GLGEN_VFLRSTAT */
1429 reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
1430 bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
1431 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
1434 if (i40e_quiesce_vf_pci(vf))
1435 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
/**
 * i40e_cleanup_reset_vf
 * @vf: pointer to the VF structure
 *
 * Cleanup a VF after the hardware reset is finished. Expects the caller to
 * have verified whether the reset is finished properly, and ensure the
 * minimum amount of wait time has passed.
 **/
static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 reg;

	/* disable promisc modes in case they were enabled */
	i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id, false, false);

	/* free VF resources to begin resetting the VSI state */
	i40e_free_vf_res(vf);

	/* Enable hardware by clearing the reset bit in the VPGEN_VFRTRIG reg.
	 * By doing this we allow HW to access VF memory at any point. If we
	 * did it any sooner, HW could access memory while it was being freed
	 * in i40e_free_vf_res(), causing an IOMMU fault.
	 *
	 * On the other hand, this needs to be done ASAP, because the VF driver
	 * is waiting for this to happen and may report a timeout. It's
	 * harmless, but it gets logged into Guest OS kernel log, so best avoid
	 * it.
	 */
	reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);

	/* reallocate VF resources to finish resetting the VSI state */
	if (!i40e_alloc_vf_res(vf)) {
		int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

		i40e_enable_vf_mappings(vf);
		set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
		clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		/* Do not notify the client during VF init */
		if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE,
					&vf->vf_states))
			i40e_notify_client_of_vf_reset(pf, abs_vf_id);
		vf->num_vlan = 0;
	}

	/* Tell the VF driver the reset is done. This needs to be done only
	 * after VF has been fully initialized, because the VF driver may
	 * request resources immediately after setting this flag.
	 */
	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
/**
 * i40e_reset_vf
 * @vf: pointer to the VF structure
 * @flr: VFLR was issued or not
 *
 * Returns true if the VF is in reset, resets successfully, or resets
 * are disabled and false otherwise.
 **/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
	struct i40e_pf *pf = vf->pf;
	struct i40e_hw *hw = &pf->hw;
	bool rsd = false;
	u32 reg;
	int i;

	if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
		return true;

	/* Bail out if VFs are disabled. */
	if (test_bit(__I40E_VF_DISABLE, pf->state))
		return true;

	/* If VF is being reset already we don't need to continue. */
	if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
		return true;

	i40e_trigger_vf_reset(vf, flr);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully. Due to internal HW FIFO flushes,
		 * we must wait 10ms before the register will be valid.
		 */
		usleep_range(10000, 20000);
		reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
		if (reg & I40E_VPGEN_VFRSTAT_VFRD_MASK) {
			rsd = true;
			break;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	if (!rsd)
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* On initial reset, we don't have any queues to disable */
	if (vf->lan_vsi_idx != 0)
		i40e_vsi_stop_rings(pf->vsi[vf->lan_vsi_idx]);

	i40e_cleanup_reset_vf(vf);

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);

	return true;
}
/**
 * i40e_reset_all_vfs
 * @pf: pointer to the PF structure
 * @flr: VFLR was issued or not
 *
 * Reset all allocated VFs in one go. First, tell the hardware to reset each
 * VF, then do all the waiting in one chunk, and finally finish restoring each
 * VF after the wait. This is useful during PF routines which need to reset
 * all VFs, as otherwise it must perform these resets in a serialized fashion.
 *
 * Returns true if any VFs were reset, and false otherwise.
 **/
bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
		return false;

	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		return false;

	/* Begin reset on all VFs at once */
	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
		/* If VF is being reset no need to trigger reset again */
		if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			i40e_trigger_vf_reset(vf, flr);
	}

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, vf = &pf->vf[0]; i < 10 && vf < &pf->vf[pf->num_alloc_vfs]; ++i) {
		usleep_range(10000, 20000);

		/* Check each VF in sequence, beginning with the VF that
		 * failed the previous check.
		 */
		while (vf < &pf->vf[pf->num_alloc_vfs]) {
			if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) {
				reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id));
				if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK))
					break;
			}

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
			++vf;
		}
	}

	if (flr)
		usleep_range(10000, 20000);

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (vf < &pf->vf[pf->num_alloc_vfs])
		dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n",
			vf->vf_id);
	usleep_range(10000, 20000);

	/* Begin disabling all the rings associated with VFs, but do not wait
	 * between each VF.
	 */
	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
		/* On initial reset, we don't have any queues to disable */
		if (vf->lan_vsi_idx == 0)
			continue;

		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[vf->lan_vsi_idx]);
	}

	/* Now that we've notified HW to disable all of the VF rings, wait
	 * until they finish.
	 */
	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
		/* On initial reset, we don't have any queues to disable */
		if (vf->lan_vsi_idx == 0)
			continue;

		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[vf->lan_vsi_idx]);
	}

	/* Hw may need up to 50ms to finish disabling the RX queues. We
	 * minimize the wait by delaying only once for all VFs.
	 */
	mdelay(50);

	/* Finish the reset on each VF */
	for (vf = &pf->vf[0]; vf < &pf->vf[pf->num_alloc_vfs]; ++vf) {
		/* If VF is reset in another thread just continue */
		if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
			continue;

		i40e_cleanup_reset_vf(vf);
	}

	i40e_flush(hw);
	usleep_range(20000, 40000);
	clear_bit(__I40E_VF_DISABLE, pf->state);

	return true;
}
/**
 * i40e_free_vfs
 * @pf: pointer to the PF structure
 *
 * free VF resources
 **/
void i40e_free_vfs(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 reg_idx, bit_idx;
	int i, tmp, vf_id;

	if (!pf->vf)
		return;

	set_bit(__I40E_VFS_RELEASING, pf->state);
	while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
		usleep_range(1000, 2000);

	i40e_notify_client_of_vf_enable(pf, 0);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Amortize wait time by stopping all VFs at the same time */
	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	for (i = 0; i < pf->num_alloc_vfs; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			continue;

		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
	}

	/* free up VF resources */
	tmp = pf->num_alloc_vfs;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
			i40e_free_vf_res(&pf->vf[i]);
		/* disable qp mappings */
		i40e_disable_vf_mappings(&pf->vf[i]);
	}

	kfree(pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}
	clear_bit(__I40E_VF_DISABLE, pf->state);
	clear_bit(__I40E_VFS_RELEASING, pf->state);
}
#ifdef CONFIG_PCI_IOV
/**
 * i40e_alloc_vfs
 * @pf: pointer to the PF structure
 * @num_alloc_vfs: number of VFs to allocate
 *
 * allocate VF resources
 **/
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
	struct i40e_vf *vfs;
	int i, ret = 0;

	/* Disable interrupt 0 so we don't try to handle the VFLR. */
	i40e_irq_dynamic_disable_icr0(pf);

	/* Check to see if we're just allocating resources for extant VFs */
	if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
		ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		if (ret) {
			pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			pf->num_alloc_vfs = 0;
			goto err_iov;
		}
	}
	/* allocate memory */
	vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
	if (!vfs) {
		ret = -ENOMEM;
		goto err_alloc;
	}
	pf->vf = vfs;

	/* apply default profile */
	for (i = 0; i < num_alloc_vfs; i++) {
		vfs[i].pf = pf;
		vfs[i].parent_type = I40E_SWITCH_ELEMENT_TYPE_VEB;
		vfs[i].vf_id = i;

		/* assign default capabilities */
		set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		vfs[i].spoofchk = true;

		set_bit(I40E_VF_STATE_PRE_ENABLE, &vfs[i].vf_states);
	}
	pf->num_alloc_vfs = num_alloc_vfs;

	/* VF resources get allocated during reset */
	i40e_reset_all_vfs(pf, false);

	i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);

err_alloc:
	if (ret)
		i40e_free_vfs(pf);
err_iov:
	/* Re-enable interrupt 0. */
	i40e_irq_dynamic_enable_icr0(pf);
	return ret;
}
#endif /* CONFIG_PCI_IOV */
/**
 * i40e_pci_sriov_enable
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs
 **/
static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int pre_existing_vfs = pci_num_vf(pdev);
	int err = 0;

	if (test_bit(__I40E_TESTING, pf->state)) {
		dev_warn(&pdev->dev,
			 "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
		err = -EPERM;
		goto err_out;
	}

	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
		i40e_free_vfs(pf);
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
		goto out;

	if (num_vfs > pf->num_req_vfs) {
		dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
			 num_vfs, pf->num_req_vfs);
		err = -EPERM;
		goto err_out;
	}

	dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
	err = i40e_alloc_vfs(pf, num_vfs);
	if (err) {
		dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
		goto err_out;
	}

out:
	return num_vfs;

err_out:
	return err;
#endif
	return 0;
}
/**
 * i40e_pci_sriov_configure
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate
 *
 * Enable or change the number of VFs. Called when the user updates the number
 * of VFs in sysfs.
 **/
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int ret = 0;

	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
		dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
		return -EAGAIN;
	}

	if (num_vfs) {
		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
		}
		ret = i40e_pci_sriov_enable(pdev, num_vfs);
		goto sriov_configure_out;
	}

	if (!pci_vfs_assigned(pf->pdev)) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
		i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
	} else {
		dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
		ret = -EINVAL;
		goto sriov_configure_out;
	}
sriov_configure_out:
	clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
	return ret;
}
/***********************virtual channel routines******************/

/**
 * i40e_vc_send_msg_to_vf
 * @vf: pointer to the VF info
 * @v_opcode: virtual channel opcode
 * @v_retval: virtual channel return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 *
 * send msg to VF
 **/
static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
				  u32 v_retval, u8 *msg, u16 msglen)
{
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	int abs_vf_id;
	i40e_status aq_ret;

	/* validate the request */
	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
		return -EINVAL;

	pf = vf->pf;
	hw = &pf->hw;
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
					msg, msglen, NULL);
	if (aq_ret) {
		dev_info(&pf->pdev->dev,
			 "Unable to send the message to VF %d aq_err %d\n",
			 vf->vf_id, pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}
/**
 * i40e_vc_send_resp_to_vf
 * @vf: pointer to the VF info
 * @opcode: operation code
 * @retval: return value
 *
 * send resp msg to VF
 **/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
				   enum virtchnl_ops opcode,
				   i40e_status retval)
{
	return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
/**
 * i40e_sync_vf_state
 * @vf: pointer to the VF info
 * @state: VF state
 *
 * Called from a VF message to synchronize the service with a potential
 * VF reset state
 **/
static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
{
	int i;

	/* Some messages can only be handled once the relevant VF state bit
	 * is set. It is possible that this bit is cleared during a VF
	 * reset, so wait until the end of the reset to handle the request
	 * message correctly.
	 */
	for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
		if (test_bit(state, &vf->vf_states))
			return true;
		usleep_range(10000, 20000);
	}

	return test_bit(state, &vf->vf_states);
}
/**
 * i40e_vc_get_version_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request the API version used by the PF
 **/
static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
	};

	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				      I40E_SUCCESS, (u8 *)&info,
				      sizeof(struct virtchnl_version_info));
}
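/* Illustrative note (not from the original file): a VF that negotiated
 * virtchnl 1.0 predates the capability exchange, so the PF reports minor
 * VIRTCHNL_VERSION_MINOR_NO_VF_CAPS instead of its real minor version to
 * keep the old VF driver from expecting capability data it cannot parse.
 */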
/**
 * i40e_del_qch - delete all the additional VSIs created as a part of ADq
 * @vf: pointer to VF structure
 **/
static void i40e_del_qch(struct i40e_vf *vf)
{
	struct i40e_pf *pf = vf->pf;
	int i;

	/* first element in the array belongs to primary VF VSI and we shouldn't
	 * delete it. We should however delete the rest of the VSIs created
	 */
	for (i = 1; i < vf->num_tc; i++) {
		if (vf->ch[i].vsi_idx) {
			i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
			vf->ch[i].vsi_idx = 0;
			vf->ch[i].vsi_id = 0;
		}
	}
}
/**
 * i40e_vc_get_max_frame_size
 * @vf: pointer to the VF
 *
 * Max frame size is determined based on the current port's max frame size and
 * whether a port VLAN is configured on this VF. The VF is not aware whether
 * it's in a port VLAN so the PF needs to account for this in max frame size
 * checks and sending the max frame size to the VF.
 **/
static u16 i40e_vc_get_max_frame_size(struct i40e_vf *vf)
{
	u16 max_frame_size = vf->pf->hw.phy.link_info.max_frame_size;

	if (vf->port_vlan_id)
		max_frame_size -= VLAN_HLEN;

	return max_frame_size;
}
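/* Illustrative example (not from the original file): with a port max frame
 * size of 9728 bytes and a port VLAN configured, the VF is told 9724: the
 * PF-inserted 4-byte VLAN tag (VLAN_HLEN) consumes part of the frame budget
 * without the VF being aware of it.
 */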
/**
 * i40e_vc_get_vf_resources_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to request its resources
 **/
static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_vf_resource *vfres = NULL;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	struct i40e_vsi *vsi;
	int num_vsis = 1;
	size_t len = 0;
	int ret;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
		aq_ret = I40E_ERR_PARAM;
		goto err;
	}

	len = struct_size(vfres, vsi_res, num_vsis);
	vfres = kzalloc(len, GFP_KERNEL);
	if (!vfres) {
		aq_ret = I40E_ERR_NO_MEMORY;
		len = 0;
		goto err;
	}
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	else
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	if (i40e_vf_client_capable(pf, vf->vf_id) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
		set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	} else {
		clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
	} else {
		if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
		    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ))
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		else
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	}

	if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			vfres->vf_cap_flags |=
				VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if ((pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE) &&
	    (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
		if (pf->flags & I40E_FLAG_MFP_ENABLED) {
			dev_err(&pf->pdev->dev,
				"VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
				vf->vf_id);
			aq_ret = I40E_ERR_PARAM;
			goto err;
		}
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	}

	if (pf->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) {
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
			vfres->vf_cap_flags |=
					VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	}

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;

	vfres->num_vsis = num_vsis;
	vfres->num_queue_pairs = vf->num_queue_pairs;
	vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
	vfres->max_mtu = i40e_vc_get_max_frame_size(vf);

	if (vf->lan_vsi_idx) {
		vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
		vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
		vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
		/* VFs only use TC 0 */
		vfres->vsi_res[0].qset_handle
					  = le16_to_cpu(vsi->info.qs_handle[0]);
		ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
				vf->default_lan_addr.addr);
	}
	set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);

err:
	/* send the response back to the VF */
	ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES,
				     aq_ret, (u8 *)vfres, len);

	kfree(vfres);
	return ret;
}
/**
 * i40e_vc_config_promiscuous_mode_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * called from the VF to configure the promiscuous mode of
 * VF vsis
 **/
static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
{
	struct virtchnl_promisc_info *info =
	    (struct virtchnl_promisc_info *)msg;
	struct i40e_pf *pf = vf->pf;
	i40e_status aq_ret = 0;
	bool allmulti = false;
	bool alluni = false;

	if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}
	if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(&pf->pdev->dev,
			"Unprivileged VF %d is attempting to configure promiscuous mode\n",
			vf->vf_id);

		/* Lie to the VF on purpose, because this is an error we can
		 * ignore. Unprivileged VF is not a virtual channel error.
		 */
		aq_ret = 0;
		goto err_out;
	}

	if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}

	if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		aq_ret = I40E_ERR_PARAM;
		goto err_out;
	}

	/* Multicast promiscuous handling */
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		allmulti = true;

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		alluni = true;
	aq_ret = i40e_config_vf_promiscuous_mode(vf, info->vsi_id, allmulti,
						 alluni);
	if (aq_ret)
		goto err_out;

	if (allmulti) {
		if (!test_and_set_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set multicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_MC_PROMISC,
				      &vf->vf_states)) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset multicast promiscuous mode\n",
			 vf->vf_id);
	}
	if (alluni) {
		if (!test_and_set_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states))
			dev_info(&pf->pdev->dev,
				 "VF %d successfully set unicast promiscuous mode\n",
				 vf->vf_id);
	} else if (test_and_clear_bit(I40E_VF_STATE_UC_PROMISC,
				      &vf->vf_states)) {
		dev_info(&pf->pdev->dev,
			 "VF %d successfully unset unicast promiscuous mode\n",
			 vf->vf_id);
	}
err_out:
	/* send the response to the VF */
	return i40e_vc_send_resp_to_vf(vf,
				       VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
				       aq_ret);
}
2263 * i40e_vc_config_queues_msg
2264 * @vf: pointer to the VF info
2265 * @msg: pointer to the msg buffer
2267 * called from the VF to configure the rx/tx
2270 static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
2272 struct virtchnl_vsi_queue_config_info *qci =
2273 (struct virtchnl_vsi_queue_config_info *)msg;
2274 struct virtchnl_queue_pair_info *qpi;
2275 u16 vsi_id, vsi_queue_id = 0;
2276 struct i40e_pf *pf = vf->pf;
2277 i40e_status aq_ret = 0;
2278 int i, j = 0, idx = 0;
2279 struct i40e_vsi *vsi;
2280 u16 num_qps_all = 0;
2282 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2283 aq_ret = I40E_ERR_PARAM;
2287 if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2288 aq_ret = I40E_ERR_PARAM;
2292 if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
2293 aq_ret = I40E_ERR_PARAM;
2297 if (vf->adq_enabled) {
2298 for (i = 0; i < vf->num_tc; i++)
2299 num_qps_all += vf->ch[i].num_qps;
2300 if (num_qps_all != qci->num_queue_pairs) {
2301 aq_ret = I40E_ERR_PARAM;
2306 vsi_id = qci->vsi_id;
2308 for (i = 0; i < qci->num_queue_pairs; i++) {
2309 qpi = &qci->qpair[i];
2311 if (!vf->adq_enabled) {
2312 if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
2313 qpi->txq.queue_id)) {
2314 aq_ret = I40E_ERR_PARAM;
2318 vsi_queue_id = qpi->txq.queue_id;
2320 if (qpi->txq.vsi_id != qci->vsi_id ||
2321 qpi->rxq.vsi_id != qci->vsi_id ||
2322 qpi->rxq.queue_id != vsi_queue_id) {
2323 aq_ret = I40E_ERR_PARAM;
2328 if (vf->adq_enabled) {
2329 if (idx >= ARRAY_SIZE(vf->ch)) {
2330 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2333 vsi_id = vf->ch[idx].vsi_id;
2336 if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
2338 i40e_config_vsi_tx_queue(vf, vsi_id, vsi_queue_id,
2340 aq_ret = I40E_ERR_PARAM;
2344 /* For ADq there can be up to 4 VSIs with max 4 queues each.
2345 * The VF does not know about these additional VSIs; all it
2346 * cares about is its own queues. The PF configures these queues
2347 * on the appropriate VSIs based on the TC mapping.
2349 if (vf->adq_enabled) {
2350 if (idx >= ARRAY_SIZE(vf->ch)) {
2351 aq_ret = I40E_ERR_NO_AVAILABLE_VSI;
2354 if (j == (vf->ch[idx].num_qps - 1)) {
2356 j = 0; /* resetting the queue count */
2364 /* set vsi num_queue_pairs in use to num configured by VF */
2365 if (!vf->adq_enabled) {
2366 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
2367 qci->num_queue_pairs;
2369 for (i = 0; i < vf->num_tc; i++) {
2370 vsi = pf->vsi[vf->ch[i].vsi_idx];
2371 vsi->num_queue_pairs = vf->ch[i].num_qps;
2373 if (i40e_update_adq_vsi_queues(vsi, i)) {
2374 aq_ret = I40E_ERR_CONFIG;
2381 /* send the response to the VF */
2382 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
2387 * i40e_validate_queue_map - check queue map is valid
2388 * @vf: the VF structure pointer
2389 * @vsi_id: vsi id
2390 * @queuemap: Tx or Rx queue map
2392 * check if Tx or Rx queue map is valid
2394 static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
2395 unsigned long queuemap)
2397 u16 vsi_queue_id, queue_id;
2399 for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
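/* With ADq the VF sees a single flat queue space: dividing by
 * I40E_MAX_VF_VSI selects the channel VSI, and the remainder
 * modulo I40E_DEFAULT_QUEUES_PER_VF is the queue within that VSI.
 */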
2400 if (vf->adq_enabled) {
2401 vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
2402 queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
2404 queue_id = vsi_queue_id;
2407 if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
2415 * i40e_vc_config_irq_map_msg
2416 * @vf: pointer to the VF info
2417 * @msg: pointer to the msg buffer
2419 * called from the VF to configure the IRQ to queue map
2422 static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
2424 struct virtchnl_irq_map_info *irqmap_info =
2425 (struct virtchnl_irq_map_info *)msg;
2426 struct virtchnl_vector_map *map;
2428 i40e_status aq_ret = 0;
2431 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2432 aq_ret = I40E_ERR_PARAM;
2436 if (irqmap_info->num_vectors >
2437 vf->pf->hw.func_caps.num_msix_vectors_vf) {
2438 aq_ret = I40E_ERR_PARAM;
2442 for (i = 0; i < irqmap_info->num_vectors; i++) {
2443 map = &irqmap_info->vecmap[i];
2444 /* validate msg params */
2445 if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
2446 !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
2447 aq_ret = I40E_ERR_PARAM;
2450 vsi_id = map->vsi_id;
2452 if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
2453 aq_ret = I40E_ERR_PARAM;
2457 if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
2458 aq_ret = I40E_ERR_PARAM;
2462 i40e_config_irq_link_list(vf, vsi_id, map);
2465 /* send the response to the VF */
2466 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
2471 * i40e_ctrl_vf_tx_rings
2472 * @vsi: the SRIOV VSI being configured
2473 * @q_map: bit map of the queues to be enabled
2474 * @enable: start or stop the queue
2476 static int i40e_ctrl_vf_tx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2479 struct i40e_pf *pf = vsi->back;
2483 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2484 ret = i40e_control_wait_tx_q(vsi->seid, pf,
2485 vsi->base_queue + q_id,
2486 false /*is xdp*/, enable);
2494 * i40e_ctrl_vf_rx_rings
2495 * @vsi: the SRIOV VSI being configured
2496 * @q_map: bit map of the queues to be enabled
2497 * @enable: start or stop the queue
2499 static int i40e_ctrl_vf_rx_rings(struct i40e_vsi *vsi, unsigned long q_map,
2502 struct i40e_pf *pf = vsi->back;
2506 for_each_set_bit(q_id, &q_map, I40E_MAX_VF_QUEUES) {
2507 ret = i40e_control_wait_rx_q(pf, vsi->base_queue + q_id,
2516 * i40e_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2517 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2519 * Returns true if validation was successful, else false.
2521 static bool i40e_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2523 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2524 vqs->rx_queues >= BIT(I40E_MAX_VF_QUEUES) ||
2525 vqs->tx_queues >= BIT(I40E_MAX_VF_QUEUES))
2532 * i40e_vc_enable_queues_msg
2533 * @vf: pointer to the VF info
2534 * @msg: pointer to the msg buffer
2536 * called from the VF to enable all or specific queue(s)
2538 static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2540 struct virtchnl_queue_select *vqs =
2541 (struct virtchnl_queue_select *)msg;
2542 struct i40e_pf *pf = vf->pf;
2543 i40e_status aq_ret = 0;
2546 if (vf->is_disabled_from_host) {
2548 dev_info(&pf->pdev->dev,
2549 "Admin has disabled VF %d, will not enable queues\n",
2554 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
2555 aq_ret = I40E_ERR_PARAM;
2559 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2560 aq_ret = I40E_ERR_PARAM;
2564 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2565 aq_ret = I40E_ERR_PARAM;
2569 /* Use the queue bit map sent by the VF */
2570 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2572 aq_ret = I40E_ERR_TIMEOUT;
2575 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2577 aq_ret = I40E_ERR_TIMEOUT;
2581 /* need to start the rings for additional ADq VSIs as well */
2582 if (vf->adq_enabled) {
2583 /* zero belongs to LAN VSI */
2584 for (i = 1; i < vf->num_tc; i++) {
2585 if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
2586 aq_ret = I40E_ERR_TIMEOUT;
2591 /* send the response to the VF */
2592 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
2597 * i40e_vc_disable_queues_msg
2598 * @vf: pointer to the VF info
2599 * @msg: pointer to the msg buffer
2601 * called from the VF to disable all or specific queue(s)
2604 static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2606 struct virtchnl_queue_select *vqs =
2607 (struct virtchnl_queue_select *)msg;
2608 struct i40e_pf *pf = vf->pf;
2609 i40e_status aq_ret = 0;
2611 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2612 aq_ret = I40E_ERR_PARAM;
2616 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2617 aq_ret = I40E_ERR_PARAM;
2621 if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2622 aq_ret = I40E_ERR_PARAM;
2626 /* Use the queue bit map sent by the VF */
2627 if (i40e_ctrl_vf_tx_rings(pf->vsi[vf->lan_vsi_idx], vqs->tx_queues,
2629 aq_ret = I40E_ERR_TIMEOUT;
2632 if (i40e_ctrl_vf_rx_rings(pf->vsi[vf->lan_vsi_idx], vqs->rx_queues,
2634 aq_ret = I40E_ERR_TIMEOUT;
2638 /* send the response to the VF */
2639 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES,
2644 * i40e_check_enough_queue - check whether enough queues are available
2645 * @vf: pointer to the VF info
2646 * @needed: the number of items needed
2648 * Returns the base index of the queue block, or negative on error
2650 static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
2652 unsigned int i, cur_queues, more, pool_size;
2653 struct i40e_lump_tracking *pile;
2654 struct i40e_pf *pf = vf->pf;
2655 struct i40e_vsi *vsi;
2657 vsi = pf->vsi[vf->lan_vsi_idx];
2658 cur_queues = vsi->alloc_queue_pairs;
2660 /* if the currently allocated queues are enough for the request */
2661 if (cur_queues >= needed)
2662 return vsi->base_queue;
2665 if (cur_queues > 0) {
2666 /* some queues are already allocated, so just check whether there
2667 * are enough free entries immediately behind the currently
2668 * allocated range.
2670 more = needed - cur_queues;
2671 for (i = vsi->base_queue + cur_queues;
2672 i < pile->num_entries; i++) {
2673 if (pile->list[i] & I40E_PILE_VALID_BIT)
2677 /* there is enough */
2678 return vsi->base_queue;
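/* otherwise scan the whole pile for any contiguous run of free
 * entries at least 'needed' long
 */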
2683 for (i = 0; i < pile->num_entries; i++) {
2684 if (pile->list[i] & I40E_PILE_VALID_BIT) {
2688 if (needed <= ++pool_size)
2689 /* there is enough */
2697 * i40e_vc_request_queues_msg
2698 * @vf: pointer to the VF info
2699 * @msg: pointer to the msg buffer
2701 * VFs get a default number of queues but can use this message to request a
2702 * different number. If the request is successful, the PF will reset the VF and
2703 * return 0. If unsuccessful, the PF will send a message informing the VF of
2704 * the number of available queues and return the result of sending that message.
2706 static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
2708 struct virtchnl_vf_res_request *vfres =
2709 (struct virtchnl_vf_res_request *)msg;
2710 u16 req_pairs = vfres->num_queue_pairs;
2711 u8 cur_pairs = vf->num_queue_pairs;
2712 struct i40e_pf *pf = vf->pf;
2714 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
2717 if (req_pairs > I40E_MAX_VF_QUEUES) {
2718 dev_err(&pf->pdev->dev,
2719 "VF %d tried to request more than %d queues.\n",
2721 I40E_MAX_VF_QUEUES);
2722 vfres->num_queue_pairs = I40E_MAX_VF_QUEUES;
2723 } else if (req_pairs - cur_pairs > pf->queues_left) {
2724 dev_warn(&pf->pdev->dev,
2725 "VF %d requested %d more queues, but only %d left.\n",
2727 req_pairs - cur_pairs,
2729 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
2730 } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
2731 dev_warn(&pf->pdev->dev,
2732 "VF %d requested %d more queues, but there is not enough for it.\n",
2734 req_pairs - cur_pairs);
2735 vfres->num_queue_pairs = cur_pairs;
2737 /* successful request */
2738 vf->num_req_queues = req_pairs;
2739 i40e_vc_reset_vf(vf, true);
2743 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
2744 (u8 *)vfres, sizeof(*vfres));
2748 * i40e_vc_get_stats_msg
2749 * @vf: pointer to the VF info
2750 * @msg: pointer to the msg buffer
2752 * called from the VF to get vsi stats
2754 static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
2756 struct virtchnl_queue_select *vqs =
2757 (struct virtchnl_queue_select *)msg;
2758 struct i40e_pf *pf = vf->pf;
2759 struct i40e_eth_stats stats;
2760 i40e_status aq_ret = 0;
2761 struct i40e_vsi *vsi;
2763 memset(&stats, 0, sizeof(struct i40e_eth_stats));
2765 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
2766 aq_ret = I40E_ERR_PARAM;
2770 if (!i40e_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2771 aq_ret = I40E_ERR_PARAM;
2775 vsi = pf->vsi[vf->lan_vsi_idx];
2777 aq_ret = I40E_ERR_PARAM;
2780 i40e_update_eth_stats(vsi);
2781 stats = vsi->eth_stats;
2784 /* send the response back to the VF */
2785 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
2786 (u8 *)&stats, sizeof(stats));
2789 /* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
2790 * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
2792 #define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
2793 #define I40E_VC_MAX_VLAN_PER_VF 16
2796 * i40e_check_vf_permission
2797 * @vf: pointer to the VF info
2798 * @al: MAC address list from virtchnl
2800 * Check that the given list of MAC addresses is allowed. Will return -EPERM
2801 * if any address in the list is not valid. Checks the following conditions:
2803 * 1) broadcast and zero addresses are never valid
2804 * 2) unicast addresses are not allowed if the VMM has administratively set
2805 * the VF MAC address, unless the VF is marked as privileged.
2806 * 3) There is enough space to add all the addresses.
2808 * Note that to guarantee consistency, this function is expected to be called
2809 * while holding the mac_filter_hash_lock, as otherwise the current number of
2810 * addresses might not be accurate.
2812 static inline int i40e_check_vf_permission(struct i40e_vf *vf,
2813 struct virtchnl_ether_addr_list *al)
2815 struct i40e_pf *pf = vf->pf;
2816 struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
2817 int mac2add_cnt = 0;
2820 for (i = 0; i < al->num_elements; i++) {
2821 struct i40e_mac_filter *f;
2822 u8 *addr = al->list[i].addr;
2824 if (is_broadcast_ether_addr(addr) ||
2825 is_zero_ether_addr(addr)) {
2826 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n",
2828 return I40E_ERR_INVALID_MAC_ADDR;
2831 /* If the host VMM administrator has set the VF MAC address
2832 * administratively via the ndo_set_vf_mac command then deny
2833 * permission to the VF to add or delete unicast MAC addresses.
2834 * A privileged VF, however, is exempt from this restriction.
2835 * The VF may request to set the MAC address filter already
2836 * assigned to it so do not return an error in that case.
2838 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2839 !is_multicast_ether_addr(addr) && vf->pf_set_mac &&
2840 !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
2841 dev_err(&pf->pdev->dev,
2842 "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2846 /* count the filters that will actually be added */
2847 f = i40e_find_mac(vsi, addr);
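/* addresses already present in the filter hash do not count
 * against the untrusted-VF quota checked below
 */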
2852 /* If this VF is not privileged, then we can't add more than a limited
2853 * number of addresses. Check to make sure that the additions do not
2854 * push us over the limit.
2856 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
2857 (i40e_count_filters(vsi) + mac2add_cnt) >
2858 I40E_VC_MAX_MAC_ADDR_PER_VF) {
2859 dev_err(&pf->pdev->dev,
2860 "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
2867 * i40e_vc_add_mac_addr_msg
2868 * @vf: pointer to the VF info
2869 * @msg: pointer to the msg buffer
2871 * add guest mac address filter
2873 static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2875 struct virtchnl_ether_addr_list *al =
2876 (struct virtchnl_ether_addr_list *)msg;
2877 struct i40e_pf *pf = vf->pf;
2878 struct i40e_vsi *vsi = NULL;
2879 i40e_status ret = 0;
2882 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
2883 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2884 ret = I40E_ERR_PARAM;
2888 vsi = pf->vsi[vf->lan_vsi_idx];
2890 /* Lock once: every function called inside the loop below accesses the
2891 * VSI's MAC filter list, which must be protected by the same lock.
2893 spin_lock_bh(&vsi->mac_filter_hash_lock);
2895 ret = i40e_check_vf_permission(vf, al);
2897 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2901 /* add new addresses to the list */
2902 for (i = 0; i < al->num_elements; i++) {
2903 struct i40e_mac_filter *f;
2905 f = i40e_find_mac(vsi, al->list[i].addr);
2907 f = i40e_add_mac_filter(vsi, al->list[i].addr);
2910 dev_err(&pf->pdev->dev,
2911 "Unable to add MAC filter %pM for VF %d\n",
2912 al->list[i].addr, vf->vf_id);
2913 ret = I40E_ERR_PARAM;
2914 spin_unlock_bh(&vsi->mac_filter_hash_lock);
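/* adopt the first valid address the VF programs as its default
 * LAN address if none has been assigned yet
 */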
2917 if (is_valid_ether_addr(al->list[i].addr) &&
2918 is_zero_ether_addr(vf->default_lan_addr.addr))
2919 ether_addr_copy(vf->default_lan_addr.addr,
2923 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2925 /* program the updated filter list */
2926 ret = i40e_sync_vsi_filters(vsi);
2928 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2932 /* send the response to the VF */
2933 return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
2938 * i40e_vc_del_mac_addr_msg
2939 * @vf: pointer to the VF info
2940 * @msg: pointer to the msg buffer
2942 * remove guest mac address filter
2944 static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
2946 struct virtchnl_ether_addr_list *al =
2947 (struct virtchnl_ether_addr_list *)msg;
2948 bool was_unimac_deleted = false;
2949 struct i40e_pf *pf = vf->pf;
2950 struct i40e_vsi *vsi = NULL;
2951 i40e_status ret = 0;
2954 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
2955 !i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2956 ret = I40E_ERR_PARAM;
2960 for (i = 0; i < al->num_elements; i++) {
2961 if (is_broadcast_ether_addr(al->list[i].addr) ||
2962 is_zero_ether_addr(al->list[i].addr)) {
2963 dev_err(&pf->pdev->dev, "Invalid MAC addr %pM for VF %d\n",
2964 al->list[i].addr, vf->vf_id);
2965 ret = I40E_ERR_INVALID_MAC_ADDR;
2968 if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr))
2969 was_unimac_deleted = true;
2971 vsi = pf->vsi[vf->lan_vsi_idx];
2973 spin_lock_bh(&vsi->mac_filter_hash_lock);
2974 /* delete addresses from the list */
2975 for (i = 0; i < al->num_elements; i++)
2976 if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
2977 ret = I40E_ERR_INVALID_MAC_ADDR;
2978 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2982 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2984 /* program the updated filter list */
2985 ret = i40e_sync_vsi_filters(vsi);
2987 dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters, error %d\n",
2990 if (vf->trusted && was_unimac_deleted) {
2991 struct i40e_mac_filter *f;
2992 struct hlist_node *h;
2996 /* set last unicast mac address as default */
2997 spin_lock_bh(&vsi->mac_filter_hash_lock);
2998 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2999 if (is_valid_ether_addr(f->macaddr))
3000 macaddr = f->macaddr;
3003 ether_addr_copy(vf->default_lan_addr.addr, macaddr);
3004 spin_unlock_bh(&vsi->mac_filter_hash_lock);
3007 /* send the response to the VF */
3008 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR, ret);
3012 * i40e_vc_add_vlan_msg
3013 * @vf: pointer to the VF info
3014 * @msg: pointer to the msg buffer
3016 * program guest vlan id
3018 static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
3020 struct virtchnl_vlan_filter_list *vfl =
3021 (struct virtchnl_vlan_filter_list *)msg;
3022 struct i40e_pf *pf = vf->pf;
3023 struct i40e_vsi *vsi = NULL;
3024 i40e_status aq_ret = 0;
3027 if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
3028 !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3029 dev_err(&pf->pdev->dev,
3030 "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
3033 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3034 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3035 aq_ret = I40E_ERR_PARAM;
3039 for (i = 0; i < vfl->num_elements; i++) {
3040 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3041 aq_ret = I40E_ERR_PARAM;
3042 dev_err(&pf->pdev->dev,
3043 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
3047 vsi = pf->vsi[vf->lan_vsi_idx];
3048 if (vsi->info.pvid) {
3049 aq_ret = I40E_ERR_PARAM;
3053 i40e_vlan_stripping_enable(vsi);
3054 for (i = 0; i < vfl->num_elements; i++) {
3055 /* add new VLAN filter */
3056 int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
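/* if the VF is already in promiscuous mode, mirror that onto the
 * newly added VLAN so tagged traffic keeps flowing
 */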
3060 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3061 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3065 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3066 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3072 dev_err(&pf->pdev->dev,
3073 "Unable to add VLAN filter %d for VF %d, error %d\n",
3074 vfl->vlan_id[i], vf->vf_id, ret);
3078 /* send the response to the VF */
3079 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret);
3083 * i40e_vc_remove_vlan_msg
3084 * @vf: pointer to the VF info
3085 * @msg: pointer to the msg buffer
3087 * remove programmed guest vlan id
3089 static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
3091 struct virtchnl_vlan_filter_list *vfl =
3092 (struct virtchnl_vlan_filter_list *)msg;
3093 struct i40e_pf *pf = vf->pf;
3094 struct i40e_vsi *vsi = NULL;
3095 i40e_status aq_ret = 0;
3098 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3099 !i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3100 aq_ret = I40E_ERR_PARAM;
3104 for (i = 0; i < vfl->num_elements; i++) {
3105 if (vfl->vlan_id[i] > I40E_MAX_VLANID) {
3106 aq_ret = I40E_ERR_PARAM;
3111 vsi = pf->vsi[vf->lan_vsi_idx];
3112 if (vsi->info.pvid) {
3113 if (vfl->num_elements > 1 || vfl->vlan_id[0])
3114 aq_ret = I40E_ERR_PARAM;
3118 for (i = 0; i < vfl->num_elements; i++) {
3119 i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
3122 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
3123 i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
3127 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
3128 i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
3135 /* send the response to the VF */
3136 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret);
3141 * @vf: pointer to the VF info
3142 * @msg: pointer to the msg buffer
3143 * @msglen: msg length
3145 * called from the VF for the iwarp msgs
3147 static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
3149 struct i40e_pf *pf = vf->pf;
3150 int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
3151 i40e_status aq_ret = 0;
3153 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3154 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3155 aq_ret = I40E_ERR_PARAM;
3159 i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
3163 /* send the response to the VF */
3164 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
3169 * i40e_vc_iwarp_qvmap_msg
3170 * @vf: pointer to the VF info
3171 * @msg: pointer to the msg buffer
3172 * @config: config qvmap or release it
3174 * called from the VF for the iwarp msgs
3176 static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
3178 struct virtchnl_iwarp_qvlist_info *qvlist_info =
3179 (struct virtchnl_iwarp_qvlist_info *)msg;
3180 i40e_status aq_ret = 0;
3182 if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
3183 !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
3184 aq_ret = I40E_ERR_PARAM;
3189 if (i40e_config_iwarp_qvlist(vf, qvlist_info))
3190 aq_ret = I40E_ERR_PARAM;
3192 i40e_release_iwarp_qvlist(vf);
3196 /* send the response to the VF */
3197 return i40e_vc_send_resp_to_vf(vf,
3198 config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
3199 VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
3204 * i40e_vc_config_rss_key
3205 * @vf: pointer to the VF info
3206 * @msg: pointer to the msg buffer
3208 * Configure the VF's RSS key
3210 static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
3212 struct virtchnl_rss_key *vrk =
3213 (struct virtchnl_rss_key *)msg;
3214 struct i40e_pf *pf = vf->pf;
3215 struct i40e_vsi *vsi = NULL;
3216 i40e_status aq_ret = 0;
3218 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3219 !i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
3220 vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
3221 aq_ret = I40E_ERR_PARAM;
3225 vsi = pf->vsi[vf->lan_vsi_idx];
3226 aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
3228 /* send the response to the VF */
3229 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
3234 * i40e_vc_config_rss_lut
3235 * @vf: pointer to the VF info
3236 * @msg: pointer to the msg buffer
3238 * Configure the VF's RSS LUT
3240 static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
3242 struct virtchnl_rss_lut *vrl =
3243 (struct virtchnl_rss_lut *)msg;
3244 struct i40e_pf *pf = vf->pf;
3245 struct i40e_vsi *vsi = NULL;
3246 i40e_status aq_ret = 0;
3249 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
3250 !i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
3251 vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
3252 aq_ret = I40E_ERR_PARAM;
3256 for (i = 0; i < vrl->lut_entries; i++)
3257 if (vrl->lut[i] >= vf->num_queue_pairs) {
3258 aq_ret = I40E_ERR_PARAM;
3262 vsi = pf->vsi[vf->lan_vsi_idx];
3263 aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
3264 /* send the response to the VF */
3266 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
3271 * i40e_vc_get_rss_hena
3272 * @vf: pointer to the VF info
3273 * @msg: pointer to the msg buffer
3275 * Return the RSS HENA bits allowed by the hardware
3277 static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
3279 struct virtchnl_rss_hena *vrh = NULL;
3280 struct i40e_pf *pf = vf->pf;
3281 i40e_status aq_ret = 0;
3284 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3285 aq_ret = I40E_ERR_PARAM;
3288 len = sizeof(struct virtchnl_rss_hena);
3290 vrh = kzalloc(len, GFP_KERNEL);
3292 aq_ret = I40E_ERR_NO_MEMORY;
3296 vrh->hena = i40e_pf_get_default_rss_hena(pf);
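/* hena is a 64-bit mask of flow (packet) types the hardware will
 * hash for RSS; the default set depends on the device's capabilities
 */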
3298 /* send the response back to the VF */
3299 aq_ret = i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS,
3300 aq_ret, (u8 *)vrh, len);
3306 * i40e_vc_set_rss_hena
3307 * @vf: pointer to the VF info
3308 * @msg: pointer to the msg buffer
3310 * Set the RSS HENA bits for the VF
3312 static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
3314 struct virtchnl_rss_hena *vrh =
3315 (struct virtchnl_rss_hena *)msg;
3316 struct i40e_pf *pf = vf->pf;
3317 struct i40e_hw *hw = &pf->hw;
3318 i40e_status aq_ret = 0;
3320 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3321 aq_ret = I40E_ERR_PARAM;
3324 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
3325 i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
3326 (u32)(vrh->hena >> 32));
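/* the 64-bit hena value spans two 32-bit VFQF_HENA1 registers per
 * VF: index 0 holds the low word, index 1 the high word
 */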
3328 /* send the response to the VF */
3330 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, aq_ret);
3334 * i40e_vc_enable_vlan_stripping
3335 * @vf: pointer to the VF info
3336 * @msg: pointer to the msg buffer
3338 * Enable vlan header stripping for the VF
3340 static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3342 i40e_status aq_ret = 0;
3343 struct i40e_vsi *vsi;
3345 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3346 aq_ret = I40E_ERR_PARAM;
3350 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3351 i40e_vlan_stripping_enable(vsi);
3353 /* send the response to the VF */
3355 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3360 * i40e_vc_disable_vlan_stripping
3361 * @vf: pointer to the VF info
3362 * @msg: pointer to the msg buffer
3364 * Disable vlan header stripping for the VF
3366 static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
3368 i40e_status aq_ret = 0;
3369 struct i40e_vsi *vsi;
3371 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3372 aq_ret = I40E_ERR_PARAM;
3376 vsi = vf->pf->vsi[vf->lan_vsi_idx];
3377 i40e_vlan_stripping_disable(vsi);
3379 /* send the response to the VF */
3381 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3386 * i40e_validate_cloud_filter
3387 * @vf: pointer to VF structure
3388 * @tc_filter: pointer to filter requested
3390 * This function validates cloud filter programmed as TC filter for ADq
3392 static int i40e_validate_cloud_filter(struct i40e_vf *vf,
3393 struct virtchnl_filter *tc_filter)
3395 struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
3396 struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
3397 struct i40e_pf *pf = vf->pf;
3398 struct i40e_vsi *vsi = NULL;
3399 struct i40e_mac_filter *f;
3400 struct hlist_node *h;
3404 if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) {
3405 dev_info(&pf->pdev->dev,
3406 "VF %d: ADQ doesn't support this action (%d)\n",
3407 vf->vf_id, tc_filter->action);
3411 /* action_meta is the TC number to which the filter is applied */
3412 if (!tc_filter->action_meta ||
3413 tc_filter->action_meta > vf->num_tc) {
3414 dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
3415 vf->vf_id, tc_filter->action_meta);
3419 /* Check whether the filter is programmed for advanced or basic mode.
3420 * There are two ADq modes (for VFs only):
3421 * 1. Basic mode: allows as many filter options as possible to be
3422 * added to a VF in non-trusted mode. The main goal is to let the
3423 * VF add filters for its own MAC and VLAN id.
3424 * 2. Advanced mode: allows filters to be applied to a MAC or VLAN
3425 * other than the VF's own. This mode requires the VF to be trusted.
3428 if (mask.dst_mac[0] && !mask.dst_ip[0]) {
3429 vsi = pf->vsi[vf->lan_vsi_idx];
3430 f = i40e_find_mac(vsi, data.dst_mac);
3433 dev_info(&pf->pdev->dev,
3434 "Destination MAC %pM doesn't belong to VF %d\n",
3435 data.dst_mac, vf->vf_id);
3440 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
3442 if (f->vlan == ntohs(data.vlan_id)) {
3448 dev_info(&pf->pdev->dev,
3449 "VF %d doesn't have any VLAN id %u\n",
3450 vf->vf_id, ntohs(data.vlan_id));
3455 /* Check if VF is trusted */
3456 if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
3457 dev_err(&pf->pdev->dev,
3458 "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
3460 return I40E_ERR_CONFIG;
3464 if (mask.dst_mac[0] & data.dst_mac[0]) {
3465 if (is_broadcast_ether_addr(data.dst_mac) ||
3466 is_zero_ether_addr(data.dst_mac)) {
3467 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
3468 vf->vf_id, data.dst_mac);
3473 if (mask.src_mac[0] & data.src_mac[0]) {
3474 if (is_broadcast_ether_addr(data.src_mac) ||
3475 is_zero_ether_addr(data.src_mac)) {
3476 dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
3477 vf->vf_id, data.src_mac);
3482 if (mask.dst_port & data.dst_port) {
3483 if (!data.dst_port) {
3484 dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
3490 if (mask.src_port & data.src_port) {
3491 if (!data.src_port) {
3492 dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
3498 if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
3499 tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
3500 dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
3505 if (mask.vlan_id & data.vlan_id) {
3506 if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
3507 dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
3513 return I40E_SUCCESS;
3515 return I40E_ERR_CONFIG;
3519 * i40e_find_vsi_from_seid - searches for the vsi with the given seid
3520 * @vf: pointer to the VF info
3521 * @seid: seid of the vsi it is searching for
3523 static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
3525 struct i40e_pf *pf = vf->pf;
3526 struct i40e_vsi *vsi = NULL;
3529 for (i = 0; i < vf->num_tc ; i++) {
3530 vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
3531 if (vsi && vsi->seid == seid)
3538 * i40e_del_all_cloud_filters
3539 * @vf: pointer to the VF info
3541 * This function deletes all cloud filters
3543 static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
3545 struct i40e_cloud_filter *cfilter = NULL;
3546 struct i40e_pf *pf = vf->pf;
3547 struct i40e_vsi *vsi = NULL;
3548 struct hlist_node *node;
3551 hlist_for_each_entry_safe(cfilter, node,
3552 &vf->cloud_filter_list, cloud_node) {
3553 vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
3556 dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
3557 vf->vf_id, cfilter->seid);
3561 if (cfilter->dst_port)
3562 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
3565 ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
3567 dev_err(&pf->pdev->dev,
3568 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3569 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3570 i40e_aq_str(&pf->hw,
3571 pf->hw.aq.asq_last_status));
3573 hlist_del(&cfilter->cloud_node);
3575 vf->num_cloud_filters--;
3580 * i40e_vc_del_cloud_filter
3581 * @vf: pointer to the VF info
3582 * @msg: pointer to the msg buffer
3584 * This function deletes a cloud filter programmed as TC filter for ADq
3586 static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
3588 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3589 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3590 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3591 struct i40e_cloud_filter cfilter, *cf = NULL;
3592 struct i40e_pf *pf = vf->pf;
3593 struct i40e_vsi *vsi = NULL;
3594 struct hlist_node *node;
3595 i40e_status aq_ret = 0;
3598 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3599 aq_ret = I40E_ERR_PARAM;
3603 if (!vf->adq_enabled) {
3604 dev_info(&pf->pdev->dev,
3605 "VF %d: ADq not enabled, can't apply cloud filter\n",
3607 aq_ret = I40E_ERR_PARAM;
3611 if (i40e_validate_cloud_filter(vf, vcf)) {
3612 dev_info(&pf->pdev->dev,
3613 "VF %d: Invalid input, can't apply cloud filter\n",
3615 aq_ret = I40E_ERR_PARAM;
3619 memset(&cfilter, 0, sizeof(cfilter));
3620 /* parse destination mac address */
3621 for (i = 0; i < ETH_ALEN; i++)
3622 cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3624 /* parse source mac address */
3625 for (i = 0; i < ETH_ALEN; i++)
3626 cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3628 cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
3629 cfilter.dst_port = mask.dst_port & tcf.dst_port;
3630 cfilter.src_port = mask.src_port & tcf.src_port;
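/* each field above is the VF-supplied data ANDed with its mask, so
 * fields the VF did not specify collapse to zero and are ignored
 */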
3632 switch (vcf->flow_type) {
3633 case VIRTCHNL_TCP_V4_FLOW:
3634 cfilter.n_proto = ETH_P_IP;
3635 if (mask.dst_ip[0] & tcf.dst_ip[0])
3636 memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
3637 ARRAY_SIZE(tcf.dst_ip));
3638 else if (mask.src_ip[0] & tcf.src_ip[0])
3639 memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
3640 ARRAY_SIZE(tcf.dst_ip));
3642 case VIRTCHNL_TCP_V6_FLOW:
3643 cfilter.n_proto = ETH_P_IPV6;
3644 if (mask.dst_ip[3] & tcf.dst_ip[3])
3645 memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
3646 sizeof(cfilter.ip.v6.dst_ip6));
3647 if (mask.src_ip[3] & tcf.src_ip[3])
3648 memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
3649 sizeof(cfilter.ip.v6.src_ip6));
3652 /* A TC filter can be configured from different field combinations;
3653 * in this case IP is not part of the filter config
3655 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3659 /* get the VSI to which the TC belongs */
3660 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3661 cfilter.seid = vsi->seid;
3662 cfilter.flags = vcf->field_flags;
3664 /* Deleting TC filter */
3666 ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
3668 ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
3670 dev_err(&pf->pdev->dev,
3671 "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
3672 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3673 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3677 hlist_for_each_entry_safe(cf, node,
3678 &vf->cloud_filter_list, cloud_node) {
3679 if (cf->seid != cfilter.seid)
3682 if (cfilter.dst_port != cf->dst_port)
3684 if (mask.dst_mac[0])
3685 if (!ether_addr_equal(cf->src_mac, cfilter.src_mac))
3687 /* for ipv4 data to be valid, only first byte of mask is set */
3688 if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
3689 if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
3690 ARRAY_SIZE(tcf.dst_ip)))
3692 /* for ipv6, mask is set for all sixteen bytes (4 words) */
3693 if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
3694 if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
3695 sizeof(cfilter.ip.v6.src_ip6)))
3698 if (cfilter.vlan_id != cf->vlan_id)
3701 hlist_del(&cf->cloud_node);
3703 vf->num_cloud_filters--;
3707 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
3712 * i40e_vc_add_cloud_filter
3713 * @vf: pointer to the VF info
3714 * @msg: pointer to the msg buffer
3716 * This function adds a cloud filter programmed as TC filter for ADq
3718 static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
3720 struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
3721 struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
3722 struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
3723 struct i40e_cloud_filter *cfilter = NULL;
3724 struct i40e_pf *pf = vf->pf;
3725 struct i40e_vsi *vsi = NULL;
3726 i40e_status aq_ret = 0;
3729 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3730 aq_ret = I40E_ERR_PARAM;
3734 if (!vf->adq_enabled) {
3735 dev_info(&pf->pdev->dev,
3736 "VF %d: ADq is not enabled, can't apply cloud filter\n",
3738 aq_ret = I40E_ERR_PARAM;
3742 if (i40e_validate_cloud_filter(vf, vcf)) {
3743 dev_info(&pf->pdev->dev,
3744 "VF %d: Invalid input/s, can't apply cloud filter\n",
3746 aq_ret = I40E_ERR_PARAM;
3750 cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
3754 /* parse destination mac address */
3755 for (i = 0; i < ETH_ALEN; i++)
3756 cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
3758 /* parse source mac address */
3759 for (i = 0; i < ETH_ALEN; i++)
3760 cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
3762 cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
3763 cfilter->dst_port = mask.dst_port & tcf.dst_port;
3764 cfilter->src_port = mask.src_port & tcf.src_port;
3766 switch (vcf->flow_type) {
3767 case VIRTCHNL_TCP_V4_FLOW:
3768 cfilter->n_proto = ETH_P_IP;
3769 if (mask.dst_ip[0] & tcf.dst_ip[0])
3770 memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
3771 ARRAY_SIZE(tcf.dst_ip));
3772 else if (mask.src_ip[0] & tcf.src_ip[0])
3773 memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
3774 ARRAY_SIZE(tcf.dst_ip));
3776 case VIRTCHNL_TCP_V6_FLOW:
3777 cfilter->n_proto = ETH_P_IPV6;
3778 if (mask.dst_ip[3] & tcf.dst_ip[3])
3779 memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
3780 sizeof(cfilter->ip.v6.dst_ip6));
3781 if (mask.src_ip[3] & tcf.src_ip[3])
3782 memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
3783 sizeof(cfilter->ip.v6.src_ip6));
3786 /* A TC filter can be configured from different field combinations;
3787 * in this case IP is not part of the filter config
3789 dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
3793 /* get the VSI to which the TC belongs */
3794 vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
3795 cfilter->seid = vsi->seid;
3796 cfilter->flags = vcf->field_flags;
3798 /* Adding cloud filter programmed as TC filter */
3800 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
3802 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
3804 dev_err(&pf->pdev->dev,
3805 "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
3806 vf->vf_id, i40e_stat_str(&pf->hw, ret),
3807 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
3811 INIT_HLIST_NODE(&cfilter->cloud_node);
3812 hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
3813 /* drop our pointer; the list now owns the filter */
3815 vf->num_cloud_filters++;
3819 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
3824 * i40e_vc_add_qch_msg: Add queue channel and enable ADq
3825 * @vf: pointer to the VF info
3826 * @msg: pointer to the msg buffer
3828 static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
3830 struct virtchnl_tc_info *tci =
3831 (struct virtchnl_tc_info *)msg;
3832 struct i40e_pf *pf = vf->pf;
3833 struct i40e_link_status *ls = &pf->hw.phy.link_info;
3834 int i, adq_request_qps = 0;
3835 i40e_status aq_ret = 0;
3838 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3839 aq_ret = I40E_ERR_PARAM;
3843 /* ADq cannot be applied if spoof check is ON */
3845 dev_err(&pf->pdev->dev,
3846 "Spoof check is ON, turn it OFF to enable ADq\n");
3847 aq_ret = I40E_ERR_PARAM;
3851 if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3852 dev_err(&pf->pdev->dev,
3853 "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
3855 aq_ret = I40E_ERR_PARAM;
3859 /* max number of traffic classes for VF currently capped at 4 */
3860 if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
3861 dev_err(&pf->pdev->dev,
3862 "VF %d trying to set %u TCs, valid range 1-%u TCs per VF\n",
3863 vf->vf_id, tci->num_tc, I40E_MAX_VF_VSI);
3864 aq_ret = I40E_ERR_PARAM;
3868 /* validate queues for each TC */
3869 for (i = 0; i < tci->num_tc; i++)
3870 if (!tci->list[i].count ||
3871 tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
3872 dev_err(&pf->pdev->dev,
3873 "VF %d: TC %d trying to set %u queues, valid range 1-%u queues per TC\n",
3874 vf->vf_id, i, tci->list[i].count,
3875 I40E_DEFAULT_QUEUES_PER_VF);
3876 aq_ret = I40E_ERR_PARAM;
3880 /* ADq needs the max VF queues, but the VF already has the default number */
3881 adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
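/* only the extra queues beyond the default allocation need to be
 * free in the PF's pool for ADq to be enabled
 */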
3883 if (pf->queues_left < adq_request_qps) {
3884 dev_err(&pf->pdev->dev,
3885 "No queues left to allocate to VF %d\n",
3887 aq_ret = I40E_ERR_PARAM;
3890 /* we need to allocate the max VF queues when enabling ADq so as to
3891 * make sure an ADq-enabled VF always gets its queues back when it
3892 * goes through a reset.
3894 vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
3897 /* get link speed in Mbps to validate the rate limit */
3898 switch (ls->link_speed) {
3899 case VIRTCHNL_LINK_SPEED_100MB:
3902 case VIRTCHNL_LINK_SPEED_1GB:
3905 case VIRTCHNL_LINK_SPEED_10GB:
3906 speed = SPEED_10000;
3908 case VIRTCHNL_LINK_SPEED_20GB:
3909 speed = SPEED_20000;
3911 case VIRTCHNL_LINK_SPEED_25GB:
3912 speed = SPEED_25000;
3914 case VIRTCHNL_LINK_SPEED_40GB:
3915 speed = SPEED_40000;
3918 dev_err(&pf->pdev->dev,
3919 "Cannot detect link speed\n");
3920 aq_ret = I40E_ERR_PARAM;
3924 /* parse data from the queue channel info */
3925 vf->num_tc = tci->num_tc;
3926 for (i = 0; i < vf->num_tc; i++) {
3927 if (tci->list[i].max_tx_rate) {
3928 if (tci->list[i].max_tx_rate > speed) {
3929 dev_err(&pf->pdev->dev,
3930 "Invalid max tx rate %llu specified for VF %d.",
3931 tci->list[i].max_tx_rate,
3933 aq_ret = I40E_ERR_PARAM;
3936 vf->ch[i].max_tx_rate =
3937 tci->list[i].max_tx_rate;
3940 vf->ch[i].num_qps = tci->list[i].count;
3943 /* set this flag only after making sure all inputs are sane */
3944 vf->adq_enabled = true;
3946 /* reset the VF in order to allocate resources */
3947 i40e_vc_reset_vf(vf, true);
3949 return I40E_SUCCESS;
3951 /* send the response to the VF */
3953 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
3958 * i40e_vc_del_qch_msg
3959 * @vf: pointer to the VF info
3960 * @msg: pointer to the msg buffer
3962 static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
3964 struct i40e_pf *pf = vf->pf;
3965 i40e_status aq_ret = 0;
3967 if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
3968 aq_ret = I40E_ERR_PARAM;
3972 if (vf->adq_enabled) {
3973 i40e_del_all_cloud_filters(vf);
3975 vf->adq_enabled = false;
3977 dev_info(&pf->pdev->dev,
3978 "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
3981 dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
3983 aq_ret = I40E_ERR_PARAM;
3986 /* reset the VF in order to allocate resources */
3987 i40e_vc_reset_vf(vf, true);
3989 return I40E_SUCCESS;
3992 return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
3997 * i40e_vc_process_vf_msg
3998 * @pf: pointer to the PF structure
3999 * @vf_id: source VF id
4000 * @v_opcode: operation code
4001 * @v_retval: unused return value code
4002 * @msg: pointer to the msg buffer
4003 * @msglen: msg length
4005 * called from the common aeq/arq handler to
4006 * process a request from a VF
4008 int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
4009 u32 __always_unused v_retval, u8 *msg, u16 msglen)
4011 struct i40e_hw *hw = &pf->hw;
4012 int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
4016 pf->vf_aq_requests++;
4017 if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
4019 vf = &(pf->vf[local_vf_id]);
4021 /* Check if VF is disabled. */
4022 if (test_bit(I40E_VF_STATE_DISABLED, &vf->vf_states))
4023 return I40E_ERR_PARAM;
4025 /* perform basic checks on the msg */
4026 ret = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4029 i40e_vc_send_resp_to_vf(vf, v_opcode, I40E_ERR_PARAM);
4030 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
4031 local_vf_id, v_opcode, msglen);
4033 case VIRTCHNL_STATUS_ERR_PARAM:
4041 case VIRTCHNL_OP_VERSION:
4042 ret = i40e_vc_get_version_msg(vf, msg);
4044 case VIRTCHNL_OP_GET_VF_RESOURCES:
4045 ret = i40e_vc_get_vf_resources_msg(vf, msg);
4046 i40e_vc_notify_vf_link_state(vf);
4048 case VIRTCHNL_OP_RESET_VF:
4049 i40e_vc_reset_vf(vf, false);
4052 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4053 ret = i40e_vc_config_promiscuous_mode_msg(vf, msg);
4055 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4056 ret = i40e_vc_config_queues_msg(vf, msg);
4058 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4059 ret = i40e_vc_config_irq_map_msg(vf, msg);
4061 case VIRTCHNL_OP_ENABLE_QUEUES:
4062 ret = i40e_vc_enable_queues_msg(vf, msg);
4063 i40e_vc_notify_vf_link_state(vf);
4065 case VIRTCHNL_OP_DISABLE_QUEUES:
4066 ret = i40e_vc_disable_queues_msg(vf, msg);
4068 case VIRTCHNL_OP_ADD_ETH_ADDR:
4069 ret = i40e_vc_add_mac_addr_msg(vf, msg);
4071 case VIRTCHNL_OP_DEL_ETH_ADDR:
4072 ret = i40e_vc_del_mac_addr_msg(vf, msg);
4074 case VIRTCHNL_OP_ADD_VLAN:
4075 ret = i40e_vc_add_vlan_msg(vf, msg);
4077 case VIRTCHNL_OP_DEL_VLAN:
4078 ret = i40e_vc_remove_vlan_msg(vf, msg);
4080 case VIRTCHNL_OP_GET_STATS:
4081 ret = i40e_vc_get_stats_msg(vf, msg);
4083 case VIRTCHNL_OP_IWARP:
4084 ret = i40e_vc_iwarp_msg(vf, msg, msglen);
4086 case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
4087 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
4089 case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
4090 ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
4092 case VIRTCHNL_OP_CONFIG_RSS_KEY:
4093 ret = i40e_vc_config_rss_key(vf, msg);
4095 case VIRTCHNL_OP_CONFIG_RSS_LUT:
4096 ret = i40e_vc_config_rss_lut(vf, msg);
4098 case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
4099 ret = i40e_vc_get_rss_hena(vf, msg);
4101 case VIRTCHNL_OP_SET_RSS_HENA:
4102 ret = i40e_vc_set_rss_hena(vf, msg);
4104 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4105 ret = i40e_vc_enable_vlan_stripping(vf, msg);
4107 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4108 ret = i40e_vc_disable_vlan_stripping(vf, msg);
4110 case VIRTCHNL_OP_REQUEST_QUEUES:
4111 ret = i40e_vc_request_queues_msg(vf, msg);
4113 case VIRTCHNL_OP_ENABLE_CHANNELS:
4114 ret = i40e_vc_add_qch_msg(vf, msg);
4116 case VIRTCHNL_OP_DISABLE_CHANNELS:
4117 ret = i40e_vc_del_qch_msg(vf, msg);
4119 case VIRTCHNL_OP_ADD_CLOUD_FILTER:
4120 ret = i40e_vc_add_cloud_filter(vf, msg);
4122 case VIRTCHNL_OP_DEL_CLOUD_FILTER:
4123 ret = i40e_vc_del_cloud_filter(vf, msg);
4125 case VIRTCHNL_OP_UNKNOWN:
4127 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
4128 v_opcode, local_vf_id);
4129 ret = i40e_vc_send_resp_to_vf(vf, v_opcode,
4130 I40E_ERR_NOT_IMPLEMENTED);
4138 * i40e_vc_process_vflr_event
4139 * @pf: pointer to the PF structure
4141 * called from the VFLR irq handler to
4142 * free up VF resources and state variables
4144 int i40e_vc_process_vflr_event(struct i40e_pf *pf)
4146 struct i40e_hw *hw = &pf->hw;
4147 u32 reg, reg_idx, bit_idx;
4151 if (!test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
4154 /* Re-enable the VFLR interrupt cause here, before looking for which
4155 * VF got reset. Otherwise, if another VF gets a reset while the
4156 * first one is being processed, that interrupt will be lost, and
4157 * that VF will be stuck in reset forever.
4159 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
4160 reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
4161 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
4164 clear_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
4165 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
4166 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
4167 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
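/* GLGEN_VFLRSTAT is an array of 32-bit registers, so the absolute
 * VF id selects the register (/32) and the bit within it (%32)
 */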
4168 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
4169 vf = &pf->vf[vf_id];
4170 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
4171 if (reg & BIT(bit_idx))
4172 /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
4173 i40e_reset_vf(vf, true);
4181 * @pf: the physical function
4182 * @vf_id: VF identifier
4184 * Check that the VF is enabled and the VSI exists.
4186 * Returns 0 on success, negative on failure
4188 static int i40e_validate_vf(struct i40e_pf *pf, int vf_id)
4190 struct i40e_vsi *vsi;
4194 if (vf_id >= pf->num_alloc_vfs) {
4195 dev_err(&pf->pdev->dev,
4196 "Invalid VF Identifier %d\n", vf_id);
4200 vf = &pf->vf[vf_id];
4201 vsi = i40e_find_vsi_from_id(pf, vf->lan_vsi_id);
4209 * i40e_ndo_set_vf_mac
4210 * @netdev: network interface device structure
4211 * @vf_id: VF identifier
4212 * @mac: mac address
4214 * program VF mac address
4216 int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4218 struct i40e_netdev_priv *np = netdev_priv(netdev);
4219 struct i40e_vsi *vsi = np->vsi;
4220 struct i40e_pf *pf = vsi->back;
4221 struct i40e_mac_filter *f;
4224 struct hlist_node *h;
4228 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4229 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4233 /* validate the request */
4234 ret = i40e_validate_vf(pf, vf_id);
4238 vf = &pf->vf[vf_id];
4240 /* When the VF is resetting, wait until it is done.
4241 * It can take up to 200 milliseconds,
4242 * but wait for up to 300 milliseconds to be safe.
4243 * Acquire the VSI pointer only after the VF has been
4244 * properly initialized.
4246 for (i = 0; i < 15; i++) {
4247 if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
4251 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4252 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4257 vsi = pf->vsi[vf->lan_vsi_idx];
4259 if (is_multicast_ether_addr(mac)) {
4260 dev_err(&pf->pdev->dev,
4261 "Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
4266 /* Lock once because the add/del_filter functions invoked below
4267 * require mac_filter_hash_lock to be held
4269 spin_lock_bh(&vsi->mac_filter_hash_lock);
4271 /* delete the temporary mac address */
4272 if (!is_zero_ether_addr(vf->default_lan_addr.addr))
4273 i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
4275 /* Delete all the filters for this VSI - we're going to kill it anyway. */
4278 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
4279 __i40e_del_filter(vsi, f);
4281 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4283 /* program mac filter */
4284 if (i40e_sync_vsi_filters(vsi)) {
4285 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
4289 ether_addr_copy(vf->default_lan_addr.addr, mac);
4291 if (is_zero_ether_addr(mac)) {
4292 vf->pf_set_mac = false;
4293 dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id);
4295 vf->pf_set_mac = true;
4296 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n",
4300 /* Force the VF interface down so it has to come back up with the new MAC address */
4303 i40e_vc_reset_vf(vf, true);
4304 dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
4307 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4312 * i40e_ndo_set_vf_port_vlan
4313 * @netdev: network interface device structure
4314 * @vf_id: VF identifier
4315 * @vlan_id: VLAN identifier
4316 * @qos: priority setting
4317 * @vlan_proto: vlan protocol
4319 * program VF vlan id and/or qos
4321 int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
4322 u16 vlan_id, u8 qos, __be16 vlan_proto)
4324 u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
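/* vlanprio packs an 802.1Q TCI-style value: the VLAN id in the low
 * bits and the priority (PCP) shifted into the upper bits
 */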
4325 struct i40e_netdev_priv *np = netdev_priv(netdev);
4326 bool allmulti = false, alluni = false;
4327 struct i40e_pf *pf = np->vsi->back;
4328 struct i40e_vsi *vsi;
4332 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4333 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4337 /* validate the request */
4338 ret = i40e_validate_vf(pf, vf_id);
4342 if ((vlan_id > I40E_MAX_VLANID) || (qos > 7)) {
4343 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n");
4348 if (vlan_proto != htons(ETH_P_8021Q)) {
4349 dev_err(&pf->pdev->dev, "VF VLAN protocol is not supported\n");
4350 ret = -EPROTONOSUPPORT;
4354 vf = &pf->vf[vf_id];
4355 vsi = pf->vsi[vf->lan_vsi_idx];
4356 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4357 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4363 if (le16_to_cpu(vsi->info.pvid) == vlanprio)
4364 /* duplicate request, so just return success */
4367 /* Lock once because multiple functions below iterate the list */
4368 spin_lock_bh(&vsi->mac_filter_hash_lock);
4370 /* Check for condition where there was already a port VLAN ID
4371 * filter set and now it is being deleted by setting it to zero.
4372 * Additionally check for the condition where there was a port
4373 * VLAN but now there is a new and different port VLAN being set.
4374 * Before deleting all the old VLAN filters we must add new ones
4375 * with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
4376 * MAC addresses deleted.
4378 if ((!(vlan_id || qos) ||
4379 vlanprio != le16_to_cpu(vsi->info.pvid)) &&
4381 ret = i40e_add_vlan_all_mac(vsi, I40E_VLAN_ANY);
4383 dev_info(&vsi->back->pdev->dev,
4384 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4385 vsi->back->hw.aq.asq_last_status);
4386 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4391 if (vsi->info.pvid) {
4392 /* remove all filters on the old VLAN */
4393 i40e_rm_vlan_all_mac(vsi, (le16_to_cpu(vsi->info.pvid) &
4397 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4399 /* disable promisc modes in case they were enabled */
4400 ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
4403 dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
4408 ret = i40e_vsi_add_pvid(vsi, vlanprio);
4410 i40e_vsi_remove_pvid(vsi);
4411 spin_lock_bh(&vsi->mac_filter_hash_lock);
4414 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n",
4415 vlan_id, qos, vf_id);
4417 /* add new VLAN filter for each MAC */
4418 ret = i40e_add_vlan_all_mac(vsi, vlan_id);
4420 dev_info(&vsi->back->pdev->dev,
4421 "add VF VLAN failed, ret=%d aq_err=%d\n", ret,
4422 vsi->back->hw.aq.asq_last_status);
4423 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4427 /* remove the previously added non-VLAN MAC filters */
4428 i40e_rm_vlan_all_mac(vsi, I40E_VLAN_ANY);
4431 spin_unlock_bh(&vsi->mac_filter_hash_lock);
4433 if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
4436 if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
4439 /* Schedule the worker thread to take care of applying changes */
4440 i40e_service_event_schedule(vsi->back);
4443 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n");
4447 /* The port VLAN needs to be saved across resets, the same as the
4448 * default LAN MAC address.
4450 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
4452 i40e_vc_reset_vf(vf, true);
4453 /* During reset the VF got a new VSI, so refresh the pointer. */
4454 vsi = pf->vsi[vf->lan_vsi_idx];
4456 ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
4458 dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
4465 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4470 * i40e_ndo_set_vf_bw
4471 * @netdev: network interface device structure
4472 * @vf_id: VF identifier
4473 * @min_tx_rate: Minimum Tx rate
4474 * @max_tx_rate: Maximum Tx rate
4476 * configure VF Tx rate
4478 int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
4481 struct i40e_netdev_priv *np = netdev_priv(netdev);
4482 struct i40e_pf *pf = np->vsi->back;
4483 struct i40e_vsi *vsi;
4487 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4488 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4492 /* validate the request */
4493 ret = i40e_validate_vf(pf, vf_id);
4498 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) specified for VF %d: min Tx rate is not supported.\n",
4499 min_tx_rate, vf_id);
4504 vf = &pf->vf[vf_id];
4505 vsi = pf->vsi[vf->lan_vsi_idx];
4506 if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
4507 dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
4513 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
4517 vf->tx_rate = max_tx_rate;
4519 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
4524 * i40e_ndo_get_vf_config
4525 * @netdev: network interface device structure
4526 * @vf_id: VF identifier
4527 * @ivi: VF configuration structure
4529 * return VF configuration
4531 int i40e_ndo_get_vf_config(struct net_device *netdev,
4532 int vf_id, struct ifla_vf_info *ivi)
4534 struct i40e_netdev_priv *np = netdev_priv(netdev);
4535 struct i40e_vsi *vsi = np->vsi;
4536 struct i40e_pf *pf = vsi->back;
4540 if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
4541 dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
4545 /* validate the request */
4546 ret = i40e_validate_vf(pf, vf_id);
4550 vf = &pf->vf[vf_id];
4551 /* first vsi is always the LAN vsi */
4552 vsi = pf->vsi[vf->lan_vsi_idx];
4560 ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
4562 ivi->max_tx_rate = vf->tx_rate;
4563 ivi->min_tx_rate = 0;
4564 ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
4565 ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
4566 I40E_VLAN_PRIORITY_SHIFT;
4567 if (!vf->link_forced)
4568 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4569 else if (vf->link_up)
4570 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4572 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4573 ivi->spoofchk = vf->spoofchk;
4574 ivi->trusted = vf->trusted;
4578 clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);

/**
 * i40e_ndo_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link: required link state
 *
 * Set the link state of a specified VF, regardless of physical link state
 **/
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
        struct virtchnl_pf_event pfe;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vsi *vsi;
        unsigned long q_map;
        struct i40e_vf *vf;
        int abs_vf_id;
        int ret = 0;
        int tmp;

        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
                dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
                return -EAGAIN;
        }

        /* validate the request */
        if (vf_id >= pf->num_alloc_vfs) {
                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
                ret = -EINVAL;
                goto error_out;
        }

        vf = &pf->vf[vf_id];
        abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = PF_EVENT_SEVERITY_INFO;

        switch (link) {
        case IFLA_VF_LINK_STATE_AUTO:
                vf->link_forced = false;
                vf->is_disabled_from_host = false;
                /* reset needed to reinit VF resources */
                i40e_vc_reset_vf(vf, true);
                pfe.event_data.link_event.link_status =
                        pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
                pfe.event_data.link_event.link_speed =
                        (enum virtchnl_link_speed)
                        pf->hw.phy.link_info.link_speed;
                break;
        case IFLA_VF_LINK_STATE_ENABLE:
                vf->link_forced = true;
                vf->link_up = true;
                vf->is_disabled_from_host = false;
                /* reset needed to reinit VF resources */
                i40e_vc_reset_vf(vf, true);
                pfe.event_data.link_event.link_status = true;
                pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                vf->link_forced = true;
                vf->link_up = false;
                pfe.event_data.link_event.link_status = false;
                pfe.event_data.link_event.link_speed = 0;

                vsi = pf->vsi[vf->lan_vsi_idx];
                q_map = BIT(vsi->num_queue_pairs) - 1;
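                /* Example of the mask above: a VF with num_queue_pairs == 4
                 * yields q_map = BIT(4) - 1 = 0xf, selecting queues 0-3.
                 */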

                vf->is_disabled_from_host = true;

                /* Try to stop both Tx&Rx rings even if one of the calls fails
                 * to ensure we stop the rings even in case of errors.
                 * If any of them returns with an error then the first
                 * error that occurred will be returned.
                 */
                tmp = i40e_ctrl_vf_tx_rings(vsi, q_map, false);
                ret = i40e_ctrl_vf_rx_rings(vsi, q_map, false);

                ret = tmp ? tmp : ret;
                break;
        default:
                ret = -EINVAL;
                goto error_out;
        }
        /* Notify the VF of its new link state */
        i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
                               0, (u8 *)&pfe, sizeof(pfe), NULL);

error_out:
        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
        return ret;
}
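
/* Usage sketch (illustrative): the three IFLA_VF_LINK_STATE_* values handled
 * above map to iproute2's "state" keyword:
 *
 *   ip link set dev enp1s0f0 vf 0 state auto     # follow physical link
 *   ip link set dev enp1s0f0 vf 0 state enable   # force link up
 *   ip link set dev enp1s0f0 vf 0 state disable  # force link down, stop rings
 *
 * The device name is a placeholder.
 */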

/**
 * i40e_ndo_set_vf_spoofchk
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @enable: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking
 **/
int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        struct i40e_vsi_context ctxt;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf;
        int ret = 0;

        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
                dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
                return -EAGAIN;
        }

        /* validate the request */
        if (vf_id >= pf->num_alloc_vfs) {
                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
                ret = -EINVAL;
                goto out;
        }

        vf = &(pf->vf[vf_id]);
        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n",
                        vf_id);
                ret = -EAGAIN;
                goto out;
        }

        if (enable == vf->spoofchk)
                goto out;

        vf->spoofchk = enable;
        memset(&ctxt, 0, sizeof(ctxt));
        ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid;
        ctxt.pf_num = pf->hw.pf_id;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
        if (enable)
                ctxt.info.sec_flags |= (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
                                        I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
        if (ret) {
                dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
                        ret);
                ret = -EIO;
        }
out:
        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
        return ret;
}
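
/* Usage sketch (illustrative):
 *
 *   ip link set dev enp1s0f0 vf 0 spoofchk on
 *
 * With the MAC_CHK/VLAN_CHK security flags set above, the hardware drops VF
 * Tx frames whose source MAC or VLAN does not match what the PF assigned.
 */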

/**
 * i40e_ndo_set_vf_trust
 * @netdev: network interface device structure of the PF
 * @vf_id: VF identifier
 * @setting: trust setting
 *
 * Enable or disable VF trust setting
 **/
int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_vf *vf;
        int ret = 0;

        if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
                dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
                return -EAGAIN;
        }

        /* validate the request */
        if (vf_id >= pf->num_alloc_vfs) {
                dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
                ret = -EINVAL;
                goto out;
        }

        if (pf->flags & I40E_FLAG_MFP_ENABLED) {
                dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
                ret = -EINVAL;
                goto out;
        }

        vf = &pf->vf[vf_id];

        if (setting == vf->trusted)
                goto out;

        vf->trusted = setting;
        i40e_vc_reset_vf(vf, true);
        dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
                 vf_id, setting ? "" : "un");

        if (vf->adq_enabled && !vf->trusted) {
                dev_info(&pf->pdev->dev,
                         "VF %u no longer Trusted, deleting all cloud filters\n",
                         vf_id);
                i40e_del_all_cloud_filters(vf);
        }

out:
        clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
        return ret;
}
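
/* Usage sketch (illustrative):
 *
 *   ip link set dev enp1s0f0 vf 0 trust on
 *
 * Trust gates privileged VF requests (for example promiscuous mode); as the
 * code above shows, revoking trust also deletes any ADq cloud filters.
 */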

/**
 * i40e_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-127)
 * @vf_stats: pointer to the OS memory to be initialized
 **/
int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
                      struct ifla_vf_stats *vf_stats)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
        struct i40e_eth_stats *stats;
        struct i40e_vsi *vsi;
        struct i40e_vf *vf;

        /* validate the request */
        if (i40e_validate_vf(pf, vf_id))
                return -EINVAL;

        vf = &pf->vf[vf_id];
        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
                dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
                return -EBUSY;
        }

        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi)
                return -EINVAL;

        i40e_update_eth_stats(vsi);
        stats = &vsi->eth_stats;

        memset(vf_stats, 0, sizeof(*vf_stats));

        vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
                stats->rx_multicast;
        vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
                stats->tx_multicast;
        vf_stats->rx_bytes   = stats->rx_bytes;
        vf_stats->tx_bytes   = stats->tx_bytes;
        vf_stats->broadcast  = stats->rx_broadcast;
        vf_stats->multicast  = stats->rx_multicast;
        vf_stats->rx_dropped = stats->rx_discards;
        vf_stats->tx_dropped = stats->tx_discards;

        return 0;
}
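
/* Usage sketch (illustrative): these counters are reported through the
 * IFLA_VF_STATS netlink attribute and show up in the per-VF section of
 * "ip -s link show dev enp1s0f0" (device name is a placeholder).
 */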