1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
4 #include <linux/etherdevice.h>
5 #include <linux/of_net.h>
11 #include "i40e_diag.h"
12 #include <net/udp_tunnel.h>
13 /* All i40e tracepoints are defined by the include below, which
14 * must be included exactly once across the whole kernel with
15 * CREATE_TRACE_POINTS defined
17 #define CREATE_TRACE_POINTS
18 #include "i40e_trace.h"
20 const char i40e_driver_name[] = "i40e";
21 static const char i40e_driver_string[] =
22 "Intel(R) Ethernet Connection XL710 Network Driver";
26 #define DRV_VERSION_MAJOR 2
27 #define DRV_VERSION_MINOR 3
28 #define DRV_VERSION_BUILD 2
29 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
30 __stringify(DRV_VERSION_MINOR) "." \
31 __stringify(DRV_VERSION_BUILD) DRV_KERN
32 const char i40e_driver_version_str[] = DRV_VERSION;
33 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
35 /* a few forward declarations */
36 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
37 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
38 static int i40e_add_vsi(struct i40e_vsi *vsi);
39 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
40 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
41 static int i40e_setup_misc_vector(struct i40e_pf *pf);
42 static void i40e_determine_queue_usage(struct i40e_pf *pf);
43 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
44 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
45 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
47 static int i40e_reset(struct i40e_pf *pf);
48 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
49 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
50 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
51 static int i40e_get_capabilities(struct i40e_pf *pf,
52 enum i40e_admin_queue_opc list_type);
55 /* i40e_pci_tbl - PCI Device ID Table
57 * Last entry must be all 0s
59 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
60 * Class, Class Mask, private data (not used) }
62 static const struct pci_device_id i40e_pci_tbl[] = {
63 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
64 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
65 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
66 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
67 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
68 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
69 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
70 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
71 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
72 {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
73 {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
74 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
75 {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
76 {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
77 {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
78 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
79 {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
80 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
81 {PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
82 /* required last entry */
85 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
87 #define I40E_MAX_VF_COUNT 128
88 static int debug = -1;
89 module_param(debug, uint, 0);
90 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
92 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
93 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
94 MODULE_LICENSE("GPL");
95 MODULE_VERSION(DRV_VERSION);
97 static struct workqueue_struct *i40e_wq;
100 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
101 * @hw: pointer to the HW structure
102 * @mem: ptr to mem struct to fill out
103 * @size: size of memory requested
104 * @alignment: what to align the allocation to
106 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
107 u64 size, u32 alignment)
109 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
111 mem->size = ALIGN(size, alignment);
112 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
113 &mem->pa, GFP_KERNEL);
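/* Note: the requested size is rounded up to the alignment with ALIGN()
 * before allocation, and dma_zalloc_coherent() returns zeroed, DMA-mapped
 * memory with the bus address stored in mem->pa for the shared
 * (OS-independent) code to program into the hardware.
 */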
121 * i40e_free_dma_mem_d - OS specific memory free for shared code
122 * @hw: pointer to the HW structure
123 * @mem: ptr to mem struct to free
125 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
127 struct i40e_pf *pf = (struct i40e_pf *)hw->back;
129 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
138 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
139 * @hw: pointer to the HW structure
140 * @mem: ptr to mem struct to fill out
141 * @size: size of memory requested
143 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
147 mem->va = kzalloc(size, GFP_KERNEL);
156 * i40e_free_virt_mem_d - OS specific memory free for shared code
157 * @hw: pointer to the HW structure
158 * @mem: ptr to mem struct to free
160 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
162 /* it's ok to kfree a NULL pointer */
171 * i40e_get_lump - find a lump of free generic resource
172 * @pf: board private structure
173 * @pile: the pile of resource to search
174 * @needed: the number of items needed
175 * @id: an owner id to stick on the items assigned
177 * Returns the base item index of the lump, or negative for error
179 * The search_hint trick and lack of advanced fit-finding only work
180 * because we're highly likely to have all the same size lump requests.
181 * Linear search time and any fragmentation should be minimal.
183 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
189 if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
190 dev_info(&pf->pdev->dev,
191 "param err: pile=%s needed=%d id=0x%04x\n",
192 pile ? "<valid>" : "<null>", needed, id);
196 /* start the linear search with an imperfect hint */
197 i = pile->search_hint;
198 while (i < pile->num_entries) {
199 /* skip already allocated entries */
200 if (pile->list[i] & I40E_PILE_VALID_BIT) {
205 /* do we have enough in this lump? */
206 for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
207 if (pile->list[i+j] & I40E_PILE_VALID_BIT)
212 /* there was enough, so assign it to the requestor */
213 for (j = 0; j < needed; j++)
214 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
216 pile->search_hint = i + j;
220 /* not enough, so skip over it and continue looking */
228 * i40e_put_lump - return a lump of generic resource
229 * @pile: the pile of resource to search
230 * @index: the base item index
231 * @id: the owner id of the items assigned
233 * Returns the count of items in the lump
235 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
237 int valid_id = (id | I40E_PILE_VALID_BIT);
241 if (!pile || index >= pile->num_entries)
245 i < pile->num_entries && pile->list[i] == valid_id;
251 if (count && index < pile->search_hint)
252 pile->search_hint = index;
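/* Illustrative usage (not part of the driver): a caller reserves a
 * contiguous block of entries from a pile and later returns it, e.g.
 *
 *	base = i40e_get_lump(pf, pf->qp_pile, needed, vsi->idx);
 *	if (base < 0)
 *		return base;
 *	...
 *	i40e_put_lump(pf->qp_pile, base, vsi->idx);
 *
 * The owner id stored alongside I40E_PILE_VALID_BIT lets i40e_put_lump
 * free only entries that still belong to that owner.
 */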
258 * i40e_find_vsi_from_id - searches for the vsi with the given id
259 * @pf: the pf structure to search for the vsi
260 * @id: id of the vsi it is searching for
262 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
266 for (i = 0; i < pf->num_alloc_vsi; i++)
267 if (pf->vsi[i] && (pf->vsi[i]->id == id))
274 * i40e_service_event_schedule - Schedule the service task to wake up
275 * @pf: board private structure
277 * If not already scheduled, this puts the task into the work queue
279 void i40e_service_event_schedule(struct i40e_pf *pf)
281 if (!test_bit(__I40E_DOWN, pf->state) &&
282 !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
283 queue_work(i40e_wq, &pf->service_task);
287 * i40e_tx_timeout - Respond to a Tx Hang
288 * @netdev: network interface device structure
290 * If any port has noticed a Tx timeout, it is likely that the whole
291 * device is munged, not just the one netdev port, so go for the full reset.
294 static void i40e_tx_timeout(struct net_device *netdev)
296 struct i40e_netdev_priv *np = netdev_priv(netdev);
297 struct i40e_vsi *vsi = np->vsi;
298 struct i40e_pf *pf = vsi->back;
299 struct i40e_ring *tx_ring = NULL;
300 unsigned int i, hung_queue = 0;
303 pf->tx_timeout_count++;
305 /* find the stopped queue the same way the stack does */
306 for (i = 0; i < netdev->num_tx_queues; i++) {
307 struct netdev_queue *q;
308 unsigned long trans_start;
310 q = netdev_get_tx_queue(netdev, i);
311 trans_start = q->trans_start;
312 if (netif_xmit_stopped(q) &&
314 (trans_start + netdev->watchdog_timeo))) {
320 if (i == netdev->num_tx_queues) {
321 netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
323 /* now that we have an index, find the tx_ring struct */
324 for (i = 0; i < vsi->num_queue_pairs; i++) {
325 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
327 vsi->tx_rings[i]->queue_index) {
328 tx_ring = vsi->tx_rings[i];
335 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
336 pf->tx_timeout_recovery_level = 1; /* reset after some time */
337 else if (time_before(jiffies,
338 (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
339 return; /* don't do any new action before the next timeout */
341 /* don't kick off another recovery if one is already pending */
342 if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
346 head = i40e_get_head(tx_ring);
347 /* Read interrupt register */
348 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
350 I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
351 tx_ring->vsi->base_vector - 1));
353 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
355 netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
356 vsi->seid, hung_queue, tx_ring->next_to_clean,
357 head, tx_ring->next_to_use,
358 readl(tx_ring->tail), val);
361 pf->tx_timeout_last_recovery = jiffies;
362 netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
363 pf->tx_timeout_recovery_level, hung_queue);
365 switch (pf->tx_timeout_recovery_level) {
367 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
370 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
373 set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
376 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
380 i40e_service_event_schedule(pf);
381 pf->tx_timeout_recovery_level++;
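/* Note: recovery escalates with repeated timeouts: the first level
 * requests a PF reset, the next a core reset, then a global reset, and
 * beyond that the driver only logs that recovery was unsuccessful (see
 * the switch above). The requested reset is then carried out by the
 * service task scheduled just above.
 */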
385 * i40e_get_vsi_stats_struct - Get System Network Statistics
386 * @vsi: the VSI we care about
388 * Returns the address of the device statistics structure.
389 * The statistics are actually updated from the service task.
391 struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
393 return &vsi->net_stats;
397 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
398 * @ring: Tx ring to get statistics from
399 * @stats: statistics entry to be updated
401 static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
402 struct rtnl_link_stats64 *stats)
408 start = u64_stats_fetch_begin_irq(&ring->syncp);
409 packets = ring->stats.packets;
410 bytes = ring->stats.bytes;
411 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
413 stats->tx_packets += packets;
414 stats->tx_bytes += bytes;
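/* Note: the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop
 * above re-reads the counters until the ring's seqcount is stable, so the
 * packets/bytes pair is consistent even while the data path updates it
 * concurrently on another CPU.
 */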
418 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
419 * @netdev: network interface device structure
420 * @stats: data structure to store statistics
422 * Returns the address of the device statistics structure.
423 * The statistics are actually updated from the service task.
425 static void i40e_get_netdev_stats_struct(struct net_device *netdev,
426 struct rtnl_link_stats64 *stats)
428 struct i40e_netdev_priv *np = netdev_priv(netdev);
429 struct i40e_vsi *vsi = np->vsi;
430 struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
431 struct i40e_ring *ring;
434 if (test_bit(__I40E_VSI_DOWN, vsi->state))
441 for (i = 0; i < vsi->num_queue_pairs; i++) {
445 ring = READ_ONCE(vsi->tx_rings[i]);
448 i40e_get_netdev_stats_struct_tx(ring, stats);
450 if (i40e_enabled_xdp_vsi(vsi)) {
451 ring = READ_ONCE(vsi->xdp_rings[i]);
454 i40e_get_netdev_stats_struct_tx(ring, stats);
457 ring = READ_ONCE(vsi->rx_rings[i]);
461 start = u64_stats_fetch_begin_irq(&ring->syncp);
462 packets = ring->stats.packets;
463 bytes = ring->stats.bytes;
464 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
466 stats->rx_packets += packets;
467 stats->rx_bytes += bytes;
472 /* following stats updated by i40e_watchdog_subtask() */
473 stats->multicast = vsi_stats->multicast;
474 stats->tx_errors = vsi_stats->tx_errors;
475 stats->tx_dropped = vsi_stats->tx_dropped;
476 stats->rx_errors = vsi_stats->rx_errors;
477 stats->rx_dropped = vsi_stats->rx_dropped;
478 stats->rx_crc_errors = vsi_stats->rx_crc_errors;
479 stats->rx_length_errors = vsi_stats->rx_length_errors;
483 * i40e_vsi_reset_stats - Resets all stats of the given vsi
484 * @vsi: the VSI to have its stats reset
486 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
488 struct rtnl_link_stats64 *ns;
494 ns = i40e_get_vsi_stats_struct(vsi);
495 memset(ns, 0, sizeof(*ns));
496 memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
497 memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
498 memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
499 if (vsi->rx_rings && vsi->rx_rings[0]) {
500 for (i = 0; i < vsi->num_queue_pairs; i++) {
501 memset(&vsi->rx_rings[i]->stats, 0,
502 sizeof(vsi->rx_rings[i]->stats));
503 memset(&vsi->rx_rings[i]->rx_stats, 0,
504 sizeof(vsi->rx_rings[i]->rx_stats));
505 memset(&vsi->tx_rings[i]->stats, 0,
506 sizeof(vsi->tx_rings[i]->stats));
507 memset(&vsi->tx_rings[i]->tx_stats, 0,
508 sizeof(vsi->tx_rings[i]->tx_stats));
511 vsi->stat_offsets_loaded = false;
515 * i40e_pf_reset_stats - Reset all of the stats for the given PF
516 * @pf: the PF to be reset
518 void i40e_pf_reset_stats(struct i40e_pf *pf)
522 memset(&pf->stats, 0, sizeof(pf->stats));
523 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
524 pf->stat_offsets_loaded = false;
526 for (i = 0; i < I40E_MAX_VEB; i++) {
528 memset(&pf->veb[i]->stats, 0,
529 sizeof(pf->veb[i]->stats));
530 memset(&pf->veb[i]->stats_offsets, 0,
531 sizeof(pf->veb[i]->stats_offsets));
532 pf->veb[i]->stat_offsets_loaded = false;
535 pf->hw_csum_rx_error = 0;
539 * i40e_stat_update48 - read and update a 48 bit stat from the chip
540 * @hw: ptr to the hardware info
541 * @hireg: the high 32 bit reg to read
542 * @loreg: the low 32 bit reg to read
543 * @offset_loaded: has the initial offset been loaded yet
544 * @offset: ptr to current offset value
545 * @stat: ptr to the stat
547 * Since the device stats are not reset at PFReset, they likely will not
548 * be zeroed when the driver starts. We'll save the first values read
549 * and use them as offsets to be subtracted from the raw values in order
550 * to report stats that count from zero. In the process, we also manage
551 * the potential roll-over.
553 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
554 bool offset_loaded, u64 *offset, u64 *stat)
558 if (hw->device_id == I40E_DEV_ID_QEMU) {
559 new_data = rd32(hw, loreg);
560 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
562 new_data = rd64(hw, loreg);
566 if (likely(new_data >= *offset))
567 *stat = new_data - *offset;
569 *stat = (new_data + BIT_ULL(48)) - *offset;
570 *stat &= 0xFFFFFFFFFFFFULL;
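/* Worked example (illustrative): if the saved offset is 0xFFFFFFFFFFF0
 * and the next raw 48-bit read is 0x10, the counter has wrapped, so the
 * reported value is (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20, masked
 * back down to 48 bits.
 */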
574 * i40e_stat_update32 - read and update a 32 bit stat from the chip
575 * @hw: ptr to the hardware info
576 * @reg: the hw reg to read
577 * @offset_loaded: has the initial offset been loaded yet
578 * @offset: ptr to current offset value
579 * @stat: ptr to the stat
581 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
582 bool offset_loaded, u64 *offset, u64 *stat)
586 new_data = rd32(hw, reg);
589 if (likely(new_data >= *offset))
590 *stat = (u32)(new_data - *offset);
592 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
596 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
597 * @hw: ptr to the hardware info
598 * @reg: the hw reg to read and clear
599 * @stat: ptr to the stat
601 static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
603 u32 new_data = rd32(hw, reg);
605 wr32(hw, reg, 1); /* must write a nonzero value to clear register */
610 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
611 * @vsi: the VSI to be updated
613 void i40e_update_eth_stats(struct i40e_vsi *vsi)
615 int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
616 struct i40e_pf *pf = vsi->back;
617 struct i40e_hw *hw = &pf->hw;
618 struct i40e_eth_stats *oes;
619 struct i40e_eth_stats *es; /* device's eth stats */
621 es = &vsi->eth_stats;
622 oes = &vsi->eth_stats_offsets;
624 /* Gather up the stats that the hw collects */
625 i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
626 vsi->stat_offsets_loaded,
627 &oes->tx_errors, &es->tx_errors);
628 i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
629 vsi->stat_offsets_loaded,
630 &oes->rx_discards, &es->rx_discards);
631 i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
632 vsi->stat_offsets_loaded,
633 &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
638 i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
639 I40E_GLV_GORCL(stat_idx),
640 vsi->stat_offsets_loaded,
641 &oes->rx_bytes, &es->rx_bytes);
642 i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
643 I40E_GLV_UPRCL(stat_idx),
644 vsi->stat_offsets_loaded,
645 &oes->rx_unicast, &es->rx_unicast);
646 i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
647 I40E_GLV_MPRCL(stat_idx),
648 vsi->stat_offsets_loaded,
649 &oes->rx_multicast, &es->rx_multicast);
650 i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
651 I40E_GLV_BPRCL(stat_idx),
652 vsi->stat_offsets_loaded,
653 &oes->rx_broadcast, &es->rx_broadcast);
655 i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
656 I40E_GLV_GOTCL(stat_idx),
657 vsi->stat_offsets_loaded,
658 &oes->tx_bytes, &es->tx_bytes);
659 i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
660 I40E_GLV_UPTCL(stat_idx),
661 vsi->stat_offsets_loaded,
662 &oes->tx_unicast, &es->tx_unicast);
663 i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
664 I40E_GLV_MPTCL(stat_idx),
665 vsi->stat_offsets_loaded,
666 &oes->tx_multicast, &es->tx_multicast);
667 i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
668 I40E_GLV_BPTCL(stat_idx),
669 vsi->stat_offsets_loaded,
670 &oes->tx_broadcast, &es->tx_broadcast);
671 vsi->stat_offsets_loaded = true;
675 * i40e_update_veb_stats - Update Switch component statistics
676 * @veb: the VEB being updated
678 static void i40e_update_veb_stats(struct i40e_veb *veb)
680 struct i40e_pf *pf = veb->pf;
681 struct i40e_hw *hw = &pf->hw;
682 struct i40e_eth_stats *oes;
683 struct i40e_eth_stats *es; /* device's eth stats */
684 struct i40e_veb_tc_stats *veb_oes;
685 struct i40e_veb_tc_stats *veb_es;
688 idx = veb->stats_idx;
690 oes = &veb->stats_offsets;
691 veb_es = &veb->tc_stats;
692 veb_oes = &veb->tc_stats_offsets;
694 /* Gather up the stats that the hw collects */
695 i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
696 veb->stat_offsets_loaded,
697 &oes->tx_discards, &es->tx_discards);
698 if (hw->revision_id > 0)
699 i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
700 veb->stat_offsets_loaded,
701 &oes->rx_unknown_protocol,
702 &es->rx_unknown_protocol);
703 i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
704 veb->stat_offsets_loaded,
705 &oes->rx_bytes, &es->rx_bytes);
706 i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
707 veb->stat_offsets_loaded,
708 &oes->rx_unicast, &es->rx_unicast);
709 i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
710 veb->stat_offsets_loaded,
711 &oes->rx_multicast, &es->rx_multicast);
712 i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
713 veb->stat_offsets_loaded,
714 &oes->rx_broadcast, &es->rx_broadcast);
716 i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
717 veb->stat_offsets_loaded,
718 &oes->tx_bytes, &es->tx_bytes);
719 i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
720 veb->stat_offsets_loaded,
721 &oes->tx_unicast, &es->tx_unicast);
722 i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
723 veb->stat_offsets_loaded,
724 &oes->tx_multicast, &es->tx_multicast);
725 i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
726 veb->stat_offsets_loaded,
727 &oes->tx_broadcast, &es->tx_broadcast);
728 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
729 i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
730 I40E_GLVEBTC_RPCL(i, idx),
731 veb->stat_offsets_loaded,
732 &veb_oes->tc_rx_packets[i],
733 &veb_es->tc_rx_packets[i]);
734 i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
735 I40E_GLVEBTC_RBCL(i, idx),
736 veb->stat_offsets_loaded,
737 &veb_oes->tc_rx_bytes[i],
738 &veb_es->tc_rx_bytes[i]);
739 i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
740 I40E_GLVEBTC_TPCL(i, idx),
741 veb->stat_offsets_loaded,
742 &veb_oes->tc_tx_packets[i],
743 &veb_es->tc_tx_packets[i]);
744 i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
745 I40E_GLVEBTC_TBCL(i, idx),
746 veb->stat_offsets_loaded,
747 &veb_oes->tc_tx_bytes[i],
748 &veb_es->tc_tx_bytes[i]);
750 veb->stat_offsets_loaded = true;
754 * i40e_update_vsi_stats - Update the vsi statistics counters.
755 * @vsi: the VSI to be updated
757 * There are a few instances where we store the same stat in a
758 * couple of different structs. This is partly because we have
759 * the netdev stats that need to be filled out, which is slightly
760 * different from the "eth_stats" defined by the chip and used in
761 * VF communications. We sort it out here.
763 static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
765 struct i40e_pf *pf = vsi->back;
766 struct rtnl_link_stats64 *ons;
767 struct rtnl_link_stats64 *ns; /* netdev stats */
768 struct i40e_eth_stats *oes;
769 struct i40e_eth_stats *es; /* device's eth stats */
770 u32 tx_restart, tx_busy;
781 if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
782 test_bit(__I40E_CONFIG_BUSY, pf->state))
785 ns = i40e_get_vsi_stats_struct(vsi);
786 ons = &vsi->net_stats_offsets;
787 es = &vsi->eth_stats;
788 oes = &vsi->eth_stats_offsets;
790 /* Gather up the netdev and vsi stats that the driver collects
791 * on the fly during packet processing
795 tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
799 for (q = 0; q < vsi->num_queue_pairs; q++) {
801 p = READ_ONCE(vsi->tx_rings[q]);
806 start = u64_stats_fetch_begin_irq(&p->syncp);
807 packets = p->stats.packets;
808 bytes = p->stats.bytes;
809 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
812 tx_restart += p->tx_stats.restart_queue;
813 tx_busy += p->tx_stats.tx_busy;
814 tx_linearize += p->tx_stats.tx_linearize;
815 tx_force_wb += p->tx_stats.tx_force_wb;
818 p = READ_ONCE(vsi->rx_rings[q]);
823 start = u64_stats_fetch_begin_irq(&p->syncp);
824 packets = p->stats.packets;
825 bytes = p->stats.bytes;
826 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
829 rx_buf += p->rx_stats.alloc_buff_failed;
830 rx_page += p->rx_stats.alloc_page_failed;
833 vsi->tx_restart = tx_restart;
834 vsi->tx_busy = tx_busy;
835 vsi->tx_linearize = tx_linearize;
836 vsi->tx_force_wb = tx_force_wb;
837 vsi->rx_page_failed = rx_page;
838 vsi->rx_buf_failed = rx_buf;
840 ns->rx_packets = rx_p;
842 ns->tx_packets = tx_p;
845 /* update netdev stats from eth stats */
846 i40e_update_eth_stats(vsi);
847 ons->tx_errors = oes->tx_errors;
848 ns->tx_errors = es->tx_errors;
849 ons->multicast = oes->rx_multicast;
850 ns->multicast = es->rx_multicast;
851 ons->rx_dropped = oes->rx_discards;
852 ns->rx_dropped = es->rx_discards;
853 ons->tx_dropped = oes->tx_discards;
854 ns->tx_dropped = es->tx_discards;
856 /* pull in a couple PF stats if this is the main vsi */
857 if (vsi == pf->vsi[pf->lan_vsi]) {
858 ns->rx_crc_errors = pf->stats.crc_errors;
859 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
860 ns->rx_length_errors = pf->stats.rx_length_errors;
865 * i40e_update_pf_stats - Update the PF statistics counters.
866 * @pf: the PF to be updated
868 static void i40e_update_pf_stats(struct i40e_pf *pf)
870 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
871 struct i40e_hw_port_stats *nsd = &pf->stats;
872 struct i40e_hw *hw = &pf->hw;
876 i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
877 I40E_GLPRT_GORCL(hw->port),
878 pf->stat_offsets_loaded,
879 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
880 i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
881 I40E_GLPRT_GOTCL(hw->port),
882 pf->stat_offsets_loaded,
883 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
884 i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
885 pf->stat_offsets_loaded,
886 &osd->eth.rx_discards,
887 &nsd->eth.rx_discards);
888 i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
889 I40E_GLPRT_UPRCL(hw->port),
890 pf->stat_offsets_loaded,
891 &osd->eth.rx_unicast,
892 &nsd->eth.rx_unicast);
893 i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
894 I40E_GLPRT_MPRCL(hw->port),
895 pf->stat_offsets_loaded,
896 &osd->eth.rx_multicast,
897 &nsd->eth.rx_multicast);
898 i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
899 I40E_GLPRT_BPRCL(hw->port),
900 pf->stat_offsets_loaded,
901 &osd->eth.rx_broadcast,
902 &nsd->eth.rx_broadcast);
903 i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
904 I40E_GLPRT_UPTCL(hw->port),
905 pf->stat_offsets_loaded,
906 &osd->eth.tx_unicast,
907 &nsd->eth.tx_unicast);
908 i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
909 I40E_GLPRT_MPTCL(hw->port),
910 pf->stat_offsets_loaded,
911 &osd->eth.tx_multicast,
912 &nsd->eth.tx_multicast);
913 i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
914 I40E_GLPRT_BPTCL(hw->port),
915 pf->stat_offsets_loaded,
916 &osd->eth.tx_broadcast,
917 &nsd->eth.tx_broadcast);
919 i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
920 pf->stat_offsets_loaded,
921 &osd->tx_dropped_link_down,
922 &nsd->tx_dropped_link_down);
924 i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
925 pf->stat_offsets_loaded,
926 &osd->crc_errors, &nsd->crc_errors);
928 i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
929 pf->stat_offsets_loaded,
930 &osd->illegal_bytes, &nsd->illegal_bytes);
932 i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
933 pf->stat_offsets_loaded,
934 &osd->mac_local_faults,
935 &nsd->mac_local_faults);
936 i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
937 pf->stat_offsets_loaded,
938 &osd->mac_remote_faults,
939 &nsd->mac_remote_faults);
941 i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
942 pf->stat_offsets_loaded,
943 &osd->rx_length_errors,
944 &nsd->rx_length_errors);
946 i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
947 pf->stat_offsets_loaded,
948 &osd->link_xon_rx, &nsd->link_xon_rx);
949 i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
950 pf->stat_offsets_loaded,
951 &osd->link_xon_tx, &nsd->link_xon_tx);
952 i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
953 pf->stat_offsets_loaded,
954 &osd->link_xoff_rx, &nsd->link_xoff_rx);
955 i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
956 pf->stat_offsets_loaded,
957 &osd->link_xoff_tx, &nsd->link_xoff_tx);
959 for (i = 0; i < 8; i++) {
960 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
961 pf->stat_offsets_loaded,
962 &osd->priority_xoff_rx[i],
963 &nsd->priority_xoff_rx[i]);
964 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
965 pf->stat_offsets_loaded,
966 &osd->priority_xon_rx[i],
967 &nsd->priority_xon_rx[i]);
968 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
969 pf->stat_offsets_loaded,
970 &osd->priority_xon_tx[i],
971 &nsd->priority_xon_tx[i]);
972 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
973 pf->stat_offsets_loaded,
974 &osd->priority_xoff_tx[i],
975 &nsd->priority_xoff_tx[i]);
976 i40e_stat_update32(hw,
977 I40E_GLPRT_RXON2OFFCNT(hw->port, i),
978 pf->stat_offsets_loaded,
979 &osd->priority_xon_2_xoff[i],
980 &nsd->priority_xon_2_xoff[i]);
983 i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
984 I40E_GLPRT_PRC64L(hw->port),
985 pf->stat_offsets_loaded,
986 &osd->rx_size_64, &nsd->rx_size_64);
987 i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
988 I40E_GLPRT_PRC127L(hw->port),
989 pf->stat_offsets_loaded,
990 &osd->rx_size_127, &nsd->rx_size_127);
991 i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
992 I40E_GLPRT_PRC255L(hw->port),
993 pf->stat_offsets_loaded,
994 &osd->rx_size_255, &nsd->rx_size_255);
995 i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
996 I40E_GLPRT_PRC511L(hw->port),
997 pf->stat_offsets_loaded,
998 &osd->rx_size_511, &nsd->rx_size_511);
999 i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1000 I40E_GLPRT_PRC1023L(hw->port),
1001 pf->stat_offsets_loaded,
1002 &osd->rx_size_1023, &nsd->rx_size_1023);
1003 i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1004 I40E_GLPRT_PRC1522L(hw->port),
1005 pf->stat_offsets_loaded,
1006 &osd->rx_size_1522, &nsd->rx_size_1522);
1007 i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1008 I40E_GLPRT_PRC9522L(hw->port),
1009 pf->stat_offsets_loaded,
1010 &osd->rx_size_big, &nsd->rx_size_big);
1012 i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1013 I40E_GLPRT_PTC64L(hw->port),
1014 pf->stat_offsets_loaded,
1015 &osd->tx_size_64, &nsd->tx_size_64);
1016 i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1017 I40E_GLPRT_PTC127L(hw->port),
1018 pf->stat_offsets_loaded,
1019 &osd->tx_size_127, &nsd->tx_size_127);
1020 i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1021 I40E_GLPRT_PTC255L(hw->port),
1022 pf->stat_offsets_loaded,
1023 &osd->tx_size_255, &nsd->tx_size_255);
1024 i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1025 I40E_GLPRT_PTC511L(hw->port),
1026 pf->stat_offsets_loaded,
1027 &osd->tx_size_511, &nsd->tx_size_511);
1028 i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1029 I40E_GLPRT_PTC1023L(hw->port),
1030 pf->stat_offsets_loaded,
1031 &osd->tx_size_1023, &nsd->tx_size_1023);
1032 i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1033 I40E_GLPRT_PTC1522L(hw->port),
1034 pf->stat_offsets_loaded,
1035 &osd->tx_size_1522, &nsd->tx_size_1522);
1036 i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1037 I40E_GLPRT_PTC9522L(hw->port),
1038 pf->stat_offsets_loaded,
1039 &osd->tx_size_big, &nsd->tx_size_big);
1041 i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1042 pf->stat_offsets_loaded,
1043 &osd->rx_undersize, &nsd->rx_undersize);
1044 i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1045 pf->stat_offsets_loaded,
1046 &osd->rx_fragments, &nsd->rx_fragments);
1047 i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1048 pf->stat_offsets_loaded,
1049 &osd->rx_oversize, &nsd->rx_oversize);
1050 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1051 pf->stat_offsets_loaded,
1052 &osd->rx_jabber, &nsd->rx_jabber);
1055 i40e_stat_update_and_clear32(hw,
1056 I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
1057 &nsd->fd_atr_match);
1058 i40e_stat_update_and_clear32(hw,
1059 I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
1061 i40e_stat_update_and_clear32(hw,
1062 I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
1063 &nsd->fd_atr_tunnel_match);
1065 val = rd32(hw, I40E_PRTPM_EEE_STAT);
1066 nsd->tx_lpi_status =
1067 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1068 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1069 nsd->rx_lpi_status =
1070 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1071 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1072 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1073 pf->stat_offsets_loaded,
1074 &osd->tx_lpi_count, &nsd->tx_lpi_count);
1075 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1076 pf->stat_offsets_loaded,
1077 &osd->rx_lpi_count, &nsd->rx_lpi_count);
1079 if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1080 !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
1081 nsd->fd_sb_status = true;
1083 nsd->fd_sb_status = false;
1085 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1086 !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
1087 nsd->fd_atr_status = true;
1089 nsd->fd_atr_status = false;
1091 pf->stat_offsets_loaded = true;
1095 * i40e_update_stats - Update the various statistics counters.
1096 * @vsi: the VSI to be updated
1098 * Update the various stats for this VSI and its related entities.
1100 void i40e_update_stats(struct i40e_vsi *vsi)
1102 struct i40e_pf *pf = vsi->back;
1104 if (vsi == pf->vsi[pf->lan_vsi])
1105 i40e_update_pf_stats(pf);
1107 i40e_update_vsi_stats(vsi);
1111 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1112 * @vsi: the VSI to be searched
1113 * @macaddr: the MAC address
1116 * Returns ptr to the filter object or NULL
1118 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1119 const u8 *macaddr, s16 vlan)
1121 struct i40e_mac_filter *f;
1124 if (!vsi || !macaddr)
1127 key = i40e_addr_to_hkey(macaddr);
1128 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1129 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1137 * i40e_find_mac - Find a mac addr in the macvlan filters list
1138 * @vsi: the VSI to be searched
1139 * @macaddr: the MAC address we are searching for
1141 * Returns the first filter with the provided MAC address or NULL if
1142 * MAC address was not found
1144 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
1146 struct i40e_mac_filter *f;
1149 if (!vsi || !macaddr)
1152 key = i40e_addr_to_hkey(macaddr);
1153 hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
1154 if ((ether_addr_equal(macaddr, f->macaddr)))
1161 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1162 * @vsi: the VSI to be searched
1164 * Returns true if VSI is in vlan mode or false otherwise
1166 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1168 /* If we have a PVID, always operate in VLAN mode */
1172 /* We need to operate in VLAN mode whenever we have any filters with
1173 * a VLAN other than I40E_VLAN_ALL. We could check the table each
1174 * time, incurring search cost repeatedly. However, we can notice two
1177 * 1) the only place where we can gain a VLAN filter is in
1180 * 2) the only place where filters are actually removed is in
1181 * i40e_sync_filters_subtask.
1183 * Thus, we can simply use a boolean value, has_vlan_filters which we
1184 * will set to true when we add a VLAN filter in i40e_add_filter. Then
1185 * we have to perform the full search after deleting filters in
1186 * i40e_sync_filters_subtask, but we already have to search
1187 * filters here and can perform the check at the same time. This
1188 * results in avoiding embedding a loop for VLAN mode inside another
1189 * loop over all the filters, and should maintain correctness as noted
1192 return vsi->has_vlan_filter;
1196 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
1197 * @vsi: the VSI to configure
1198 * @tmp_add_list: list of filters ready to be added
1199 * @tmp_del_list: list of filters ready to be deleted
1200 * @vlan_filters: the number of active VLAN filters
1202 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
1203 * behave as expected. If we have any active VLAN filters remaining or about
1204 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
1205 * so that they only match against untagged traffic. If we no longer have any
1206 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
1207 * so that they match against both tagged and untagged traffic. In this way,
1208 * we ensure that we correctly receive the desired traffic. This ensures that
1209 * when we have an active VLAN we will receive only untagged traffic and
1210 * traffic matching active VLANs. If we have no active VLANs then we will
1211 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
1213 * Finally, in a similar fashion, this function also corrects filters when
1214 * there is an active PVID assigned to this VSI.
1216 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
1218 * This function is only expected to be called from within
1219 * i40e_sync_vsi_filters.
1221 * NOTE: This function expects to be called while under the
1222 * mac_filter_hash_lock
1224 static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
1225 struct hlist_head *tmp_add_list,
1226 struct hlist_head *tmp_del_list,
1229 s16 pvid = le16_to_cpu(vsi->info.pvid);
1230 struct i40e_mac_filter *f, *add_head;
1231 struct i40e_new_mac_filter *new;
1232 struct hlist_node *h;
1235 /* To determine if a particular filter needs to be replaced we
1236 * have the three following conditions:
1238 * a) if we have a PVID assigned, then all filters which are
1239 * not marked as VLAN=PVID must be replaced with filters that
1241 * b) otherwise, if we have any active VLANS, all filters
1242 * which are marked as VLAN=-1 must be replaced with
1243 * filters marked as VLAN=0
1244 * c) finally, if we do not have any active VLANS, all filters
1245 * which are marked as VLAN=0 must be replaced with filters
1249 /* Update the filters about to be added in place */
1250 hlist_for_each_entry(new, tmp_add_list, hlist) {
1251 if (pvid && new->f->vlan != pvid)
1252 new->f->vlan = pvid;
1253 else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
1255 else if (!vlan_filters && new->f->vlan == 0)
1256 new->f->vlan = I40E_VLAN_ANY;
1259 /* Update the remaining active filters */
1260 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1261 /* Combine the checks for whether a filter needs to be changed
1262 * and then determine the new VLAN inside the if block, in
1263 * order to avoid duplicating code for adding the new filter
1264 * then deleting the old filter.
1266 if ((pvid && f->vlan != pvid) ||
1267 (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
1268 (!vlan_filters && f->vlan == 0)) {
1269 /* Determine the new vlan we will be adding */
1272 else if (vlan_filters)
1275 new_vlan = I40E_VLAN_ANY;
1277 /* Create the new filter */
1278 add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
1282 /* Create a temporary i40e_new_mac_filter */
1283 new = kzalloc(sizeof(*new), GFP_ATOMIC);
1288 new->state = add_head->state;
1290 /* Add the new filter to the tmp list */
1291 hlist_add_head(&new->hlist, tmp_add_list);
1293 /* Put the original filter into the delete list */
1294 f->state = I40E_FILTER_REMOVE;
1295 hash_del(&f->hlist);
1296 hlist_add_head(&f->hlist, tmp_del_list);
1300 vsi->has_vlan_filter = !!vlan_filters;
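/* Summary of the correction above:
 *
 *	PVID assigned:        filters with vlan != PVID are re-added as PVID
 *	active VLAN filters:  filters with vlan == I40E_VLAN_ANY become vlan 0
 *	no VLAN filters:      filters with vlan == 0 become I40E_VLAN_ANY
 *
 * so non-VLAN filters match only untagged traffic exactly when tagged
 * traffic is expected, and match everything otherwise.
 */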
1306 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1307 * @vsi: the PF Main VSI - inappropriate for any other VSI
1308 * @macaddr: the MAC address
1310 * Remove whatever filter the firmware set up so the driver can manage
1311 * its own filtering intelligently.
1313 static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1315 struct i40e_aqc_remove_macvlan_element_data element;
1316 struct i40e_pf *pf = vsi->back;
1318 /* Only appropriate for the PF main VSI */
1319 if (vsi->type != I40E_VSI_MAIN)
1322 memset(&element, 0, sizeof(element));
1323 ether_addr_copy(element.mac_addr, macaddr);
1324 element.vlan_tag = 0;
1325 /* Ignore error returns, some firmware does it this way... */
1326 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1327 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1329 memset(&element, 0, sizeof(element));
1330 ether_addr_copy(element.mac_addr, macaddr);
1331 element.vlan_tag = 0;
1332 /* ...and some firmware does it this way. */
1333 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1334 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1335 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1339 * i40e_add_filter - Add a mac/vlan filter to the VSI
1340 * @vsi: the VSI to be searched
1341 * @macaddr: the MAC address
1344 * Returns ptr to the filter object or NULL when no memory available.
1346 * NOTE: This function is expected to be called with mac_filter_hash_lock
1349 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1350 const u8 *macaddr, s16 vlan)
1352 struct i40e_mac_filter *f;
1355 if (!vsi || !macaddr)
1358 f = i40e_find_filter(vsi, macaddr, vlan);
1360 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1364 /* Update the boolean indicating if we need to function in
1368 vsi->has_vlan_filter = true;
1370 ether_addr_copy(f->macaddr, macaddr);
1372 f->state = I40E_FILTER_NEW;
1373 INIT_HLIST_NODE(&f->hlist);
1375 key = i40e_addr_to_hkey(macaddr);
1376 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1378 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1379 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1382 /* If we're asked to add a filter that has been marked for removal, it
1383 * is safe to simply restore it to active state. __i40e_del_filter
1384 * will have simply deleted any filters which were previously marked
1385 * NEW or FAILED, so if it is currently marked REMOVE it must have
1386 * previously been ACTIVE. Since we haven't yet run the sync filters
1387 * task, just restore this filter to the ACTIVE state so that the
1388 * sync task leaves it in place
1390 if (f->state == I40E_FILTER_REMOVE)
1391 f->state = I40E_FILTER_ACTIVE;
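/* Illustrative usage (not part of the driver): callers take the hash lock
 * around filter manipulation, e.g.
 *
 *	spin_lock_bh(&vsi->mac_filter_hash_lock);
 *	f = i40e_add_filter(vsi, mac, I40E_VLAN_ANY);
 *	spin_unlock_bh(&vsi->mac_filter_hash_lock);
 *	if (!f)
 *		return -ENOMEM;
 *
 * The actual programming of the filter into firmware happens later in the
 * service task's filter sync.
 */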
1397 * __i40e_del_filter - Remove a specific filter from the VSI
1398 * @vsi: VSI to remove from
1399 * @f: the filter to remove from the list
1401 * This function should be called instead of i40e_del_filter only if you know
1402 * the exact filter you will remove already, such as via i40e_find_filter or
1405 * NOTE: This function is expected to be called with mac_filter_hash_lock
1407 * ANOTHER NOTE: This function MUST be called from within the context of
1408 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1409 * instead of list_for_each_entry().
1411 void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
1416 /* If the filter was never added to firmware then we can just delete it
1417 * directly and we don't want to set the status to remove or else an
1418 * admin queue command will unnecessarily fire.
1420 if ((f->state == I40E_FILTER_FAILED) ||
1421 (f->state == I40E_FILTER_NEW)) {
1422 hash_del(&f->hlist);
1425 f->state = I40E_FILTER_REMOVE;
1428 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1429 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1433 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
1434 * @vsi: the VSI to be searched
1435 * @macaddr: the MAC address
1438 * NOTE: This function is expected to be called with mac_filter_hash_lock
1440 * ANOTHER NOTE: This function MUST be called from within the context of
1441 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
1442 * instead of list_for_each_entry().
1444 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
1446 struct i40e_mac_filter *f;
1448 if (!vsi || !macaddr)
1451 f = i40e_find_filter(vsi, macaddr, vlan);
1452 __i40e_del_filter(vsi, f);
1456 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
1457 * @vsi: the VSI to be searched
1458 * @macaddr: the mac address to be filtered
1460 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
1461 * go through all the macvlan filters and add a macvlan filter for each
1462 * unique vlan that already exists. If a PVID has been assigned, instead only
1463 * add the macaddr to that VLAN.
1465 * Returns last filter added on success, else NULL
1467 struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
1470 struct i40e_mac_filter *f, *add = NULL;
1471 struct hlist_node *h;
1475 return i40e_add_filter(vsi, macaddr,
1476 le16_to_cpu(vsi->info.pvid));
1478 if (!i40e_is_vsi_in_vlan(vsi))
1479 return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
1481 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1482 if (f->state == I40E_FILTER_REMOVE)
1484 add = i40e_add_filter(vsi, macaddr, f->vlan);
1493 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
1494 * @vsi: the VSI to be searched
1495 * @macaddr: the mac address to be removed
1497 * Removes a given MAC address from a VSI regardless of what VLAN it has been associated with.
1500 * Returns 0 for success, or error
1502 int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
1504 struct i40e_mac_filter *f;
1505 struct hlist_node *h;
1509 WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
1510 "Missing mac_filter_hash_lock\n");
1511 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
1512 if (ether_addr_equal(macaddr, f->macaddr)) {
1513 __i40e_del_filter(vsi, f);
1525 * i40e_set_mac - NDO callback to set mac address
1526 * @netdev: network interface device structure
1527 * @p: pointer to an address structure
1529 * Returns 0 on success, negative on failure
1531 static int i40e_set_mac(struct net_device *netdev, void *p)
1533 struct i40e_netdev_priv *np = netdev_priv(netdev);
1534 struct i40e_vsi *vsi = np->vsi;
1535 struct i40e_pf *pf = vsi->back;
1536 struct i40e_hw *hw = &pf->hw;
1537 struct sockaddr *addr = p;
1539 if (!is_valid_ether_addr(addr->sa_data))
1540 return -EADDRNOTAVAIL;
1542 if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1543 netdev_info(netdev, "already using mac address %pM\n",
1548 if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
1549 test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
1550 return -EADDRNOTAVAIL;
1552 if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1553 netdev_info(netdev, "returning to hw mac address %pM\n",
1556 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1558 /* Copy the address first, so that we avoid a possible race with .set_rx_mode():
1560 * - Remove old address from MAC filter
1561 * - Copy new address
1562 * - Add new address to MAC filter
1564 spin_lock_bh(&vsi->mac_filter_hash_lock);
1565 i40e_del_mac_filter(vsi, netdev->dev_addr);
1566 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1567 i40e_add_mac_filter(vsi, netdev->dev_addr);
1568 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1570 if (vsi->type == I40E_VSI_MAIN) {
1573 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1574 I40E_AQC_WRITE_TYPE_LAA_WOL,
1575 addr->sa_data, NULL);
1577 netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
1578 i40e_stat_str(hw, ret),
1579 i40e_aq_str(hw, hw->aq.asq_last_status));
1582 /* schedule our worker thread which will take care of
1583 * applying the new filter changes
1585 i40e_service_event_schedule(vsi->back);
1590 * i40e_config_rss_aq - Prepare for RSS using AQ commands
1591 * @vsi: vsi structure
1592 * @seed: RSS hash seed
1594 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
1595 u8 *lut, u16 lut_size)
1597 struct i40e_pf *pf = vsi->back;
1598 struct i40e_hw *hw = &pf->hw;
1602 struct i40e_aqc_get_set_rss_key_data *seed_dw =
1603 (struct i40e_aqc_get_set_rss_key_data *)seed;
1604 ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
1606 dev_info(&pf->pdev->dev,
1607 "Cannot set RSS key, err %s aq_err %s\n",
1608 i40e_stat_str(hw, ret),
1609 i40e_aq_str(hw, hw->aq.asq_last_status));
1614 bool pf_lut = (vsi->type == I40E_VSI_MAIN);
1616 ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
1618 dev_info(&pf->pdev->dev,
1619 "Cannot set RSS lut, err %s aq_err %s\n",
1620 i40e_stat_str(hw, ret),
1621 i40e_aq_str(hw, hw->aq.asq_last_status));
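/* Note: the seed passed in is expected to be I40E_HKEY_ARRAY_SIZE bytes
 * (cast to the AQ key structure above) and the LUT vsi->rss_table_size
 * bytes; i40e_vsi_config_rss() below fills both from the user-supplied
 * values or defaults before calling this helper.
 */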
1629 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
1630 * @vsi: VSI structure
1632 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
1634 struct i40e_pf *pf = vsi->back;
1635 u8 seed[I40E_HKEY_ARRAY_SIZE];
1639 if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
1642 vsi->rss_size = min_t(int, pf->alloc_rss_size,
1643 vsi->num_queue_pairs);
1646 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1650 /* Use the user-configured hash key and lookup table if present,
1651 * otherwise use defaults
1653 if (vsi->rss_lut_user)
1654 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1656 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
1657 if (vsi->rss_hkey_user)
1658 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
1660 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
1661 ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
1667 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
1668 * @vsi: the VSI being configured,
1669 * @ctxt: VSI context structure
1670 * @enabled_tc: number of traffic classes to enable
1672 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
1674 static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
1675 struct i40e_vsi_context *ctxt,
1678 u16 qcount = 0, max_qcount, qmap, sections = 0;
1679 int i, override_q, pow, num_qps, ret;
1680 u8 netdev_tc = 0, offset = 0;
1682 if (vsi->type != I40E_VSI_MAIN)
1684 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1685 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1686 vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
1687 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1688 num_qps = vsi->mqprio_qopt.qopt.count[0];
1690 /* find the next higher power-of-2 of num queue pairs */
1691 pow = ilog2(num_qps);
1692 if (!is_power_of_2(num_qps))
1694 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1695 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1697 /* Setup queue offset/count for all TCs for given VSI */
1698 max_qcount = vsi->mqprio_qopt.qopt.count[0];
1699 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1700 /* See if the given TC is enabled for the given VSI */
1701 if (vsi->tc_config.enabled_tc & BIT(i)) {
1702 offset = vsi->mqprio_qopt.qopt.offset[i];
1703 qcount = vsi->mqprio_qopt.qopt.count[i];
1704 if (qcount > max_qcount)
1705 max_qcount = qcount;
1706 vsi->tc_config.tc_info[i].qoffset = offset;
1707 vsi->tc_config.tc_info[i].qcount = qcount;
1708 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1710 /* TC is not enabled so set the offset to
1711 * default queue and allocate one queue
1714 vsi->tc_config.tc_info[i].qoffset = 0;
1715 vsi->tc_config.tc_info[i].qcount = 1;
1716 vsi->tc_config.tc_info[i].netdev_tc = 0;
1720 /* Set actual Tx/Rx queue pairs */
1721 vsi->num_queue_pairs = offset + qcount;
1723 /* Setup queue TC[0].qmap for given VSI context */
1724 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1725 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1726 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1727 ctxt->info.valid_sections |= cpu_to_le16(sections);
1729 /* Reconfigure RSS for main VSI with max queue count */
1730 vsi->rss_size = max_qcount;
1731 ret = i40e_vsi_config_rss(vsi);
1733 dev_info(&vsi->back->pdev->dev,
1734 "Failed to reconfig rss for num_queues (%u)\n",
1738 vsi->reconfig_rss = true;
1739 dev_dbg(&vsi->back->pdev->dev,
1740 "Reconfigured rss with num_queues (%u)\n", max_qcount);
1742 /* Find queue count available for channel VSIs and starting offset
1745 override_q = vsi->mqprio_qopt.qopt.count[0];
1746 if (override_q && override_q < vsi->num_queue_pairs) {
1747 vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
1748 vsi->next_base_queue = override_q;
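/* Worked example (illustrative): with 8 queue pairs in TC0 starting at
 * queue offset 0, pow = ilog2(8) = 3, so
 *
 *	qmap = (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
 *	       (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
 *
 * i.e. the VSI context describes "2^3 queues starting at queue 0".
 */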
1754 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1755 * @vsi: the VSI being setup
1756 * @ctxt: VSI context structure
1757 * @enabled_tc: Enabled TCs bitmap
1758 * @is_add: True if called before Add VSI
1760 * Setup VSI queue mapping for enabled traffic classes.
1762 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1763 struct i40e_vsi_context *ctxt,
1767 struct i40e_pf *pf = vsi->back;
1777 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1780 /* Number of queues per enabled TC */
1781 num_tc_qps = vsi->alloc_queue_pairs;
1782 if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1783 /* Find numtc from enabled TC bitmap */
1784 for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1785 if (enabled_tc & BIT(i)) /* TC is enabled */
1789 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1792 num_tc_qps = num_tc_qps / numtc;
1793 num_tc_qps = min_t(int, num_tc_qps,
1794 i40e_pf_get_max_q_per_tc(pf));
1797 vsi->tc_config.numtc = numtc;
1798 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1800 /* Do not allow using more TC queue pairs than there are MSI-X vectors */
1801 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1802 num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);
1804 /* Setup queue offset/count for all TCs for given VSI */
1805 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1806 /* See if the given TC is enabled for the given VSI */
1807 if (vsi->tc_config.enabled_tc & BIT(i)) {
1811 switch (vsi->type) {
1813 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
1814 I40E_FLAG_FD_ATR_ENABLED)) ||
1815 vsi->tc_config.enabled_tc != 1) {
1816 qcount = min_t(int, pf->alloc_rss_size,
1822 case I40E_VSI_SRIOV:
1823 case I40E_VSI_VMDQ2:
1825 qcount = num_tc_qps;
1829 vsi->tc_config.tc_info[i].qoffset = offset;
1830 vsi->tc_config.tc_info[i].qcount = qcount;
1832 /* find the next higher power-of-2 of num queue pairs */
1835 while (num_qps && (BIT_ULL(pow) < qcount)) {
1840 vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1842 (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1843 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1847 /* TC is not enabled so set the offset to
1848 * default queue and allocate one queue
1851 vsi->tc_config.tc_info[i].qoffset = 0;
1852 vsi->tc_config.tc_info[i].qcount = 1;
1853 vsi->tc_config.tc_info[i].netdev_tc = 0;
1857 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1860 /* Set actual Tx/Rx queue pairs */
1861 vsi->num_queue_pairs = offset;
1862 if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1863 if (vsi->req_queue_pairs > 0)
1864 vsi->num_queue_pairs = vsi->req_queue_pairs;
1865 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1866 vsi->num_queue_pairs = pf->num_lan_msix;
1869 /* Scheduler section valid can only be set for ADD VSI */
1871 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1873 ctxt->info.up_enable_bits = enabled_tc;
1875 if (vsi->type == I40E_VSI_SRIOV) {
1876 ctxt->info.mapping_flags |=
1877 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1878 for (i = 0; i < vsi->num_queue_pairs; i++)
1879 ctxt->info.queue_mapping[i] =
1880 cpu_to_le16(vsi->base_queue + i);
1882 ctxt->info.mapping_flags |=
1883 cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1884 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1886 ctxt->info.valid_sections |= cpu_to_le16(sections);
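/* Note: SR-IOV VSIs get an explicit per-queue (non-contiguous) mapping
 * above, tying each VF queue to an absolute PF queue number, while all
 * other VSI types use a single contiguous range starting at
 * vsi->base_queue.
 */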
1890 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
1891 * @netdev: the netdevice
1892 * @addr: address to add
1894 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1895 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1897 static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
1899 struct i40e_netdev_priv *np = netdev_priv(netdev);
1900 struct i40e_vsi *vsi = np->vsi;
1902 if (i40e_add_mac_filter(vsi, addr))
1909 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1910 * @netdev: the netdevice
1911 * @addr: address to remove
1913 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1914 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1916 static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
1918 struct i40e_netdev_priv *np = netdev_priv(netdev);
1919 struct i40e_vsi *vsi = np->vsi;
1921 /* Under some circumstances, we might receive a request to delete
1922 * our own device address from our uc list. Because we store the
1923 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1924 * such requests and not delete our device address from this list.
1926 if (ether_addr_equal(addr, netdev->dev_addr))
1929 i40e_del_mac_filter(vsi, addr);
1935 * i40e_set_rx_mode - NDO callback to set the netdev filters
1936 * @netdev: network interface device structure
1938 static void i40e_set_rx_mode(struct net_device *netdev)
1940 struct i40e_netdev_priv *np = netdev_priv(netdev);
1941 struct i40e_vsi *vsi = np->vsi;
1943 spin_lock_bh(&vsi->mac_filter_hash_lock);
1945 __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1946 __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
1948 spin_unlock_bh(&vsi->mac_filter_hash_lock);
1950 /* check for other flag changes */
1951 if (vsi->current_netdev_flags != vsi->netdev->flags) {
1952 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1953 set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
1958 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1959 * @vsi: Pointer to VSI struct
1960 * @from: Pointer to list which contains MAC filter entries - changes to
1961 * those entries need to be undone.
1963 * MAC filter entries from this list were slated for deletion.
1965 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1966 struct hlist_head *from)
1968 struct i40e_mac_filter *f;
1969 struct hlist_node *h;
1971 hlist_for_each_entry_safe(f, h, from, hlist) {
1972 u64 key = i40e_addr_to_hkey(f->macaddr);
1974 /* Move the element back into the MAC filter list */
1975 hlist_del(&f->hlist);
1976 hash_add(vsi->mac_filter_hash, &f->hlist, key);
1981 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1982 * @vsi: Pointer to vsi struct
1983 * @from: Pointer to list which contains MAC filter entries - changes to
1984 * those entries need to be undone.
1986 * MAC filter entries from this list were slated for addition.
1988 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
1989 struct hlist_head *from)
1991 struct i40e_new_mac_filter *new;
1992 struct hlist_node *h;
1994 hlist_for_each_entry_safe(new, h, from, hlist) {
1995 /* We can simply free the wrapper structure */
1996 hlist_del(&new->hlist);
2002 * i40e_next_filter - Get the next non-broadcast filter from a list
2003 * @next: pointer to filter in list
2005 * Returns the next non-broadcast filter in the list. Required so that we
2006 * ignore broadcast filters within the list, since these are not handled via
2007 * the normal firmware update path.
2010 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
2012 hlist_for_each_entry_continue(next, hlist) {
2013 if (!is_broadcast_ether_addr(next->f->macaddr))
2021 * i40e_update_filter_state - Update filter state based on return data
2023 * @count: Number of filters added
2024 * @add_list: return data from fw
2025 * @add_head: pointer to first filter in current batch
2027 * MAC filter entries from list were slated to be added to device. Returns
2028 * number of successful filters. Note that 0 does NOT mean success!
2031 i40e_update_filter_state(int count,
2032 struct i40e_aqc_add_macvlan_element_data *add_list,
2033 struct i40e_new_mac_filter *add_head)
2038 for (i = 0; i < count; i++) {
2039 /* Always check status of each filter. We don't need to check
2040 * the firmware return status because we pre-set the filter
2041 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
2042 * request to the adminq. Thus, if it no longer matches then
2043 * we know the filter is active.
2045 if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
2046 add_head->state = I40E_FILTER_FAILED;
2048 add_head->state = I40E_FILTER_ACTIVE;
2052 add_head = i40e_next_filter(add_head);
2061 * i40e_aqc_del_filters - Request firmware to delete a set of filters
2062 * @vsi: ptr to the VSI
2063 * @vsi_name: name to display in messages
2064 * @list: the list of filters to send to firmware
2065 * @num_del: the number of filters to delete
2066 * @retval: Set to -EIO on failure to delete
2068 * Send a request to firmware via AdminQ to delete a set of filters. Uses
2069 * *retval instead of a return value so that success does not force *retval to
2070 * be set to 0. This ensures that a sequence of calls to this function
2071 * preserves the previous value of *retval on successful delete.
2074 void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
2075 struct i40e_aqc_remove_macvlan_element_data *list,
2076 int num_del, int *retval)
2078 struct i40e_hw *hw = &vsi->back->hw;
2082 aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
2083 aq_err = hw->aq.asq_last_status;
2085 /* Explicitly ignore and do not report when firmware returns ENOENT */
2086 if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
2088 dev_info(&vsi->back->pdev->dev,
2089 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
2090 vsi_name, i40e_stat_str(hw, aq_ret),
2091 i40e_aq_str(hw, aq_err));
2096 * i40e_aqc_add_filters - Request firmware to add a set of filters
2097 * @vsi: ptr to the VSI
2098 * @vsi_name: name to display in messages
2099 * @list: the list of filters to send to firmware
2100 * @add_head: Position in the add hlist
2101 * @num_add: the number of filters to add
2103 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
2104 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
2105 * space for more filters.
2108 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
2109 struct i40e_aqc_add_macvlan_element_data *list,
2110 struct i40e_new_mac_filter *add_head,
2113 struct i40e_hw *hw = &vsi->back->hw;
2116 i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
2117 aq_err = hw->aq.asq_last_status;
2118 fcnt = i40e_update_filter_state(num_add, list, add_head);
2120 if (fcnt != num_add) {
2121 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2122 dev_warn(&vsi->back->pdev->dev,
2123 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
2124 i40e_aq_str(hw, aq_err),
2130 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
2131 * @vsi: pointer to the VSI
2132 * @vsi_name: the VSI name
2135 * This function sets or clears the promiscuous broadcast flags for VLAN
2136 * filters in order to properly receive broadcast frames. Assumes that only
2137 * broadcast filters are passed.
2139 * Returns status indicating success or failure.
2142 i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
2143 struct i40e_mac_filter *f)
2145 bool enable = f->state == I40E_FILTER_NEW;
2146 struct i40e_hw *hw = &vsi->back->hw;
2149 if (f->vlan == I40E_VLAN_ANY) {
2150 aq_ret = i40e_aq_set_vsi_broadcast(hw,
2155 aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
2163 set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2164 dev_warn(&vsi->back->pdev->dev,
2165 "Error %s, forcing overflow promiscuous on %s\n",
2166 i40e_aq_str(hw, hw->aq.asq_last_status),
2174 * i40e_set_promiscuous - set promiscuous mode
2175 * @pf: board private structure
2176 * @promisc: promisc on or off
2178 * There are different ways of setting promiscuous mode on a PF depending on
2179 * what state/environment we're in. This identifies and sets it appropriately.
2180 * Returns 0 on success.
2182 static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
2184 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
2185 struct i40e_hw *hw = &pf->hw;
2188 if (vsi->type == I40E_VSI_MAIN &&
2189 pf->lan_veb != I40E_NO_VEB &&
2190 !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
2191 /* set defport ON for Main VSI instead of true promisc
2192 * this way we will get all unicast/multicast and VLAN
2193 * promisc behavior but will not get VF or VMDq traffic
2194 * replicated on the Main VSI.
2197 aq_ret = i40e_aq_set_default_vsi(hw,
2201 aq_ret = i40e_aq_clear_default_vsi(hw,
2205 dev_info(&pf->pdev->dev,
2206 "Set default VSI failed, err %s, aq_err %s\n",
2207 i40e_stat_str(hw, aq_ret),
2208 i40e_aq_str(hw, hw->aq.asq_last_status));
2211 aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
2217 dev_info(&pf->pdev->dev,
2218 "set unicast promisc failed, err %s, aq_err %s\n",
2219 i40e_stat_str(hw, aq_ret),
2220 i40e_aq_str(hw, hw->aq.asq_last_status));
2222 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
2227 dev_info(&pf->pdev->dev,
2228 "set multicast promisc failed, err %s, aq_err %s\n",
2229 i40e_stat_str(hw, aq_ret),
2230 i40e_aq_str(hw, hw->aq.asq_last_status));
2235 pf->cur_promisc = promisc;
2241 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
2242 * @vsi: ptr to the VSI
2244 * Push any outstanding VSI filter changes through the AdminQ.
2246 * Returns 0 or error value
2248 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
2250 struct hlist_head tmp_add_list, tmp_del_list;
2251 struct i40e_mac_filter *f;
2252 struct i40e_new_mac_filter *new, *add_head = NULL;
2253 struct i40e_hw *hw = &vsi->back->hw;
2254 bool old_overflow, new_overflow;
2255 unsigned int failed_filters = 0;
2256 unsigned int vlan_filters = 0;
2257 char vsi_name[16] = "PF";
2258 int filter_list_len = 0;
2259 i40e_status aq_ret = 0;
2260 u32 changed_flags = 0;
2261 struct hlist_node *h;
2270 /* empty array typed pointers, kcalloc later */
2271 struct i40e_aqc_add_macvlan_element_data *add_list;
2272 struct i40e_aqc_remove_macvlan_element_data *del_list;
2274 while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
2275 usleep_range(1000, 2000);
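/* Only one filter sync may run per VSI at a time; poll the
 * SYNCING_FILTERS state bit with a short sleep until we own it.
 */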
2278 old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2281 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
2282 vsi->current_netdev_flags = vsi->netdev->flags;
2285 INIT_HLIST_HEAD(&tmp_add_list);
2286 INIT_HLIST_HEAD(&tmp_del_list);
2288 if (vsi->type == I40E_VSI_SRIOV)
2289 snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
2290 else if (vsi->type != I40E_VSI_MAIN)
2291 snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
2293 if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
2294 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
2296 spin_lock_bh(&vsi->mac_filter_hash_lock);
2297 /* Create a list of filters to delete. */
2298 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2299 if (f->state == I40E_FILTER_REMOVE) {
2300 /* Move the element into temporary del_list */
2301 hash_del(&f->hlist);
2302 hlist_add_head(&f->hlist, &tmp_del_list);
2304 /* Avoid counting removed filters */
2307 if (f->state == I40E_FILTER_NEW) {
2308 /* Create a temporary i40e_new_mac_filter */
2309 new = kzalloc(sizeof(*new), GFP_ATOMIC);
2311 goto err_no_memory_locked;
2313 /* Store pointer to the real filter */
2315 new->state = f->state;
2317 /* Add it to the hash list */
2318 hlist_add_head(&new->hlist, &tmp_add_list);
2321 /* Count the number of active (current and new) VLAN
2322 * filters we have now. Does not count filters which
2323 * are marked for deletion.
2329 retval = i40e_correct_mac_vlan_filters(vsi,
2334 goto err_no_memory_locked;
2336 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2339 /* Now process 'del_list' outside the lock */
2340 if (!hlist_empty(&tmp_del_list)) {
2341 filter_list_len = hw->aq.asq_buf_size /
2342 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2343 list_size = filter_list_len *
2344 sizeof(struct i40e_aqc_remove_macvlan_element_data);
2345 del_list = kzalloc(list_size, GFP_ATOMIC);
2349 hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
2352 /* handle broadcast filters by updating the broadcast
2353 * promiscuous flag and releasing the filter from the list.
2355 if (is_broadcast_ether_addr(f->macaddr)) {
2356 i40e_aqc_broadcast_filter(vsi, vsi_name, f);
2358 hlist_del(&f->hlist);
2363 /* add to delete list */
2364 ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
2365 if (f->vlan == I40E_VLAN_ANY) {
2366 del_list[num_del].vlan_tag = 0;
2367 cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
2369 del_list[num_del].vlan_tag =
2370 cpu_to_le16((u16)(f->vlan));
2373 cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2374 del_list[num_del].flags = cmd_flags;
2377 /* flush a full buffer */
2378 if (num_del == filter_list_len) {
2379 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2381 memset(del_list, 0, list_size);
2384 /* Release memory for MAC filter entries which were
2385 * synced up with HW.
2387 hlist_del(&f->hlist);
2392 i40e_aqc_del_filters(vsi, vsi_name, del_list,
2400 if (!hlist_empty(&tmp_add_list)) {
2401 /* Do all the adds now. */
2402 filter_list_len = hw->aq.asq_buf_size /
2403 sizeof(struct i40e_aqc_add_macvlan_element_data);
2404 list_size = filter_list_len *
2405 sizeof(struct i40e_aqc_add_macvlan_element_data);
2406 add_list = kzalloc(list_size, GFP_ATOMIC);
2411 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2412 /* handle broadcast filters by updating the broadcast
2413 * promiscuous flag instead of adding a MAC filter.
2415 if (is_broadcast_ether_addr(new->f->macaddr)) {
2416 if (i40e_aqc_broadcast_filter(vsi, vsi_name,
2418 new->state = I40E_FILTER_FAILED;
2420 new->state = I40E_FILTER_ACTIVE;
2424 /* add to add array */
2428 ether_addr_copy(add_list[num_add].mac_addr,
2430 if (new->f->vlan == I40E_VLAN_ANY) {
2431 add_list[num_add].vlan_tag = 0;
2432 cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
2434 add_list[num_add].vlan_tag =
2435 cpu_to_le16((u16)(new->f->vlan));
2437 add_list[num_add].queue_number = 0;
2438 /* set invalid match method for later detection */
2439 add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
2440 cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2441 add_list[num_add].flags = cpu_to_le16(cmd_flags);
2444 /* flush a full buffer */
2445 if (num_add == filter_list_len) {
2446 i40e_aqc_add_filters(vsi, vsi_name, add_list,
2448 memset(add_list, 0, list_size);
2453 i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
2456 /* Now move all of the filters from the temp add list back to
2459 spin_lock_bh(&vsi->mac_filter_hash_lock);
2460 hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
2461 /* Only update the state if we're still NEW */
2462 if (new->f->state == I40E_FILTER_NEW)
2463 new->f->state = new->state;
2464 hlist_del(&new->hlist);
2467 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2472 /* Determine the number of active and failed filters. */
2473 spin_lock_bh(&vsi->mac_filter_hash_lock);
2474 vsi->active_filters = 0;
2475 hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
2476 if (f->state == I40E_FILTER_ACTIVE)
2477 vsi->active_filters++;
2478 else if (f->state == I40E_FILTER_FAILED)
2481 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2483 /* Check if we are able to exit overflow promiscuous mode. We can
2484 * safely exit if we didn't just enter, we no longer have any failed
2485 * filters, and we have reduced filters below the threshold value.
2487 if (old_overflow && !failed_filters &&
2488 vsi->active_filters < vsi->promisc_threshold) {
2489 dev_info(&pf->pdev->dev,
2490 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
2492 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2493 vsi->promisc_threshold = 0;
2496 /* if the VF is not trusted do not do promisc */
2497 if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
2498 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2502 new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
2504 /* If we are entering overflow promiscuous, we need to calculate a new
2505 * threshold for when we are safe to exit
2507 if (!old_overflow && new_overflow)
2508 vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
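/* Example: if overflow was entered with 200 active filters, the
 * threshold becomes 150, and overflow promiscuous is only exited
 * once the active count drops below 150 with no failed filters.
 */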
2510 /* check for changes in promiscuous modes */
2511 if (changed_flags & IFF_ALLMULTI) {
2512 bool cur_multipromisc;
2514 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2515 aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2520 retval = i40e_aq_rc_to_posix(aq_ret,
2521 hw->aq.asq_last_status);
2522 dev_info(&pf->pdev->dev,
2523 "set multi promisc failed on %s, err %s aq_err %s\n",
2525 i40e_stat_str(hw, aq_ret),
2526 i40e_aq_str(hw, hw->aq.asq_last_status));
2530 if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
2533 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2535 aq_ret = i40e_set_promiscuous(pf, cur_promisc);
2537 retval = i40e_aq_rc_to_posix(aq_ret,
2538 hw->aq.asq_last_status);
2539 dev_info(&pf->pdev->dev,
2540 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
2541 cur_promisc ? "on" : "off",
2543 i40e_stat_str(hw, aq_ret),
2544 i40e_aq_str(hw, hw->aq.asq_last_status));
2548 /* if something went wrong then set the changed flag so we try again */
2550 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2552 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2556 /* Restore elements on the temporary add and delete lists */
2557 spin_lock_bh(&vsi->mac_filter_hash_lock);
2558 err_no_memory_locked:
2559 i40e_undo_del_filter_entries(vsi, &tmp_del_list);
2560 i40e_undo_add_filter_entries(vsi, &tmp_add_list);
2561 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2563 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
2564 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
2569 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2570 * @pf: board private structure
2572 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2578 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
2580 if (test_bit(__I40E_VF_DISABLE, pf->state)) {
2581 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
2585 for (v = 0; v < pf->num_alloc_vsi; v++) {
2587 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
2588 int ret = i40e_sync_vsi_filters(pf->vsi[v]);
2591 /* come back and try again later */
2592 set_bit(__I40E_MACVLAN_SYNC_PENDING,
2601 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
2604 static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
2606 if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
2607 return I40E_RXBUFFER_2048;
2609 return I40E_RXBUFFER_3072;
2613 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2614 * @netdev: network interface device structure
2615 * @new_mtu: new value for maximum frame size
2617 * Returns 0 on success, negative on failure
2619 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2621 struct i40e_netdev_priv *np = netdev_priv(netdev);
2622 struct i40e_vsi *vsi = np->vsi;
2623 struct i40e_pf *pf = vsi->back;
2625 if (i40e_enabled_xdp_vsi(vsi)) {
2626 int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2628 if (frame_size > i40e_max_xdp_frame_size(vsi))
2632 netdev_info(netdev, "changing MTU from %d to %d\n",
2633 netdev->mtu, new_mtu);
2634 netdev->mtu = new_mtu;
2635 if (netif_running(netdev))
2636 i40e_vsi_reinit_locked(vsi);
2637 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
2638 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
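/* Notify the client interface (e.g. the iWARP driver) that an L2
 * parameter has changed so it can adapt to the new MTU.
 */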
2643 * i40e_ioctl - Access the hwtstamp interface
2644 * @netdev: network interface device structure
2645 * @ifr: interface request data
2646 * @cmd: ioctl command
2648 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2650 struct i40e_netdev_priv *np = netdev_priv(netdev);
2651 struct i40e_pf *pf = np->vsi->back;
2655 return i40e_ptp_get_ts_config(pf, ifr);
2657 return i40e_ptp_set_ts_config(pf, ifr);
2664 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2665 * @vsi: the vsi being adjusted
2667 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2669 struct i40e_vsi_context ctxt;
2672 /* Don't modify stripping options if a port VLAN is active */
2676 if ((vsi->info.valid_sections &
2677 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2678 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2679 return; /* already enabled */
2681 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2682 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2683 I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2685 ctxt.seid = vsi->seid;
2686 ctxt.info = vsi->info;
2687 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2689 dev_info(&vsi->back->pdev->dev,
2690 "update vlan stripping failed, err %s aq_err %s\n",
2691 i40e_stat_str(&vsi->back->hw, ret),
2692 i40e_aq_str(&vsi->back->hw,
2693 vsi->back->hw.aq.asq_last_status));
2698 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2699 * @vsi: the vsi being adjusted
2701 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2703 struct i40e_vsi_context ctxt;
2706 /* Don't modify stripping options if a port VLAN is active */
2710 if ((vsi->info.valid_sections &
2711 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2712 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2713 I40E_AQ_VSI_PVLAN_EMOD_MASK))
2714 return; /* already disabled */
2716 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2717 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2718 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2720 ctxt.seid = vsi->seid;
2721 ctxt.info = vsi->info;
2722 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2724 dev_info(&vsi->back->pdev->dev,
2725 "update vlan stripping failed, err %s aq_err %s\n",
2726 i40e_stat_str(&vsi->back->hw, ret),
2727 i40e_aq_str(&vsi->back->hw,
2728 vsi->back->hw.aq.asq_last_status));
2733 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
2734 * @vsi: the vsi being configured
2735 * @vid: vlan id to be added (0 = untagged only, -1 = any)
2737 * This is a helper function for adding a new MAC/VLAN filter with the
2738 * specified VLAN for each existing MAC address already in the hash table.
2739 * This function does *not* perform any accounting to update filters based on
2742 * NOTE: this function expects to be called while under the
2743 * mac_filter_hash_lock
2745 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2747 struct i40e_mac_filter *f, *add_f;
2748 struct hlist_node *h;
2751 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2752 if (f->state == I40E_FILTER_REMOVE)
2754 add_f = i40e_add_filter(vsi, f->macaddr, vid);
2756 dev_info(&vsi->back->pdev->dev,
2757 "Could not add vlan filter %d for %pM\n",
2767 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
2768 * @vsi: the VSI being configured
2769 * @vid: VLAN id to be added
2771 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
2778 /* The network stack will attempt to add VID=0, with the intention to
2779 * receive priority tagged packets with a VLAN of 0. Our HW receives
2780 * these packets by default when configured to receive untagged
2781 * packets, so we don't need to add a filter for this case.
2782 * Additionally, HW interprets adding a VID=0 filter as meaning to
2783 * receive *only* tagged traffic and stops receiving untagged traffic.
2784 * Thus, we do not want to actually add a filter for VID=0
2789 /* Locked once because all functions invoked below iterate the list */
2790 spin_lock_bh(&vsi->mac_filter_hash_lock);
2791 err = i40e_add_vlan_all_mac(vsi, vid);
2792 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2796 /* schedule our worker thread which will take care of
2797 * applying the new filter changes
2799 i40e_service_event_schedule(vsi->back);
2804 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
2805 * @vsi: the vsi being configured
2806 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
2808 * This function should be used to remove all VLAN filters which match the
2809 * given VID. It does not schedule the service event and does not take the
2810 * mac_filter_hash_lock so it may be combined with other operations under
2811 * a single invocation of the mac_filter_hash_lock.
2813 * NOTE: this function expects to be called while under the
2814 * mac_filter_hash_lock
2816 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
2818 struct i40e_mac_filter *f;
2819 struct hlist_node *h;
2822 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
2824 __i40e_del_filter(vsi, f);
2829 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
2830 * @vsi: the VSI being configured
2831 * @vid: VLAN id to be removed
2833 void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
2835 if (!vid || vsi->info.pvid)
2838 spin_lock_bh(&vsi->mac_filter_hash_lock);
2839 i40e_rm_vlan_all_mac(vsi, vid);
2840 spin_unlock_bh(&vsi->mac_filter_hash_lock);
2842 /* schedule our worker thread which will take care of
2843 * applying the new filter changes
2845 i40e_service_event_schedule(vsi->back);
2849 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2850 * @netdev: network interface to be adjusted
2851 * @proto: unused protocol value
2852 * @vid: vlan id to be added
2854 * net_device_ops implementation for adding vlan ids
2856 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2857 __always_unused __be16 proto, u16 vid)
2859 struct i40e_netdev_priv *np = netdev_priv(netdev);
2860 struct i40e_vsi *vsi = np->vsi;
2863 if (vid >= VLAN_N_VID)
2866 ret = i40e_vsi_add_vlan(vsi, vid);
2868 set_bit(vid, vsi->active_vlans);
2874 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
2875 * @netdev: network interface to be adjusted
2876 * @proto: unused protocol value
2877 * @vid: vlan id to be added
2879 static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
2880 __always_unused __be16 proto, u16 vid)
2882 struct i40e_netdev_priv *np = netdev_priv(netdev);
2883 struct i40e_vsi *vsi = np->vsi;
2885 if (vid >= VLAN_N_VID)
2887 set_bit(vid, vsi->active_vlans);
2891 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2892 * @netdev: network interface to be adjusted
2893 * @proto: unused protocol value
2894 * @vid: vlan id to be removed
2896 * net_device_ops implementation for removing vlan ids
2898 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2899 __always_unused __be16 proto, u16 vid)
2901 struct i40e_netdev_priv *np = netdev_priv(netdev);
2902 struct i40e_vsi *vsi = np->vsi;
2904 /* return code is ignored as there is nothing a user
2905 * can do about failure to remove and a log message was
2906 * already printed from the other function
2908 i40e_vsi_kill_vlan(vsi, vid);
2910 clear_bit(vid, vsi->active_vlans);
2916 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2917 * @vsi: the vsi being brought back up
2919 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2926 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2927 i40e_vlan_stripping_enable(vsi);
2929 i40e_vlan_stripping_disable(vsi);
2931 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2932 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
2937 * i40e_vsi_add_pvid - Add pvid for the VSI
2938 * @vsi: the vsi being adjusted
2939 * @vid: the vlan id to set as a PVID
2941 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2943 struct i40e_vsi_context ctxt;
2946 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2947 vsi->info.pvid = cpu_to_le16(vid);
2948 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2949 I40E_AQ_VSI_PVLAN_INSERT_PVID |
2950 I40E_AQ_VSI_PVLAN_EMOD_STR;
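/* Accept only frames tagged with the PVID, insert the PVID on
 * transmit, and strip the VLAN tag on receive.
 */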
2952 ctxt.seid = vsi->seid;
2953 ctxt.info = vsi->info;
2954 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2956 dev_info(&vsi->back->pdev->dev,
2957 "add pvid failed, err %s aq_err %s\n",
2958 i40e_stat_str(&vsi->back->hw, ret),
2959 i40e_aq_str(&vsi->back->hw,
2960 vsi->back->hw.aq.asq_last_status));
2968 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2969 * @vsi: the vsi being adjusted
2971 * Just use the vlan_rx_register() service to put it back to normal
2973 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2975 i40e_vlan_stripping_disable(vsi);
2981 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2982 * @vsi: ptr to the VSI
2984 * If this function returns with an error, then it's possible one or
2985 * more of the rings is populated (while the rest are not). It is the
2986 * caller's duty to clean those orphaned rings.
2988 * Return 0 on success, negative on failure
2990 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2994 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2995 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2997 if (!i40e_enabled_xdp_vsi(vsi))
3000 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3001 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);
3007 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3008 * @vsi: ptr to the VSI
3010 * Free VSI's transmit software resources
3012 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
3016 if (vsi->tx_rings) {
3017 for (i = 0; i < vsi->num_queue_pairs; i++)
3018 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
3019 i40e_free_tx_resources(vsi->tx_rings[i]);
3022 if (vsi->xdp_rings) {
3023 for (i = 0; i < vsi->num_queue_pairs; i++)
3024 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
3025 i40e_free_tx_resources(vsi->xdp_rings[i]);
3030 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
3031 * @vsi: ptr to the VSI
3033 * If this function returns with an error, then it's possible one or
3034 * more of the rings is populated (while the rest are not). It is the
3035 * caller's duty to clean those orphaned rings.
3037 * Return 0 on success, negative on failure
3039 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
3043 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3044 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
3049 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
3050 * @vsi: ptr to the VSI
3052 * Free all receive software resources
3054 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
3061 for (i = 0; i < vsi->num_queue_pairs; i++)
3062 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
3063 i40e_free_rx_resources(vsi->rx_rings[i]);
3067 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3068 * @ring: The Tx ring to configure
3070 * This enables/disables XPS for a given Tx descriptor ring
3071 * based on the TCs enabled for the VSI that ring belongs to.
3073 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3077 if (!ring->q_vector || !ring->netdev || ring->ch)
3080 /* We only initialize XPS once, so as not to overwrite user settings */
3081 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
3084 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
3085 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
3090 * i40e_configure_tx_ring - Configure a transmit ring context and rest
3091 * @ring: The Tx ring to configure
3093 * Configure the Tx descriptor ring in the HMC context.
3095 static int i40e_configure_tx_ring(struct i40e_ring *ring)
3097 struct i40e_vsi *vsi = ring->vsi;
3098 u16 pf_q = vsi->base_queue + ring->queue_index;
3099 struct i40e_hw *hw = &vsi->back->hw;
3100 struct i40e_hmc_obj_txq tx_ctx;
3101 i40e_status err = 0;
3104 /* some ATR related tx ring init */
3105 if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
3106 ring->atr_sample_rate = vsi->back->atr_sample_rate;
3107 ring->atr_count = 0;
3109 ring->atr_sample_rate = 0;
3113 i40e_config_xps_tx_ring(ring);
3115 /* clear the context structure first */
3116 memset(&tx_ctx, 0, sizeof(tx_ctx));
3118 tx_ctx.new_context = 1;
3119 tx_ctx.base = (ring->dma / 128);
3120 tx_ctx.qlen = ring->count;
3121 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
3122 I40E_FLAG_FD_ATR_ENABLED));
3123 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
3124 /* FDIR VSI tx ring can still use RS bit and writebacks */
3125 if (vsi->type != I40E_VSI_FDIR)
3126 tx_ctx.head_wb_ena = 1;
3127 tx_ctx.head_wb_addr = ring->dma +
3128 (ring->count * sizeof(struct i40e_tx_desc));
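/* The head write-back area sits immediately after the descriptors in
 * the same DMA allocation, letting the hardware report its ring head
 * to memory instead of the driver polling a register.
 */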
3130 /* As part of VSI creation/update, FW allocates certain
3131 * Tx arbitration queue sets for each TC enabled for
3132 * the VSI. The FW returns the handles to these queue
3133 * sets as part of the response buffer to Add VSI,
3134 * Update VSI, etc. AQ commands. It is expected that
3135 * these queue set handles be associated with the Tx
3136 * queues by the driver as part of the TX queue context
3137 * initialization. This has to be done regardless of
3138 * DCB as by default everything is mapped to TC0.
3143 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
3146 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
3148 tx_ctx.rdylist_act = 0;
3150 /* clear the context in the HMC */
3151 err = i40e_clear_lan_tx_queue_context(hw, pf_q);
3153 dev_info(&vsi->back->pdev->dev,
3154 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3155 ring->queue_index, pf_q, err);
3159 /* set the context in the HMC */
3160 err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
3162 dev_info(&vsi->back->pdev->dev,
3163 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3164 ring->queue_index, pf_q, err);
3168 /* Now associate this queue with this PCI function */
3170 if (ring->ch->type == I40E_VSI_VMDQ2)
3171 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3175 qtx_ctl |= (ring->ch->vsi_number <<
3176 I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3177 I40E_QTX_CTL_VFVM_INDX_MASK;
3179 if (vsi->type == I40E_VSI_VMDQ2) {
3180 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
3181 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
3182 I40E_QTX_CTL_VFVM_INDX_MASK;
3184 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
3188 qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
3189 I40E_QTX_CTL_PF_INDX_MASK);
3190 wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
3193 /* cache tail off for easier writes later */
3194 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
3200 * i40e_configure_rx_ring - Configure a receive ring context
3201 * @ring: The Rx ring to configure
3203 * Configure the Rx descriptor ring in the HMC context.
3205 static int i40e_configure_rx_ring(struct i40e_ring *ring)
3207 struct i40e_vsi *vsi = ring->vsi;
3208 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
3209 u16 pf_q = vsi->base_queue + ring->queue_index;
3210 struct i40e_hw *hw = &vsi->back->hw;
3211 struct i40e_hmc_obj_rxq rx_ctx;
3212 i40e_status err = 0;
3214 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
3216 /* clear the context structure first */
3217 memset(&rx_ctx, 0, sizeof(rx_ctx));
3219 ring->rx_buf_len = vsi->rx_buf_len;
3221 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
3222 BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
3224 rx_ctx.base = (ring->dma / 128);
3225 rx_ctx.qlen = ring->count;
3227 /* use 32 byte descriptors */
3230 /* descriptor type is always zero
3233 rx_ctx.hsplit_0 = 0;
3235 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
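/* Cap the maximum receive frame at what can be chained across the
 * hardware's buffer chain length for this buffer size.
 */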
3236 if (hw->revision_id == 0)
3237 rx_ctx.lrxqthresh = 0;
3239 rx_ctx.lrxqthresh = 1;
3240 rx_ctx.crcstrip = 1;
3242 /* this controls whether VLAN is stripped from inner headers */
3244 /* set the prefena field to 1 because the manual says to */
3247 /* clear the context in the HMC */
3248 err = i40e_clear_lan_rx_queue_context(hw, pf_q);
3250 dev_info(&vsi->back->pdev->dev,
3251 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3252 ring->queue_index, pf_q, err);
3256 /* set the context in the HMC */
3257 err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
3259 dev_info(&vsi->back->pdev->dev,
3260 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
3261 ring->queue_index, pf_q, err);
3265 /* configure Rx buffer alignment */
3266 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
3267 clear_ring_build_skb_enabled(ring);
3269 set_ring_build_skb_enabled(ring);
3271 /* cache tail for quicker writes, and clear the reg before use */
3272 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
3273 writel(0, ring->tail);
3275 i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
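/* Pre-fill the ring with receive buffers, leaving one descriptor
 * unused so a full ring is never mistaken for an empty one.
 */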
3281 * i40e_vsi_configure_tx - Configure the VSI for Tx
3282 * @vsi: VSI structure describing this set of rings and resources
3284 * Configure the Tx VSI for operation.
3286 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
3291 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3292 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
3294 if (!i40e_enabled_xdp_vsi(vsi))
3297 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
3298 err = i40e_configure_tx_ring(vsi->xdp_rings[i]);
3304 * i40e_vsi_configure_rx - Configure the VSI for Rx
3305 * @vsi: the VSI being configured
3307 * Configure the Rx VSI for operation.
3309 static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
3314 if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
3315 vsi->max_frame = I40E_MAX_RXBUFFER;
3316 vsi->rx_buf_len = I40E_RXBUFFER_2048;
3317 #if (PAGE_SIZE < 8192)
3318 } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
3319 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
3320 vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3321 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
3324 vsi->max_frame = I40E_MAX_RXBUFFER;
3325 vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
3329 /* set up individual rings */
3330 for (i = 0; i < vsi->num_queue_pairs && !err; i++)
3331 err = i40e_configure_rx_ring(vsi->rx_rings[i]);
3337 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3338 * @vsi: ptr to the VSI
3340 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3342 struct i40e_ring *tx_ring, *rx_ring;
3343 u16 qoffset, qcount;
3346 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3347 /* Reset the TC information */
3348 for (i = 0; i < vsi->num_queue_pairs; i++) {
3349 rx_ring = vsi->rx_rings[i];
3350 tx_ring = vsi->tx_rings[i];
3351 rx_ring->dcb_tc = 0;
3352 tx_ring->dcb_tc = 0;
3357 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3358 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3361 qoffset = vsi->tc_config.tc_info[n].qoffset;
3362 qcount = vsi->tc_config.tc_info[n].qcount;
3363 for (i = qoffset; i < (qoffset + qcount); i++) {
3364 rx_ring = vsi->rx_rings[i];
3365 tx_ring = vsi->tx_rings[i];
3366 rx_ring->dcb_tc = n;
3367 tx_ring->dcb_tc = n;
3373 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3374 * @vsi: ptr to the VSI
3376 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3379 i40e_set_rx_mode(vsi->netdev);
3383 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3384 * @vsi: Pointer to the targeted VSI
3386 * This function replays onto the hardware all the SB Flow Director
3387 * filters that were saved on the hlist.
3389 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3391 struct i40e_fdir_filter *filter;
3392 struct i40e_pf *pf = vsi->back;
3393 struct hlist_node *node;
3395 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3398 /* Reset FDir counters as we're replaying all existing filters */
3399 pf->fd_tcp4_filter_cnt = 0;
3400 pf->fd_udp4_filter_cnt = 0;
3401 pf->fd_sctp4_filter_cnt = 0;
3402 pf->fd_ip4_filter_cnt = 0;
3404 hlist_for_each_entry_safe(filter, node,
3405 &pf->fdir_filter_list, fdir_node) {
3406 i40e_add_del_fdir(vsi, filter, true);
3411 * i40e_vsi_configure - Set up the VSI for action
3412 * @vsi: the VSI being configured
3414 static int i40e_vsi_configure(struct i40e_vsi *vsi)
3418 i40e_set_vsi_rx_mode(vsi);
3419 i40e_restore_vlan(vsi);
3420 i40e_vsi_config_dcb_rings(vsi);
3421 err = i40e_vsi_configure_tx(vsi);
3423 err = i40e_vsi_configure_rx(vsi);
3429 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3430 * @vsi: the VSI being configured
3432 static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
3434 bool has_xdp = i40e_enabled_xdp_vsi(vsi);
3435 struct i40e_pf *pf = vsi->back;
3436 struct i40e_hw *hw = &pf->hw;
3441 /* The interrupt indexing is offset by 1 in the PFINT_ITRn
3442 * and PFINT_LNKLSTn registers, e.g.:
3443 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
3445 qp = vsi->base_queue;
3446 vector = vsi->base_vector;
3447 for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
3448 struct i40e_q_vector *q_vector = vsi->q_vectors[i];
3450 q_vector->rx.next_update = jiffies + 1;
3451 q_vector->rx.target_itr =
3452 ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
3453 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
3454 q_vector->rx.target_itr >> 1);
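/* The ITR registers are programmed in 2 usec units, hence the
 * shift right by one when writing the target values.
 */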
3455 q_vector->rx.current_itr = q_vector->rx.target_itr;
3457 q_vector->tx.next_update = jiffies + 1;
3458 q_vector->tx.target_itr =
3459 ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
3460 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
3461 q_vector->tx.target_itr >> 1);
3462 q_vector->tx.current_itr = q_vector->tx.target_itr;
3464 wr32(hw, I40E_PFINT_RATEN(vector - 1),
3465 i40e_intrl_usec_to_reg(vsi->int_rate_limit));
3467 /* Linked list for the queuepairs assigned to this vector */
3468 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
3469 for (q = 0; q < q_vector->num_ringpairs; q++) {
3470 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
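/* XDP Tx queues sit after the regular queue pairs, so with XDP the
 * interrupt cause chain for each pair is Rx -> XDP Tx -> Tx; without
 * XDP it is simply Rx -> Tx.
 */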
3473 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3474 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3475 (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
3476 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
3477 (I40E_QUEUE_TYPE_TX <<
3478 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
3480 wr32(hw, I40E_QINT_RQCTL(qp), val);
3483 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3484 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3485 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3486 (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3487 (I40E_QUEUE_TYPE_TX <<
3488 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3490 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3493 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3494 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3495 (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
3496 ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
3497 (I40E_QUEUE_TYPE_RX <<
3498 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3500 /* Terminate the linked list */
3501 if (q == (q_vector->num_ringpairs - 1))
3502 val |= (I40E_QUEUE_END_OF_LIST <<
3503 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3505 wr32(hw, I40E_QINT_TQCTL(qp), val);
3514 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3515 * @pf: pointer to private device data structure
3517 static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
3519 struct i40e_hw *hw = &pf->hw;
3522 /* clear things first */
3523 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
3524 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
3526 val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
3527 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
3528 I40E_PFINT_ICR0_ENA_GRST_MASK |
3529 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
3530 I40E_PFINT_ICR0_ENA_GPIO_MASK |
3531 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
3532 I40E_PFINT_ICR0_ENA_VFLR_MASK |
3533 I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3535 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
3536 val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3538 if (pf->flags & I40E_FLAG_PTP)
3539 val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3541 wr32(hw, I40E_PFINT_ICR0_ENA, val);
3543 /* SW_ITR_IDX = 0, but don't change INTENA */
3544 wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
3545 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
3547 /* OTHER_ITR_IDX = 0 */
3548 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
3552 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3553 * @vsi: the VSI being configured
3555 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
3557 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
3558 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3559 struct i40e_pf *pf = vsi->back;
3560 struct i40e_hw *hw = &pf->hw;
3563 /* set the ITR configuration */
3564 q_vector->rx.next_update = jiffies + 1;
3565 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
3566 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1);
3567 q_vector->rx.current_itr = q_vector->rx.target_itr;
3568 q_vector->tx.next_update = jiffies + 1;
3569 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
3570 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1);
3571 q_vector->tx.current_itr = q_vector->tx.target_itr;
3573 i40e_enable_misc_int_causes(pf);
3575 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
3576 wr32(hw, I40E_PFINT_LNKLST0, 0);
3578 /* Associate the queue pair to the vector and enable the queue int */
3579 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
3580 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
3581 (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
3582 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3584 wr32(hw, I40E_QINT_RQCTL(0), val);
3586 if (i40e_enabled_xdp_vsi(vsi)) {
3587 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3588 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
3590 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
3592 wr32(hw, I40E_QINT_TQCTL(nextqp), val);
3595 val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
3596 (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
3597 (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
3599 wr32(hw, I40E_QINT_TQCTL(0), val);
3604 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3605 * @pf: board private structure
3607 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3609 struct i40e_hw *hw = &pf->hw;
3611 wr32(hw, I40E_PFINT_DYN_CTL0,
3612 I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3617 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3618 * @pf: board private structure
3620 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3622 struct i40e_hw *hw = &pf->hw;
3625 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
3626 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3627 (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3629 wr32(hw, I40E_PFINT_DYN_CTL0, val);
3634 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3635 * @irq: interrupt number
3636 * @data: pointer to a q_vector
3638 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3640 struct i40e_q_vector *q_vector = data;
3642 if (!q_vector->tx.ring && !q_vector->rx.ring)
3645 napi_schedule_irqoff(&q_vector->napi);
3651 * i40e_irq_affinity_notify - Callback for affinity changes
3652 * @notify: context as to what irq was changed
3653 * @mask: the new affinity mask
3655 * This is a callback function used by the irq_set_affinity_notifier function
3656 * so that we may register to receive changes to the irq affinity masks.
3658 static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
3659 const cpumask_t *mask)
3661 struct i40e_q_vector *q_vector =
3662 container_of(notify, struct i40e_q_vector, affinity_notify);
3664 cpumask_copy(&q_vector->affinity_mask, mask);
3668 * i40e_irq_affinity_release - Callback for affinity notifier release
3669 * @ref: internal core kernel usage
3671 * This is a callback function used by the irq_set_affinity_notifier function
3672 * to inform the current notification subscriber that they will no longer
3673 * receive notifications.
3675 static void i40e_irq_affinity_release(struct kref *ref) {}
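/* Nothing to free here: the notifier is embedded in the q_vector and
 * is torn down along with it.
 */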
3678 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3679 * @vsi: the VSI being configured
3680 * @basename: name for the vector
3682 * Allocates MSI-X vectors and requests interrupts from the kernel.
3684 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3686 int q_vectors = vsi->num_q_vectors;
3687 struct i40e_pf *pf = vsi->back;
3688 int base = vsi->base_vector;
3695 for (vector = 0; vector < q_vectors; vector++) {
3696 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3698 irq_num = pf->msix_entries[base + vector].vector;
3700 if (q_vector->tx.ring && q_vector->rx.ring) {
3701 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3702 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3704 } else if (q_vector->rx.ring) {
3705 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3706 "%s-%s-%d", basename, "rx", rx_int_idx++);
3707 } else if (q_vector->tx.ring) {
3708 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3709 "%s-%s-%d", basename, "tx", tx_int_idx++);
3711 /* skip this unused q_vector */
3714 err = request_irq(irq_num,
3720 dev_info(&pf->pdev->dev,
3721 "MSIX request_irq failed, error: %d\n", err);
3722 goto free_queue_irqs;
3725 /* register for affinity change notifications */
3726 q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
3727 q_vector->affinity_notify.release = i40e_irq_affinity_release;
3728 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
3729 /* Spread affinity hints out across online CPUs.
3731 * get_cpu_mask returns a static constant mask with
3732 * a permanent lifetime so it's ok to pass to
3733 * irq_set_affinity_hint without making a copy.
3735 cpu = cpumask_local_spread(q_vector->v_idx, -1);
3736 irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
3739 vsi->irqs_ready = true;
3745 irq_num = pf->msix_entries[base + vector].vector;
3746 irq_set_affinity_notifier(irq_num, NULL);
3747 irq_set_affinity_hint(irq_num, NULL);
3748 free_irq(irq_num, &vsi->q_vectors[vector]);
3754 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
3755 * @vsi: the VSI being un-configured
3757 static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
3759 struct i40e_pf *pf = vsi->back;
3760 struct i40e_hw *hw = &pf->hw;
3761 int base = vsi->base_vector;
3764 /* disable interrupt causation from each queue */
3765 for (i = 0; i < vsi->num_queue_pairs; i++) {
3768 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
3769 val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
3770 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
3772 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
3773 val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
3774 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);
3776 if (!i40e_enabled_xdp_vsi(vsi))
3778 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
3781 /* disable each interrupt */
3782 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3783 for (i = vsi->base_vector;
3784 i < (vsi->num_q_vectors + vsi->base_vector); i++)
3785 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);
3788 for (i = 0; i < vsi->num_q_vectors; i++)
3789 synchronize_irq(pf->msix_entries[i + base].vector);
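/* Make sure no handler for this VSI's vectors is still in flight
 * before the caller tears the rings down.
 */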
3791 /* Legacy and MSI mode - this stops all interrupt handling */
3792 wr32(hw, I40E_PFINT_ICR0_ENA, 0);
3793 wr32(hw, I40E_PFINT_DYN_CTL0, 0);
3795 synchronize_irq(pf->pdev->irq);
3800 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3801 * @vsi: the VSI being configured
3803 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3805 struct i40e_pf *pf = vsi->back;
3808 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3809 for (i = 0; i < vsi->num_q_vectors; i++)
3810 i40e_irq_dynamic_enable(vsi, i);
3812 i40e_irq_dynamic_enable_icr0(pf);
3815 i40e_flush(&pf->hw);
3820 * i40e_free_misc_vector - Free the vector that handles non-queue events
3821 * @pf: board private structure
3823 static void i40e_free_misc_vector(struct i40e_pf *pf)
3826 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3827 i40e_flush(&pf->hw);
3829 if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
3830 synchronize_irq(pf->msix_entries[0].vector);
3831 free_irq(pf->msix_entries[0].vector, pf);
3832 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
3837 * i40e_intr - MSI/Legacy and non-queue interrupt handler
3838 * @irq: interrupt number
3839 * @data: pointer to a q_vector
3841 * This is the handler used for all MSI/Legacy interrupts, and deals
3842 * with both queue and non-queue interrupts. This is also used in
3843 * MSIX mode to handle the non-queue interrupts.
3845 static irqreturn_t i40e_intr(int irq, void *data)
3847 struct i40e_pf *pf = (struct i40e_pf *)data;
3848 struct i40e_hw *hw = &pf->hw;
3849 irqreturn_t ret = IRQ_NONE;
3850 u32 icr0, icr0_remaining;
3853 icr0 = rd32(hw, I40E_PFINT_ICR0);
3854 ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
3856 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3857 if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
3860 /* if interrupt but no bits showing, must be SWINT */
3861 if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
3862 (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
3865 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
3866 (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
3867 ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
3868 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
3869 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
3872 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3873 if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
3874 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
3875 struct i40e_q_vector *q_vector = vsi->q_vectors[0];
3877 /* We do not have a way to disarm Queue causes while leaving
3878 * the interrupt enabled for all other causes; ideally the
3879 * interrupt should be disabled while we are in NAPI, but
3880 * this is not a performance path and napi_schedule()
3881 * can deal with rescheduling.
3883 if (!test_bit(__I40E_DOWN, pf->state))
3884 napi_schedule_irqoff(&q_vector->napi);
3887 if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
3888 ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
3889 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
3890 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
3893 if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
3894 ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
3895 set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
3898 if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
3899 /* disable any further VFLR event notifications */
3900 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) {
3901 u32 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
3903 reg &= ~I40E_PFINT_ICR0_VFLR_MASK;
3904 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
3906 ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
3907 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
3911 if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
3912 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
3913 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
3914 ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
3915 val = rd32(hw, I40E_GLGEN_RSTAT);
3916 val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
3917 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
3918 if (val == I40E_RESET_CORER) {
3920 } else if (val == I40E_RESET_GLOBR) {
3922 } else if (val == I40E_RESET_EMPR) {
3924 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
3928 if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
3929 icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
3930 dev_info(&pf->pdev->dev, "HMC error interrupt\n");
3931 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
3932 rd32(hw, I40E_PFHMC_ERRORINFO),
3933 rd32(hw, I40E_PFHMC_ERRORDATA));
3936 if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
3937 u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);
3939 if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
3940 icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
3941 i40e_ptp_tx_hwtstamp(pf);
3945 /* If a critical error is pending we have no choice but to reset the
3946 * device.
3947 * Report and mask out any remaining unexpected interrupts.
3949 icr0_remaining = icr0 & ena_mask;
3950 if (icr0_remaining) {
3951 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
3953 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
3954 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
3955 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
3956 dev_info(&pf->pdev->dev, "device will be reset\n");
3957 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
3958 i40e_service_event_schedule(pf);
3960 ena_mask &= ~icr0_remaining;
3965 /* re-enable interrupt causes */
3966 wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
3967 if (!test_bit(__I40E_DOWN, pf->state)) {
3968 i40e_service_event_schedule(pf);
3969 i40e_irq_dynamic_enable_icr0(pf);
3976 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
3977 * @tx_ring: tx ring to clean
3978 * @budget: how many cleans we're allowed
3980 * Returns true if there's any budget left (i.e. the clean is finished)
3982 static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
3984 struct i40e_vsi *vsi = tx_ring->vsi;
3985 u16 i = tx_ring->next_to_clean;
3986 struct i40e_tx_buffer *tx_buf;
3987 struct i40e_tx_desc *tx_desc;
3989 tx_buf = &tx_ring->tx_bi[i];
3990 tx_desc = I40E_TX_DESC(tx_ring, i);
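/* Bias the index negative so the wrap checks below reduce to a test
 * against zero; it is re-biased by adding the ring count at the end.
 */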
3991 i -= tx_ring->count;
3994 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
3996 /* if next_to_watch is not set then there is no work pending */
4000 /* prevent any other reads prior to eop_desc */
4003 /* if the descriptor isn't done, no work yet to do */
4004 if (!(eop_desc->cmd_type_offset_bsz &
4005 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
4008 /* clear next_to_watch to prevent false hangs */
4009 tx_buf->next_to_watch = NULL;
4011 tx_desc->buffer_addr = 0;
4012 tx_desc->cmd_type_offset_bsz = 0;
4013 /* move past filter desc */
4018 i -= tx_ring->count;
4019 tx_buf = tx_ring->tx_bi;
4020 tx_desc = I40E_TX_DESC(tx_ring, 0);
4022 /* unmap skb header data */
4023 dma_unmap_single(tx_ring->dev,
4024 dma_unmap_addr(tx_buf, dma),
4025 dma_unmap_len(tx_buf, len),
4027 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
4028 kfree(tx_buf->raw_buf);
4030 tx_buf->raw_buf = NULL;
4031 tx_buf->tx_flags = 0;
4032 tx_buf->next_to_watch = NULL;
4033 dma_unmap_len_set(tx_buf, len, 0);
4034 tx_desc->buffer_addr = 0;
4035 tx_desc->cmd_type_offset_bsz = 0;
4037 /* move us past the eop_desc for start of next FD desc */
4042 i -= tx_ring->count;
4043 tx_buf = tx_ring->tx_bi;
4044 tx_desc = I40E_TX_DESC(tx_ring, 0);
4047 /* update budget accounting */
4049 } while (likely(budget));
4051 i += tx_ring->count;
4052 tx_ring->next_to_clean = i;
4054 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
4055 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
4061 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4062 * @irq: interrupt number
4063 * @data: pointer to a q_vector
4065 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
4067 struct i40e_q_vector *q_vector = data;
4068 struct i40e_vsi *vsi;
4070 if (!q_vector->tx.ring)
4073 vsi = q_vector->tx.ring->vsi;
4074 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
4080 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4081 * @vsi: the VSI being configured
4082 * @v_idx: vector index
4083 * @qp_idx: queue pair index
4085 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
4087 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4088 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
4089 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
4091 tx_ring->q_vector = q_vector;
4092 tx_ring->next = q_vector->tx.ring;
4093 q_vector->tx.ring = tx_ring;
4094 q_vector->tx.count++;
4096 /* Place XDP Tx ring in the same q_vector ring list as regular Tx */
4097 if (i40e_enabled_xdp_vsi(vsi)) {
4098 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];
4100 xdp_ring->q_vector = q_vector;
4101 xdp_ring->next = q_vector->tx.ring;
4102 q_vector->tx.ring = xdp_ring;
4103 q_vector->tx.count++;
4106 rx_ring->q_vector = q_vector;
4107 rx_ring->next = q_vector->rx.ring;
4108 q_vector->rx.ring = rx_ring;
4109 q_vector->rx.count++;
4113 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4114 * @vsi: the VSI being configured
4116 * This function maps descriptor rings to the queue-specific vectors
4117 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4118 * one vector per queue pair, but on a constrained vector budget, we
4119 * group the queue pairs as "efficiently" as possible.
4121 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
4123 int qp_remaining = vsi->num_queue_pairs;
4124 int q_vectors = vsi->num_q_vectors;
4129 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
4130 * group them so there are multiple queues per vector.
 * It is also important to go through all the vectors available to be
 * sure that, if we don't use all the vectors, the remaining vectors
 * are cleared. This is especially important when decreasing the
4134 * number of queues in use.
4136 for (; v_start < q_vectors; v_start++) {
4137 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
4139 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
4141 q_vector->num_ringpairs = num_ringpairs;
4142 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;
4144 q_vector->rx.count = 0;
4145 q_vector->tx.count = 0;
4146 q_vector->rx.ring = NULL;
4147 q_vector->tx.ring = NULL;
4149 while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}
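/* Illustrative example of the vector/queue-pair distribution above (the
 * counts are hypothetical): with 10 queue pairs and 4 vectors,
 * DIV_ROUND_UP(qp_remaining, q_vectors - v_start) yields 3, 3, 2, 2, so the
 * first two vectors each service three queue pairs and the last two service
 * two. With 4 queue pairs and 8 vectors, the first four vectors get one
 * queue pair each and the remaining vectors are left with their ring lists
 * cleared.
 */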
4158 * i40e_vsi_request_irq - Request IRQ from the OS
4159 * @vsi: the VSI being configured
4160 * @basename: name for the vector
4162 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
4164 struct i40e_pf *pf = vsi->back;
4167 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4168 err = i40e_vsi_request_irq_msix(vsi, basename);
4169 else if (pf->flags & I40E_FLAG_MSI_ENABLED)
4170 err = request_irq(pf->pdev->irq, i40e_intr, 0,
4173 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
4177 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
4182 #ifdef CONFIG_NET_POLL_CONTROLLER
4184 * i40e_netpoll - A Polling 'interrupt' handler
4185 * @netdev: network interface device structure
4187 * This is used by netconsole to send skbs without having to re-enable
4188 * interrupts. It's not called while the normal interrupt routine is executing.
4190 static void i40e_netpoll(struct net_device *netdev)
4192 struct i40e_netdev_priv *np = netdev_priv(netdev);
4193 struct i40e_vsi *vsi = np->vsi;
4194 struct i40e_pf *pf = vsi->back;
4197 /* if interface is down do nothing */
4198 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4201 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4202 for (i = 0; i < vsi->num_q_vectors; i++)
4203 i40e_msix_clean_rings(0, vsi->q_vectors[i]);
4205 i40e_intr(pf->pdev->irq, netdev);
4210 #define I40E_QTX_ENA_WAIT_COUNT 50
4213 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4214 * @pf: the PF being configured
4215 * @pf_q: the PF queue
4216 * @enable: enable or disable state of the queue
4218 * This routine will wait for the given Tx queue of the PF to reach the
4219 * enabled or disabled state.
4220 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4221 * multiple retries; else will return 0 in case of success.
4223 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4228 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4229 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
4230 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4233 usleep_range(10, 20);
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
4242 * i40e_control_tx_q - Start or stop a particular Tx queue
4243 * @pf: the PF structure
4244 * @pf_q: the PF queue to configure
4245 * @enable: start or stop the queue
4247 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
4251 static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
4253 struct i40e_hw *hw = &pf->hw;
4257 /* warn the TX unit of coming changes */
4258 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
4260 usleep_range(10, 20);
4262 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4263 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
4264 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
4265 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
4267 usleep_range(1000, 2000);
4270 /* Skip if the queue is already in the requested state */
4271 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
4274 /* turn on/off the queue */
4276 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
4277 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
4279 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
4282 wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
4286 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
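 * @seid: VSI SEID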
4288 * @pf: the PF structure
4289 * @pf_q: the PF queue to configure
4290 * @is_xdp: true if the queue is used for XDP
4291 * @enable: start or stop the queue
4293 int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
4294 bool is_xdp, bool enable)
4298 i40e_control_tx_q(pf, pf_q, enable);
4300 /* wait for the change to finish */
4301 ret = i40e_pf_txq_wait(pf, pf_q, enable);
4303 dev_info(&pf->pdev->dev,
4304 "VSI seid %d %sTx ring %d %sable timeout\n",
4305 seid, (is_xdp ? "XDP " : ""), pf_q,
4306 (enable ? "en" : "dis"));
4313 * i40e_vsi_enable_tx - Start a VSI's rings
4314 * @vsi: the VSI being configured
4316 static int i40e_vsi_enable_tx(struct i40e_vsi *vsi)
4318 struct i40e_pf *pf = vsi->back;
4319 int i, pf_q, ret = 0;
4321 pf_q = vsi->base_queue;
4322 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4323 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4325 false /*is xdp*/, true);
4329 if (!i40e_enabled_xdp_vsi(vsi))
4332 ret = i40e_control_wait_tx_q(vsi->seid, pf,
4333 pf_q + vsi->alloc_queue_pairs,
4334 true /*is xdp*/, true);
4342 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4343 * @pf: the PF being configured
4344 * @pf_q: the PF queue
4345 * @enable: enable or disable state of the queue
4347 * This routine will wait for the given Rx queue of the PF to reach the
4348 * enabled or disabled state.
4349 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4350 * multiple retries; else will return 0 in case of success.
4352 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
4357 for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
4358 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
4359 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4362 usleep_range(10, 20);
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
4371 * i40e_control_rx_q - Start or stop a particular Rx queue
4372 * @pf: the PF structure
4373 * @pf_q: the PF queue to configure
4374 * @enable: start or stop the queue
4376 * This function enables or disables a single queue. Note that
4377 * any delay required after the operation is expected to be
4378 * handled by the caller of this function.
4380 static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4382 struct i40e_hw *hw = &pf->hw;
4386 for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
4387 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
4388 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
4389 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
4391 usleep_range(1000, 2000);
4394 /* Skip if the queue is already in the requested state */
4395 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
4398 /* turn on/off the queue */
4400 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
4402 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
4404 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
 * i40e_control_wait_rx_q - Start/stop Rx queue and wait for completion
4409 * @pf: the PF structure
4410 * @pf_q: queue being configured
4411 * @enable: start or stop the rings
4413 * This function enables or disables a single queue along with waiting
4414 * for the change to finish. The caller of this function should handle
4415 * the delays needed in the case of disabling queues.
4417 int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
4421 i40e_control_rx_q(pf, pf_q, enable);
4423 /* wait for the change to finish */
4424 ret = i40e_pf_rxq_wait(pf, pf_q, enable);
4432 * i40e_vsi_enable_rx - Start a VSI's rings
4433 * @vsi: the VSI being configured
4435 static int i40e_vsi_enable_rx(struct i40e_vsi *vsi)
4437 struct i40e_pf *pf = vsi->back;
4438 int i, pf_q, ret = 0;
4440 pf_q = vsi->base_queue;
4441 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4442 ret = i40e_control_wait_rx_q(pf, pf_q, true);
4444 dev_info(&pf->pdev->dev,
4445 "VSI seid %d Rx ring %d enable timeout\n",
4455 * i40e_vsi_start_rings - Start a VSI's rings
4456 * @vsi: the VSI being configured
4458 int i40e_vsi_start_rings(struct i40e_vsi *vsi)
4462 /* do rx first for enable and last for disable */
4463 ret = i40e_vsi_enable_rx(vsi);
4466 ret = i40e_vsi_enable_tx(vsi);
4471 #define I40E_DISABLE_TX_GAP_MSEC 50
4474 * i40e_vsi_stop_rings - Stop a VSI's rings
4475 * @vsi: the VSI being configured
4477 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
4479 struct i40e_pf *pf = vsi->back;
4480 int pf_q, err, q_end;
4482 /* When port TX is suspended, don't wait */
4483 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
4484 return i40e_vsi_stop_rings_no_wait(vsi);
4486 q_end = vsi->base_queue + vsi->num_queue_pairs;
4487 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4488 i40e_pre_tx_queue_cfg(&pf->hw, (u32)pf_q, false);
4490 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++) {
4491 err = i40e_control_wait_rx_q(pf, pf_q, false);
4493 dev_info(&pf->pdev->dev,
4494 "VSI seid %d Rx ring %d dissable timeout\n",
4498 msleep(I40E_DISABLE_TX_GAP_MSEC);
4499 pf_q = vsi->base_queue;
4500 for (pf_q = vsi->base_queue; pf_q < q_end; pf_q++)
4501 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0);
4503 i40e_vsi_wait_queues_disabled(vsi);
4507 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4508 * @vsi: the VSI being shutdown
4510 * This function stops all the rings for a VSI but does not delay to verify
4511 * that rings have been disabled. It is expected that the caller is shutting
4512 * down multiple VSIs at once and will delay together for all the VSIs after
4513 * initiating the shutdown. This is particularly useful for shutting down lots
4514 * of VFs together. Otherwise, a large delay can be incurred while configuring
4515 * each VSI in serial.
4517 void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
4519 struct i40e_pf *pf = vsi->back;
4522 pf_q = vsi->base_queue;
4523 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4524 i40e_control_tx_q(pf, pf_q, false);
		i40e_control_rx_q(pf, pf_q, false);
	}
}
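/* Minimal usage sketch for the batched shutdown described above; the VSI
 * array and loop bounds are illustrative, not an existing API:
 *
 *	for (i = 0; i < num_vsis; i++)
 *		i40e_vsi_stop_rings_no_wait(vsis[i]);
 *	(one shared settling delay here)
 *	for (i = 0; i < num_vsis; i++)
 *		i40e_vsi_wait_queues_disabled(vsis[i]);
 *
 * The stop is requested for every VSI up front and the delay is paid once
 * for the whole group instead of once per VSI.
 */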
4530 * i40e_vsi_free_irq - Free the irq association with the OS
4531 * @vsi: the VSI being configured
4533 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
4535 struct i40e_pf *pf = vsi->back;
4536 struct i40e_hw *hw = &pf->hw;
4537 int base = vsi->base_vector;
4541 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4542 if (!vsi->q_vectors)
4545 if (!vsi->irqs_ready)
4548 vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;
			int irq_num = pf->msix_entries[vector].vector;
4556 /* free only the irqs that were actually requested */
4557 if (!vsi->q_vectors[i] ||
4558 !vsi->q_vectors[i]->num_ringpairs)
4561 /* clear the affinity notifier in the IRQ descriptor */
4562 irq_set_affinity_notifier(irq_num, NULL);
4563 /* remove our suggested affinity mask for this IRQ */
4564 irq_set_affinity_hint(irq_num, NULL);
4565 synchronize_irq(irq_num);
4566 free_irq(irq_num, vsi->q_vectors[i]);
4568 /* Tear down the interrupt queue link list
4570 * We know that they come in pairs and always
4571 * the Rx first, then the Tx. To clear the
4572 * link list, stick the EOL value into the
4573 * next_q field of the registers.
4575 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
4576 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4577 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4578 val |= I40E_QUEUE_END_OF_LIST
4579 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4580 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
4582 while (qp != I40E_QUEUE_END_OF_LIST) {
4585 val = rd32(hw, I40E_QINT_RQCTL(qp));
4587 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4588 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4589 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4590 I40E_QINT_RQCTL_INTEVENT_MASK);
4592 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4593 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4595 wr32(hw, I40E_QINT_RQCTL(qp), val);
4597 val = rd32(hw, I40E_QINT_TQCTL(qp));
4599 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
4600 >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
4602 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4603 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4604 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4605 I40E_QINT_TQCTL_INTEVENT_MASK);
4607 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4608 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
4610 wr32(hw, I40E_QINT_TQCTL(qp), val);
4615 free_irq(pf->pdev->irq, pf);
4617 val = rd32(hw, I40E_PFINT_LNKLST0);
4618 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
4619 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
4620 val |= I40E_QUEUE_END_OF_LIST
4621 << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4622 wr32(hw, I40E_PFINT_LNKLST0, val);
4624 val = rd32(hw, I40E_QINT_RQCTL(qp));
4625 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK |
4626 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
4627 I40E_QINT_RQCTL_CAUSE_ENA_MASK |
4628 I40E_QINT_RQCTL_INTEVENT_MASK);
4630 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
4631 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
4633 wr32(hw, I40E_QINT_RQCTL(qp), val);
4635 val = rd32(hw, I40E_QINT_TQCTL(qp));
4637 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK |
4638 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
4639 I40E_QINT_TQCTL_CAUSE_ENA_MASK |
4640 I40E_QINT_TQCTL_INTEVENT_MASK);
4642 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
4643 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
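/* The teardown above walks the per-vector queue interrupt list, which is
 * chained roughly as follows (a reading aid, not an exhaustive register
 * description):
 *
 *   PFINT_LNKLSTN[v] / PFINT_LNKLST0 --FIRSTQ_INDX--> first queue pair qp
 *   QINT_RQCTL[qp] --NEXTQ_INDX--> Tx side of the same queue pair
 *   QINT_TQCTL[qp] --NEXTQ_INDX--> next queue pair, or I40E_QUEUE_END_OF_LIST
 *
 * Writing I40E_QUEUE_END_OF_LIST into FIRSTQ_INDX detaches the whole chain
 * from the vector, and each RQCTL/TQCTL entry is then scrubbed of its cause
 * enable and MSI-X index bits.
 */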
4650 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4651 * @vsi: the VSI being configured
4652 * @v_idx: Index of vector to be freed
4654 * This function frees the memory allocated to the q_vector. In addition if
4655 * NAPI is enabled it will delete any references to the NAPI struct prior
4656 * to freeing the q_vector.
4658 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4660 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4661 struct i40e_ring *ring;
4666 /* disassociate q_vector from rings */
4667 i40e_for_each_ring(ring, q_vector->tx)
4668 ring->q_vector = NULL;
4670 i40e_for_each_ring(ring, q_vector->rx)
4671 ring->q_vector = NULL;
4673 /* only VSI w/ an associated netdev is set up w/ NAPI */
4675 netif_napi_del(&q_vector->napi);
4677 vsi->q_vectors[v_idx] = NULL;
4679 kfree_rcu(q_vector, rcu);
4683 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4684 * @vsi: the VSI being un-configured
4686 * This frees the memory allocated to the q_vectors and
4687 * deletes references to the NAPI struct.
4689 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4693 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4694 i40e_free_q_vector(vsi, v_idx);
4698 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
4699 * @pf: board private structure
4701 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
4703 /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
4704 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4705 pci_disable_msix(pf->pdev);
4706 kfree(pf->msix_entries);
4707 pf->msix_entries = NULL;
4708 kfree(pf->irq_pile);
4709 pf->irq_pile = NULL;
4710 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
4711 pci_disable_msi(pf->pdev);
4713 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
4717 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4718 * @pf: board private structure
4720 * We go through and clear interrupt specific resources and reset the structure
4721 * to pre-load conditions
4723 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4727 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state))
4728 i40e_free_misc_vector(pf);
4730 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
4731 I40E_IWARP_IRQ_PILE_ID);
4733 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4734 for (i = 0; i < pf->num_alloc_vsi; i++)
4736 i40e_vsi_free_q_vectors(pf->vsi[i]);
4737 i40e_reset_interrupt_capability(pf);
4741 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4742 * @vsi: the VSI being configured
4744 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4751 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4752 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4754 if (q_vector->rx.ring || q_vector->tx.ring)
4755 napi_enable(&q_vector->napi);
4760 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4761 * @vsi: the VSI being configured
4763 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4770 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4771 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4773 if (q_vector->rx.ring || q_vector->tx.ring)
4774 napi_disable(&q_vector->napi);
4779 * i40e_vsi_close - Shut down a VSI
4780 * @vsi: the vsi to be quelled
4782 static void i40e_vsi_close(struct i40e_vsi *vsi)
4784 struct i40e_pf *pf = vsi->back;
	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
4788 i40e_vsi_free_tx_resources(vsi);
4789 i40e_vsi_free_rx_resources(vsi);
4790 vsi->current_netdev_flags = 0;
4791 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
4792 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
4793 set_bit(__I40E_CLIENT_RESET, pf->state);
4797 * i40e_quiesce_vsi - Pause a given VSI
4798 * @vsi: the VSI being paused
4800 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}
4813 * i40e_unquiesce_vsi - Resume a given VSI
4814 * @vsi: the VSI being resumed
4816 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
		return;

	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi); /* this clears the DOWN bit */
}
4828 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4831 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4835 for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
4842 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4845 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4849 for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
4856 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
4857 * @vsi: the VSI being configured
4859 * Wait until all queues on a given VSI have been disabled.
4861 int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
4863 struct i40e_pf *pf = vsi->back;
4866 pf_q = vsi->base_queue;
4867 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4868 /* Check and wait for the Tx queue */
4869 ret = i40e_pf_txq_wait(pf, pf_q, false);
4871 dev_info(&pf->pdev->dev,
4872 "VSI seid %d Tx ring %d disable timeout\n",
4877 if (!i40e_enabled_xdp_vsi(vsi))
4880 /* Check and wait for the XDP Tx queue */
4881 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
4884 dev_info(&pf->pdev->dev,
4885 "VSI seid %d XDP Tx ring %d disable timeout\n",
4890 /* Check and wait for the Rx queue */
4891 ret = i40e_pf_rxq_wait(pf, pf_q, false);
4893 dev_info(&pf->pdev->dev,
4894 "VSI seid %d Rx ring %d disable timeout\n",
4903 #ifdef CONFIG_I40E_DCB
4905 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
4908 * This function waits for the queues to be in disabled state for all the
4909 * VSIs that are managed by this PF.
4911 static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
4915 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4917 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
4929 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4930 * @pf: pointer to PF
 * Get TC map for iSCSI PF type that will include iSCSI TC
4935 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4937 struct i40e_dcb_app_priority_table app;
4938 struct i40e_hw *hw = &pf->hw;
4939 u8 enabled_tc = 1; /* TC0 is always enabled */
4941 /* Get the iSCSI APP TLV */
4942 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4944 for (i = 0; i < dcbcfg->numapps; i++) {
4945 app = dcbcfg->app[i];
4946 if (app.selector == I40E_APP_SEL_TCPIP &&
4947 app.protocolid == I40E_APP_PROTOID_ISCSI) {
4948 tc = dcbcfg->etscfg.prioritytable[app.priority];
4949 enabled_tc |= BIT(tc);
4958 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
4959 * @dcbcfg: the corresponding DCBx configuration structure
4961 * Return the number of TCs from given DCBx configuration
4963 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4965 int i, tc_unused = 0;
4969 /* Scan the ETS Config Priority Table to find
4970 * traffic class enabled for a given priority
4971 * and create a bitmask of enabled TCs
4973 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
4974 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
4976 /* Now scan the bitmask to check for
4977 * contiguous TCs starting with TC0
4979 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4980 if (num_tc & BIT(i)) {
4984 pr_err("Non-contiguous TC - Disabling DCB\n");
4992 /* There is always at least TC0 */
5000 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5001 * @dcbcfg: the corresponding DCBx configuration structure
5003 * Query the current DCB configuration and return the number of
5004 * traffic classes enabled from the given DCBX config
5006 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
5008 u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
5012 for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}
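/* Worked example for the two helpers above (the priority table is an assumed
 * configuration, not read from hardware): an ETS priority table of
 * { 0, 0, 1, 1, 2, 2, 0, 0 } sets bits 0-2 in the scan, the TCs are
 * contiguous from TC0, so i40e_dcb_get_num_tc() reports 3 and
 * i40e_dcb_get_enabled_tc() returns the bitmap 0x7. A table that maps some
 * priority to TC2 but none to TC1 would trip the non-contiguous check and
 * fall back to a single TC.
 */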
5019 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5020 * @pf: PF being queried
5022 * Query the current MQPRIO configuration and return the number of
5023 * traffic classes enabled.
5025 static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
5027 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5028 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
5029 u8 enabled_tc = 1, i;
5031 for (i = 1; i < num_tc; i++)
5032 enabled_tc |= BIT(i);
5037 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5038 * @pf: PF being queried
5040 * Return number of traffic classes enabled for the given PF
5042 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
5044 struct i40e_hw *hw = &pf->hw;
5045 u8 i, enabled_tc = 1;
5047 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5049 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5050 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
5052 /* If neither MQPRIO nor DCB is enabled, then always use single TC */
5053 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5056 /* SFP mode will be enabled for all TCs on port */
5057 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5058 return i40e_dcb_get_num_tc(dcbcfg);
5060 /* MFP mode return count of enabled TCs for this PF */
5061 if (pf->hw.func_caps.iscsi)
5062 enabled_tc = i40e_get_iscsi_tc_map(pf);
5064 return 1; /* Only TC0 */
5066 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5067 if (enabled_tc & BIT(i))
5074 * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
5075 * @pf: PF being queried
5077 * Return a bitmap for enabled traffic classes for this PF.
5079 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
5081 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5082 return i40e_mqprio_get_enabled_tc(pf);
5084 /* If neither MQPRIO nor DCB is enabled for this PF then just return
5087 if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
5088 return I40E_DEFAULT_TRAFFIC_CLASS;
5090 /* SFP mode we want PF to be enabled for all TCs */
5091 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
5092 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
5094 /* MFP enabled and iSCSI PF type */
5095 if (pf->hw.func_caps.iscsi)
5096 return i40e_get_iscsi_tc_map(pf);
5098 return I40E_DEFAULT_TRAFFIC_CLASS;
5102 * i40e_vsi_get_bw_info - Query VSI BW Information
5103 * @vsi: the VSI being queried
5105 * Returns 0 on success, negative value on failure
5107 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
5109 struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
5110 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5111 struct i40e_pf *pf = vsi->back;
5112 struct i40e_hw *hw = &pf->hw;
5117 /* Get the VSI level BW configuration */
5118 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
5120 dev_info(&pf->pdev->dev,
5121 "couldn't get PF vsi bw config, err %s aq_err %s\n",
5122 i40e_stat_str(&pf->hw, ret),
5123 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5127 /* Get the VSI level BW configuration per TC */
5128 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
5131 dev_info(&pf->pdev->dev,
5132 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
5133 i40e_stat_str(&pf->hw, ret),
5134 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5138 if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
5139 dev_info(&pf->pdev->dev,
5140 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
5141 bw_config.tc_valid_bits,
5142 bw_ets_config.tc_valid_bits);
5143 /* Still continuing */
5146 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
5147 vsi->bw_max_quanta = bw_config.max_bw;
5148 tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
5149 (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
5150 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5151 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
5152 vsi->bw_ets_limit_credits[i] =
5153 le16_to_cpu(bw_ets_config.credits[i]);
5154 /* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i * 4)) & 0x7);
	}

	return 0;
}
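/* Layout note for the loop above (a reading aid, not additional logic):
 * tc_bw_max packs one 4-bit field per TC, of which only the low 3 bits are
 * used, across two little-endian 16-bit words. For example, if the combined
 * 32-bit value were 0x00004321, TC0..TC3 would get max quanta 1, 2, 3 and 4
 * respectively via (tc_bw_max >> (i * 4)) & 0x7.
 */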
5162 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5163 * @vsi: the VSI being configured
5164 * @enabled_tc: TC bitmap
5165 * @bw_share: BW shared credits per TC
5167 * Returns 0 on success, negative value on failure
5169 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
5172 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5173 struct i40e_pf *pf = vsi->back;
5177 /* There is no need to reset BW when mqprio mode is on. */
5178 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5180 if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5181 ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
5183 dev_info(&pf->pdev->dev,
5184 "Failed to reset tx rate for vsi->seid %u\n",
5188 bw_data.tc_valid_bits = enabled_tc;
5189 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5190 bw_data.tc_bw_credits[i] = bw_share[i];
5192 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
5194 dev_info(&pf->pdev->dev,
5195 "AQ command Config VSI BW allocation per TC failed = %d\n",
5196 pf->hw.aq.asq_last_status);
5200 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5201 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
5207 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5208 * @vsi: the VSI being configured
5209 * @enabled_tc: TC map to be enabled
5212 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5214 struct net_device *netdev = vsi->netdev;
5215 struct i40e_pf *pf = vsi->back;
5216 struct i40e_hw *hw = &pf->hw;
5219 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
5225 netdev_reset_tc(netdev);
5229 /* Set up actual enabled TCs on the VSI */
5230 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
5233 /* set per TC queues for the VSI */
5234 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5235 /* Only set TC queues for enabled tcs
5237 * e.g. For a VSI that has TC0 and TC3 enabled the
5238 * enabled_tc bitmap would be 0x00001001; the driver
5239 * will set the numtc for netdev as 2 that will be
5240 * referenced by the netdev layer as TC 0 and 1.
5242 if (vsi->tc_config.enabled_tc & BIT(i))
5243 netdev_set_tc_queue(netdev,
5244 vsi->tc_config.tc_info[i].netdev_tc,
5245 vsi->tc_config.tc_info[i].qcount,
5246 vsi->tc_config.tc_info[i].qoffset);
5249 if (pf->flags & I40E_FLAG_TC_MQPRIO)
5252 /* Assign UP2TC map for the VSI */
5253 for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
5254 /* Get the actual TC# for the UP */
5255 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
5256 /* Get the mapped netdev TC# for the UP */
5257 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
5258 netdev_set_prio_tc_map(netdev, i, netdev_tc);
5263 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
5264 * @vsi: the VSI being configured
5265 * @ctxt: the ctxt buffer returned from AQ VSI update param command
5267 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
5268 struct i40e_vsi_context *ctxt)
5270 /* copy just the sections touched not the entire info
5271 * since not all sections are valid as returned by
5274 vsi->info.mapping_flags = ctxt->info.mapping_flags;
5275 memcpy(&vsi->info.queue_mapping,
5276 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
5277 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
5278 sizeof(vsi->info.tc_mapping));
5282 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5283 * @vsi: VSI to be configured
5284 * @enabled_tc: TC bitmap
5286 * This configures a particular VSI for TCs that are mapped to the
5287 * given TC bitmap. It uses default bandwidth share for TCs across
5288 * VSIs to configure TC for a particular VSI.
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
5294 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
5296 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5297 struct i40e_pf *pf = vsi->back;
5298 struct i40e_hw *hw = &pf->hw;
5299 struct i40e_vsi_context ctxt;
5303 /* Check if enabled_tc is same as existing or new TCs */
5304 if (vsi->tc_config.enabled_tc == enabled_tc &&
5305 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
5308 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5309 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5310 if (enabled_tc & BIT(i))
5314 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5316 struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
5318 dev_info(&pf->pdev->dev,
5319 "Failed configuring TC map %d for VSI %d\n",
5320 enabled_tc, vsi->seid);
5321 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
5324 dev_info(&pf->pdev->dev,
5325 "Failed querying vsi bw info, err %s aq_err %s\n",
5326 i40e_stat_str(hw, ret),
5327 i40e_aq_str(hw, hw->aq.asq_last_status));
5330 if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
5331 u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;
5334 valid_tc = bw_config.tc_valid_bits;
5335 /* Always enable TC0, no matter what */
5337 dev_info(&pf->pdev->dev,
5338 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
5339 enabled_tc, bw_config.tc_valid_bits, valid_tc);
5340 enabled_tc = valid_tc;
5343 ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
5345 dev_err(&pf->pdev->dev,
5346 "Unable to configure TC map %d for VSI %d\n",
5347 enabled_tc, vsi->seid);
5352 /* Update Queue Pairs Mapping for currently enabled UPs */
5353 ctxt.seid = vsi->seid;
5354 ctxt.pf_num = vsi->back->hw.pf_id;
5356 ctxt.uplink_seid = vsi->uplink_seid;
5357 ctxt.info = vsi->info;
5358 if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
5359 ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
5363 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
	/* On destroying the qdisc, reset vsi->rss_size, as the number of
	 * enabled queues could have changed.
	 */
5369 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
5370 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
5371 vsi->num_queue_pairs);
5372 ret = i40e_vsi_config_rss(vsi);
5374 dev_info(&vsi->back->pdev->dev,
5375 "Failed to reconfig rss for num_queues\n");
5378 vsi->reconfig_rss = false;
5380 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
5381 ctxt.info.valid_sections |=
5382 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
5383 ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
5386 /* Update the VSI after updating the VSI queue-mapping
5389 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
5391 dev_info(&pf->pdev->dev,
5392 "Update vsi tc config failed, err %s aq_err %s\n",
5393 i40e_stat_str(hw, ret),
5394 i40e_aq_str(hw, hw->aq.asq_last_status));
5397 /* update the local VSI info with updated queue map */
5398 i40e_vsi_update_queue_map(vsi, &ctxt);
5399 vsi->info.valid_sections = 0;
5401 /* Update current VSI BW information */
5402 ret = i40e_vsi_get_bw_info(vsi);
5404 dev_info(&pf->pdev->dev,
5405 "Failed updating vsi bw info, err %s aq_err %s\n",
5406 i40e_stat_str(hw, ret),
5407 i40e_aq_str(hw, hw->aq.asq_last_status));
5411 /* Update the netdev TC setup */
5412 i40e_vsi_config_netdev_tc(vsi, enabled_tc);
5418 * i40e_get_link_speed - Returns link speed for the interface
5419 * @vsi: VSI to be configured
5422 static int i40e_get_link_speed(struct i40e_vsi *vsi)
5424 struct i40e_pf *pf = vsi->back;
5426 switch (pf->hw.phy.link_info.link_speed) {
5427 case I40E_LINK_SPEED_40GB:
5429 case I40E_LINK_SPEED_25GB:
5431 case I40E_LINK_SPEED_20GB:
5433 case I40E_LINK_SPEED_10GB:
5435 case I40E_LINK_SPEED_1GB:
5443 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5444 * @vsi: VSI to be configured
5445 * @seid: seid of the channel/VSI
5446 * @max_tx_rate: max TX rate to be configured as BW limit
5448 * Helper function to set BW limit for a given VSI
5450 int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
5452 struct i40e_pf *pf = vsi->back;
5457 speed = i40e_get_link_speed(vsi);
5458 if (max_tx_rate > speed) {
5459 dev_err(&pf->pdev->dev,
5460 "Invalid max tx rate %llu specified for VSI seid %d.",
5464 if (max_tx_rate && max_tx_rate < 50) {
5465 dev_warn(&pf->pdev->dev,
5466 "Setting max tx rate to minimum usable value of 50Mbps.\n");
5470 /* Tx rate credits are in values of 50Mbps, 0 is disabled */
5471 credits = max_tx_rate;
5472 do_div(credits, I40E_BW_CREDIT_DIVISOR);
5473 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
5474 I40E_MAX_BW_INACTIVE_ACCUM, NULL);
5476 dev_err(&pf->pdev->dev,
5477 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
5478 max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	return ret;
}
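/* Worked example for the credit conversion above (the rate is chosen purely
 * for illustration): a requested max_tx_rate of 175 Mbps is below the link
 * speed, is not bumped by the 50 Mbps minimum check (that only applies to
 * non-zero rates under 50), and is then divided by I40E_BW_CREDIT_DIVISOR
 * (50 Mbps per credit), giving 175 / 50 = 3 credits programmed through
 * i40e_aq_config_vsi_bw_limit().
 */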
5484 * i40e_remove_queue_channels - Remove queue channels for the TCs
5485 * @vsi: VSI to be configured
5487 * Remove queue channels for the TCs
5489 static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
5491 enum i40e_admin_queue_err last_aq_status;
5492 struct i40e_cloud_filter *cfilter;
5493 struct i40e_channel *ch, *ch_tmp;
5494 struct i40e_pf *pf = vsi->back;
5495 struct hlist_node *node;
5498 /* Reset rss size that was stored when reconfiguring rss for
5499 * channel VSIs with non-power-of-2 queue count.
5501 vsi->current_rss_size = 0;
5503 /* perform cleanup for channels if they exist */
5504 if (list_empty(&vsi->ch_list))
5507 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5508 struct i40e_vsi *p_vsi;
5510 list_del(&ch->list);
5511 p_vsi = ch->parent_vsi;
5512 if (!p_vsi || !ch->initialized) {
5516 /* Reset queue contexts */
5517 for (i = 0; i < ch->num_queue_pairs; i++) {
5518 struct i40e_ring *tx_ring, *rx_ring;
5521 pf_q = ch->base_queue + i;
5522 tx_ring = vsi->tx_rings[pf_q];
5525 rx_ring = vsi->rx_rings[pf_q];
5529 /* Reset BW configured for this VSI via mqprio */
5530 ret = i40e_set_bw_limit(vsi, ch->seid, 0);
5532 dev_info(&vsi->back->pdev->dev,
5533 "Failed to reset tx rate for ch->seid %u\n",
5536 /* delete cloud filters associated with this channel */
5537 hlist_for_each_entry_safe(cfilter, node,
5538 &pf->cloud_filter_list, cloud_node) {
5539 if (cfilter->seid != ch->seid)
5542 hash_del(&cfilter->cloud_node);
5543 if (cfilter->dst_port)
5544 ret = i40e_add_del_cloud_filter_big_buf(vsi,
5548 ret = i40e_add_del_cloud_filter(vsi, cfilter,
5550 last_aq_status = pf->hw.aq.asq_last_status;
5552 dev_info(&pf->pdev->dev,
5553 "Failed to delete cloud filter, err %s aq_err %s\n",
5554 i40e_stat_str(&pf->hw, ret),
5555 i40e_aq_str(&pf->hw, last_aq_status));
5559 /* delete VSI from FW */
5560 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
5563 dev_err(&vsi->back->pdev->dev,
5564 "unable to remove channel (%d) for parent VSI(%d)\n",
5565 ch->seid, p_vsi->seid);
5568 INIT_LIST_HEAD(&vsi->ch_list);
5572 * i40e_is_any_channel - channel exist or not
5573 * @vsi: ptr to VSI to which channels are associated with
5575 * Returns true or false if channel(s) exist for associated VSI or not
5577 static bool i40e_is_any_channel(struct i40e_vsi *vsi)
5579 struct i40e_channel *ch, *ch_tmp;
5581 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5582 if (ch->initialized)
5590 * i40e_get_max_queues_for_channel
5591 * @vsi: ptr to VSI to which channels are associated with
5593 * Helper function which returns max value among the queue counts set on the
5594 * channels/TCs created.
5596 static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
5598 struct i40e_channel *ch, *ch_tmp;
5601 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
5602 if (!ch->initialized)
5604 if (ch->num_queue_pairs > max)
5605 max = ch->num_queue_pairs;
5612 * i40e_validate_num_queues - validate num_queues w.r.t channel
5613 * @pf: ptr to PF device
5614 * @num_queues: number of queues
5615 * @vsi: the parent VSI
5616 * @reconfig_rss: indicates should the RSS be reconfigured or not
5618 * This function validates number of queues in the context of new channel
5619 * which is being established and determines if RSS should be reconfigured
5620 * or not for parent VSI.
5622 static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
5623 struct i40e_vsi *vsi, bool *reconfig_rss)
5630 *reconfig_rss = false;
5631 if (vsi->current_rss_size) {
5632 if (num_queues > vsi->current_rss_size) {
5633 dev_dbg(&pf->pdev->dev,
5634 "Error: num_queues (%d) > vsi's current_size(%d)\n",
5635 num_queues, vsi->current_rss_size);
5637 } else if ((num_queues < vsi->current_rss_size) &&
5638 (!is_power_of_2(num_queues))) {
5639 dev_dbg(&pf->pdev->dev,
5640 "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
5641 num_queues, vsi->current_rss_size);
5646 if (!is_power_of_2(num_queues)) {
		/* Find the max num_queues configured across existing channels.
		 * If any channel exists, enforce that 'num_queues' is at least
		 * the largest queue count ever configured for a channel.
		 */
5652 max_ch_queues = i40e_get_max_queues_for_channel(vsi);
5653 if (num_queues < max_ch_queues) {
5654 dev_dbg(&pf->pdev->dev,
5655 "Error: num_queues (%d) < max queues configured for channel(%d)\n",
5656 num_queues, max_ch_queues);
5659 *reconfig_rss = true;
5666 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
5667 * @vsi: the VSI being setup
5668 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
5670 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
5672 static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
5674 struct i40e_pf *pf = vsi->back;
5675 u8 seed[I40E_HKEY_ARRAY_SIZE];
5676 struct i40e_hw *hw = &pf->hw;
5684 if (rss_size > vsi->rss_size)
5687 local_rss_size = min_t(int, vsi->rss_size, rss_size);
5688 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
5692 /* Ignoring user configured lut if there is one */
5693 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);
5695 /* Use user configured hash key if there is one, otherwise
5698 if (vsi->rss_hkey_user)
5699 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
5701 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
5703 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
5705 dev_info(&pf->pdev->dev,
5706 "Cannot set RSS lut, err %s aq_err %s\n",
5707 i40e_stat_str(hw, ret),
5708 i40e_aq_str(hw, hw->aq.asq_last_status));
5714 /* Do the update w.r.t. storing rss_size */
5715 if (!vsi->orig_rss_size)
5716 vsi->orig_rss_size = vsi->rss_size;
	vsi->current_rss_size = local_rss_size;

	return 0;
}
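/* Illustrative effect of the LUT rewrite above (the sizes are examples, not
 * fixed values): with rss_table_size = 512 and an effective rss_size of 4,
 * i40e_fill_rss_lut() programs the table in a repeating 0, 1, 2, 3 pattern,
 * so every hash bucket lands on one of the first four queues of the VSI
 * while any user-supplied LUT is deliberately ignored.
 */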
5723 * i40e_channel_setup_queue_map - Setup a channel queue map
5724 * @pf: ptr to PF device
5725 * @vsi: the VSI being setup
5726 * @ctxt: VSI context structure
5727 * @ch: ptr to channel structure
5729 * Setup queue map for a specific channel
5731 static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
5732 struct i40e_vsi_context *ctxt,
5733 struct i40e_channel *ch)
5735 u16 qcount, qmap, sections = 0;
5739 sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
5740 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
5742 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
5743 ch->num_queue_pairs = qcount;
5745 /* find the next higher power-of-2 of num queue pairs */
5746 pow = ilog2(qcount);
	if (!is_power_of_2(qcount))
		pow++;
5750 qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
5751 (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
5753 /* Setup queue TC[0].qmap for given VSI context */
5754 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
5756 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
5757 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
5758 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
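/* Worked example of the queue map built above (the queue counts are
 * hypothetical): a channel asking for 6 queue pairs, with enough MSI-X
 * vectors available, keeps qcount = 6; pow becomes ilog2(6) + 1 = 3 (next
 * power of two is 8), and with offset 0 the contiguous TC0 map encodes
 * "start at queue 0, 2^3 queue slots". queue_mapping[0] then anchors the
 * map at the channel's base_queue in absolute PF queue numbering.
 */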
5763 * i40e_add_channel - add a channel by adding VSI
5764 * @pf: ptr to PF device
5765 * @uplink_seid: underlying HW switching element (VEB) ID
5766 * @ch: ptr to channel structure
5768 * Add a channel (VSI) using add_vsi and queue_map
5770 static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
5771 struct i40e_channel *ch)
5773 struct i40e_hw *hw = &pf->hw;
5774 struct i40e_vsi_context ctxt;
5775 u8 enabled_tc = 0x1; /* TC0 enabled */
5778 if (ch->type != I40E_VSI_VMDQ2) {
5779 dev_info(&pf->pdev->dev,
5780 "add new vsi failed, ch->type %d\n", ch->type);
5784 memset(&ctxt, 0, sizeof(ctxt));
5785 ctxt.pf_num = hw->pf_id;
5787 ctxt.uplink_seid = uplink_seid;
5788 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
5789 if (ch->type == I40E_VSI_VMDQ2)
5790 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
5792 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
5793 ctxt.info.valid_sections |=
5794 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
5795 ctxt.info.switch_id =
5796 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
5799 /* Set queue map for a given VSI context */
5800 i40e_channel_setup_queue_map(pf, &ctxt, ch);
5802 /* Now time to create VSI */
5803 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
5805 dev_info(&pf->pdev->dev,
5806 "add new vsi failed, err %s aq_err %s\n",
5807 i40e_stat_str(&pf->hw, ret),
5808 i40e_aq_str(&pf->hw,
5809 pf->hw.aq.asq_last_status));
5813 /* Success, update channel */
5814 ch->enabled_tc = enabled_tc;
5815 ch->seid = ctxt.seid;
5816 ch->vsi_number = ctxt.vsi_number;
5817 ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);
5819 /* copy just the sections touched not the entire info
5820 * since not all sections are valid as returned by
5823 ch->info.mapping_flags = ctxt.info.mapping_flags;
5824 memcpy(&ch->info.queue_mapping,
5825 &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
5826 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
5827 sizeof(ctxt.info.tc_mapping));
5832 static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
5835 struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
5839 bw_data.tc_valid_bits = ch->enabled_tc;
5840 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5841 bw_data.tc_bw_credits[i] = bw_share[i];
5843 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
5846 dev_info(&vsi->back->pdev->dev,
5847 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
5848 vsi->back->hw.aq.asq_last_status, ch->seid);
5852 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
5853 ch->info.qs_handle[i] = bw_data.qs_handles[i];
5859 * i40e_channel_config_tx_ring - config TX ring associated with new channel
5860 * @pf: ptr to PF device
5861 * @vsi: the VSI being setup
5862 * @ch: ptr to channel structure
5864 * Configure TX rings associated with channel (VSI) since queues are being
5867 static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
5868 struct i40e_vsi *vsi,
5869 struct i40e_channel *ch)
5873 u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
5875 /* Enable ETS TCs with equal BW Share for now across all VSIs */
5876 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
5877 if (ch->enabled_tc & BIT(i))
5881 /* configure BW for new VSI */
5882 ret = i40e_channel_config_bw(vsi, ch, bw_share);
5884 dev_info(&vsi->back->pdev->dev,
5885 "Failed configuring TC map %d for channel (seid %u)\n",
5886 ch->enabled_tc, ch->seid);
5890 for (i = 0; i < ch->num_queue_pairs; i++) {
5891 struct i40e_ring *tx_ring, *rx_ring;
5894 pf_q = ch->base_queue + i;
5896 /* Get to TX ring ptr of main VSI, for re-setup TX queue
5899 tx_ring = vsi->tx_rings[pf_q];
5902 /* Get the RX ring ptr */
5903 rx_ring = vsi->rx_rings[pf_q];
5911 * i40e_setup_hw_channel - setup new channel
5912 * @pf: ptr to PF device
5913 * @vsi: the VSI being setup
5914 * @ch: ptr to channel structure
5915 * @uplink_seid: underlying HW switching element (VEB) ID
5916 * @type: type of channel to be created (VMDq2/VF)
5918 * Setup new channel (VSI) based on specified type (VMDq2/VF)
5919 * and configures TX rings accordingly
5921 static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
5922 struct i40e_vsi *vsi,
5923 struct i40e_channel *ch,
5924 u16 uplink_seid, u8 type)
5928 ch->initialized = false;
5929 ch->base_queue = vsi->next_base_queue;
5932 /* Proceed with creation of channel (VMDq2) VSI */
5933 ret = i40e_add_channel(pf, uplink_seid, ch);
5935 dev_info(&pf->pdev->dev,
5936 "failed to add_channel using uplink_seid %u\n",
5941 /* Mark the successful creation of channel */
5942 ch->initialized = true;
5944 /* Reconfigure TX queues using QTX_CTL register */
5945 ret = i40e_channel_config_tx_ring(pf, vsi, ch);
5947 dev_info(&pf->pdev->dev,
5948 "failed to configure TX rings for channel %u\n",
5953 /* update 'next_base_queue' */
5954 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
5955 dev_dbg(&pf->pdev->dev,
5956 "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
5957 ch->seid, ch->vsi_number, ch->stat_counter_idx,
5958 ch->num_queue_pairs,
5959 vsi->next_base_queue);
5964 * i40e_setup_channel - setup new channel using uplink element
5965 * @pf: ptr to PF device
5966 * @type: type of channel to be created (VMDq2/VF)
5967 * @uplink_seid: underlying HW switching element (VEB) ID
5968 * @ch: ptr to channel structure
5970 * Setup new channel (VSI) based on specified type (VMDq2/VF)
5971 * and uplink switching element (uplink_seid)
5973 static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
5974 struct i40e_channel *ch)
5980 if (vsi->type == I40E_VSI_MAIN) {
5981 vsi_type = I40E_VSI_VMDQ2;
5983 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
5988 /* underlying switching element */
5989 seid = pf->vsi[pf->lan_vsi]->uplink_seid;
5991 /* create channel (VSI), configure TX rings */
5992 ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
5994 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
5998 return ch->initialized ? true : false;
6002 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6003 * @vsi: ptr to VSI which has PF backing
 * Sets up the switch mode correctly if it needs to be changed, and performs
 * only the modes that are allowed.
 **/
6008 static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
6011 struct i40e_pf *pf = vsi->back;
6012 struct i40e_hw *hw = &pf->hw;
6015 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
6019 if (hw->dev_caps.switch_mode) {
6020 /* if switch mode is set, support mode2 (non-tunneled for
6021 * cloud filter) for now
6023 u32 switch_mode = hw->dev_caps.switch_mode &
6024 I40E_SWITCH_MODE_MASK;
6025 if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
6026 if (switch_mode == I40E_CLOUD_FILTER_MODE2)
6028 dev_err(&pf->pdev->dev,
6029 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
6030 hw->dev_caps.switch_mode);
6035 /* Set Bit 7 to be valid */
6036 mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
6038 /* Set L4type for TCP support */
6039 mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
6041 /* Set cloud filter mode */
6042 mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
6044 /* Prep mode field for set_switch_config */
6045 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
6046 pf->last_sw_conf_valid_flags,
6048 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
6049 dev_err(&pf->pdev->dev,
6050 "couldn't set switch config bits, err %s aq_err %s\n",
6051 i40e_stat_str(hw, ret),
6053 hw->aq.asq_last_status));
6059 * i40e_create_queue_channel - function to create channel
6060 * @vsi: VSI to be configured
6061 * @ch: ptr to channel (it contains channel specific params)
6063 * This function creates channel (VSI) using num_queues specified by user,
6064 * reconfigs RSS if needed.
6066 int i40e_create_queue_channel(struct i40e_vsi *vsi,
6067 struct i40e_channel *ch)
6069 struct i40e_pf *pf = vsi->back;
6076 if (!ch->num_queue_pairs) {
6077 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
6078 ch->num_queue_pairs);
6082 /* validate user requested num_queues for channel */
6083 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
6086 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
6087 ch->num_queue_pairs);
6091 /* By default we are in VEPA mode, if this is the first VF/VMDq
6092 * VSI to be added switch to VEB mode.
6094 if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
6095 (!i40e_is_any_channel(vsi))) {
6096 if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
6097 dev_dbg(&pf->pdev->dev,
6098 "Failed to create channel. Override queues (%u) not power of 2\n",
6099 vsi->tc_config.tc_info[0].qcount);
6103 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
6104 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
6106 if (vsi->type == I40E_VSI_MAIN) {
6107 if (pf->flags & I40E_FLAG_TC_MQPRIO)
6108 i40e_do_reset(pf, I40E_PF_RESET_FLAG,
6111 i40e_do_reset_safe(pf,
6112 I40E_PF_RESET_FLAG);
6115 /* now onwards for main VSI, number of queues will be value
6116 * of TC0's queue count
6120 /* By this time, vsi->cnt_q_avail shall be set to non-zero and
6121 * it should be more than num_queues
6123 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
6124 dev_dbg(&pf->pdev->dev,
6125 "Error: cnt_q_avail (%u) less than num_queues %d\n",
6126 vsi->cnt_q_avail, ch->num_queue_pairs);
6130 /* reconfig_rss only if vsi type is MAIN_VSI */
6131 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
6132 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
6134 dev_info(&pf->pdev->dev,
6135 "Error: unable to reconfig rss for num_queues (%u)\n",
6136 ch->num_queue_pairs);
6141 if (!i40e_setup_channel(pf, vsi, ch)) {
6142 dev_info(&pf->pdev->dev, "Failed to setup channel\n");
6146 dev_info(&pf->pdev->dev,
6147 "Setup channel (id:%u) utilizing num_queues %d\n",
6148 ch->seid, ch->num_queue_pairs);
6150 /* configure VSI for BW limit */
6151 if (ch->max_tx_rate) {
6152 u64 credits = ch->max_tx_rate;
6154 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
6157 do_div(credits, I40E_BW_CREDIT_DIVISOR);
6158 dev_dbg(&pf->pdev->dev,
6159 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6165 /* in case of VF, this will be main SRIOV VSI */
6166 ch->parent_vsi = vsi;
6168 /* and update main_vsi's count for queue_available to use */
6169 vsi->cnt_q_avail -= ch->num_queue_pairs;
6175 * i40e_configure_queue_channels - Add queue channel for the given TCs
6176 * @vsi: VSI to be configured
6178 * Configures queue channel mapping to the given TCs
6180 static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
6182 struct i40e_channel *ch;
6186 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */
6187 vsi->tc_seid_map[0] = vsi->seid;
6188 for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6189 if (vsi->tc_config.enabled_tc & BIT(i)) {
6190 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
6196 INIT_LIST_HEAD(&ch->list);
6197 ch->num_queue_pairs =
6198 vsi->tc_config.tc_info[i].qcount;
6200 vsi->tc_config.tc_info[i].qoffset;
6202 /* Bandwidth limit through tc interface is in bytes/s,
6205 max_rate = vsi->mqprio_qopt.max_rate[i];
6206 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6207 ch->max_tx_rate = max_rate;
6209 list_add_tail(&ch->list, &vsi->ch_list);
6211 ret = i40e_create_queue_channel(vsi, ch);
6213 dev_err(&vsi->back->pdev->dev,
6214 "Failed creating queue channel with TC%d: queues %d\n",
6215 i, ch->num_queue_pairs);
6218 vsi->tc_seid_map[i] = ch->seid;
	i40e_remove_queue_channels(vsi);
	return ret;
}
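/* Worked example for the rate conversion used above (an assumed qdisc
 * setting, not a recommended value, and assuming I40E_BW_MBPS_DIVISOR is the
 * usual 125000 bytes/s per Mbit/s): a per-TC max_rate of 125000000 bytes/s
 * from the mqprio offload converts to ch->max_tx_rate = 1000 Mbps, which
 * i40e_create_queue_channel() then programs as a BW limit of
 * 1000 / 50 = 20 credits.
 */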
6229 * i40e_veb_config_tc - Configure TCs for given VEB
6231 * @enabled_tc: TC bitmap
6233 * Configures given TC bitmap for VEB (switching) element
6235 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
6237 struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
6238 struct i40e_pf *pf = veb->pf;
6242 /* No TCs or already enabled TCs just return */
6243 if (!enabled_tc || veb->enabled_tc == enabled_tc)
6246 bw_data.tc_valid_bits = enabled_tc;
6247 /* bw_data.absolute_credits is not set (relative) */
6249 /* Enable ETS TCs with equal BW Share for now */
6250 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6251 if (enabled_tc & BIT(i))
6252 bw_data.tc_bw_share_credits[i] = 1;
6255 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
6258 dev_info(&pf->pdev->dev,
6259 "VEB bw config failed, err %s aq_err %s\n",
6260 i40e_stat_str(&pf->hw, ret),
6261 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6265 /* Update the BW information */
6266 ret = i40e_veb_get_bw_info(veb);
6268 dev_info(&pf->pdev->dev,
6269 "Failed getting veb bw config, err %s aq_err %s\n",
6270 i40e_stat_str(&pf->hw, ret),
6271 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6278 #ifdef CONFIG_I40E_DCB
6280 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6283 * Reconfigure VEB/VSIs on a given PF; it is assumed that
6284 * the caller would've quiesce all the VSIs before calling
6287 static void i40e_dcb_reconfigure(struct i40e_pf *pf)
6293 /* Enable the TCs available on PF to all VEBs */
6294 tc_map = i40e_pf_get_tc_map(pf);
6295 for (v = 0; v < I40E_MAX_VEB; v++) {
6298 ret = i40e_veb_config_tc(pf->veb[v], tc_map);
6300 dev_info(&pf->pdev->dev,
6301 "Failed configuring TC for VEB seid=%d\n",
6303 /* Will try to configure as many components */
6307 /* Update each VSI */
6308 for (v = 0; v < pf->num_alloc_vsi; v++) {
6312 /* - Enable all TCs for the LAN VSI
6313 * - For all others keep them at TC0 for now
6315 if (v == pf->lan_vsi)
6316 tc_map = i40e_pf_get_tc_map(pf);
6318 tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
6320 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
6322 dev_info(&pf->pdev->dev,
6323 "Failed configuring TC for VSI seid=%d\n",
6325 /* Will try to configure as many components */
6327 /* Re-configure VSI vectors based on updated TC map */
6328 i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
6329 if (pf->vsi[v]->netdev)
6330 i40e_dcbnl_set_all(pf->vsi[v]);
6336 * i40e_resume_port_tx - Resume port Tx
6339 * Resume a port's Tx and issue a PF reset in case of failure to
6342 static int i40e_resume_port_tx(struct i40e_pf *pf)
6344 struct i40e_hw *hw = &pf->hw;
6347 ret = i40e_aq_resume_port_tx(hw, NULL);
6349 dev_info(&pf->pdev->dev,
6350 "Resume Port Tx failed, err %s aq_err %s\n",
6351 i40e_stat_str(&pf->hw, ret),
6352 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6353 /* Schedule PF reset to recover */
6354 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
6355 i40e_service_event_schedule(pf);
6362 * i40e_init_pf_dcb - Initialize DCB configuration
6363 * @pf: PF being configured
6365 * Query the current DCB configuration and cache it
6366 * in the hardware structure
6368 static int i40e_init_pf_dcb(struct i40e_pf *pf)
6370 struct i40e_hw *hw = &pf->hw;
6373 /* Do not enable DCB for SW1 and SW2 images even if the FW is capable
6374 * Also do not enable DCBx if FW LLDP agent is disabled
6376 if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
6377 (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
6380 /* Get the initial DCB configuration */
6381 err = i40e_init_dcb(hw);
6383 /* Device/Function is not DCBX capable */
6384 if ((!hw->func_caps.dcb) ||
6385 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
6386 dev_info(&pf->pdev->dev,
6387 "DCBX offload is not supported or is disabled for this PF.\n");
6389 /* When status is not DISABLED then DCBX in FW */
6390 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
6391 DCB_CAP_DCBX_VER_IEEE;
6393 pf->flags |= I40E_FLAG_DCB_CAPABLE;
6394 /* Enable DCB tagging only when more than one TC
6395 * or explicitly disable if only one TC
6397 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
6398 pf->flags |= I40E_FLAG_DCB_ENABLED;
6400 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6401 dev_dbg(&pf->pdev->dev,
6402 "DCBX offload is supported for this PF.\n");
6404 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
6405 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
6406 pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
6408 dev_info(&pf->pdev->dev,
6409 "Query for DCB configuration failed, err %s aq_err %s\n",
6410 i40e_stat_str(&pf->hw, err),
6411 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6417 #endif /* CONFIG_I40E_DCB */
6418 #define SPEED_SIZE 14
6421 * i40e_print_link_message - print link up or down
6422 * @vsi: the VSI for which link needs a message
6423 * @isup: true if link is up, false otherwise
6425 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
6427 enum i40e_aq_link_speed new_speed;
6428 struct i40e_pf *pf = vsi->back;
6429 char *speed = "Unknown";
6430 char *fc = "Unknown";
6435 new_speed = pf->hw.phy.link_info.link_speed;
6437 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
6439 vsi->current_isup = isup;
6440 vsi->current_speed = new_speed;
6442 netdev_info(vsi->netdev, "NIC Link is Down\n");
6446 /* Warn user if link speed on NPAR enabled partition is less than 10Gbps */
6449 if (pf->hw.func_caps.npar_enable &&
6450 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
6451 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
6452 netdev_warn(vsi->netdev,
6453 "The partition detected link speed that is less than 10Gbps\n");
6455 switch (pf->hw.phy.link_info.link_speed) {
6456 case I40E_LINK_SPEED_40GB:
6459 case I40E_LINK_SPEED_20GB:
6462 case I40E_LINK_SPEED_25GB:
6465 case I40E_LINK_SPEED_10GB:
6468 case I40E_LINK_SPEED_1GB:
6471 case I40E_LINK_SPEED_100MB:
6478 switch (pf->hw.fc.current_mode) {
6482 case I40E_FC_TX_PAUSE:
6485 case I40E_FC_RX_PAUSE:
6493 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
6494 req_fec = ", Requested FEC: None";
6495 fec = ", FEC: None";
6496 an = ", Autoneg: False";
6498 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
6499 an = ", Autoneg: True";
6501 if (pf->hw.phy.link_info.fec_info &
6502 I40E_AQ_CONFIG_FEC_KR_ENA)
6503 fec = ", FEC: CL74 FC-FEC/BASE-R";
6504 else if (pf->hw.phy.link_info.fec_info &
6505 I40E_AQ_CONFIG_FEC_RS_ENA)
6506 fec = ", FEC: CL108 RS-FEC";
6508 /* 'CL108 RS-FEC' should be displayed when RS is requested, or
6509 * both RS and FC are requested
6511 if (vsi->back->hw.phy.link_info.req_fec_info &
6512 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
6513 if (vsi->back->hw.phy.link_info.req_fec_info &
6514 I40E_AQ_REQUEST_FEC_RS)
6515 req_fec = ", Requested FEC: CL108 RS-FEC";
6517 req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
6521 netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
6522 speed, req_fec, fec, an, fc);
6526 * i40e_up_complete - Finish the last steps of bringing up a connection
6527 * @vsi: the VSI being configured
6529 static int i40e_up_complete(struct i40e_vsi *vsi)
6531 struct i40e_pf *pf = vsi->back;
6534 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6535 i40e_vsi_configure_msix(vsi);
6537 i40e_configure_msi_and_legacy(vsi);
6540 err = i40e_vsi_start_rings(vsi);
6544 clear_bit(__I40E_VSI_DOWN, vsi->state);
6545 i40e_napi_enable_all(vsi);
6546 i40e_vsi_enable_irq(vsi);
6548 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
6550 i40e_print_link_message(vsi, true);
6551 netif_tx_start_all_queues(vsi->netdev);
6552 netif_carrier_on(vsi->netdev);
6555 /* replay FDIR SB filters */
6556 if (vsi->type == I40E_VSI_FDIR) {
6557 /* reset fd counters */
6560 i40e_fdir_filter_restore(vsi);
6563 /* On the next run of the service_task, notify any clients of the newly opened netdev */
6566 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
6567 i40e_service_event_schedule(pf);
6573 * i40e_vsi_reinit_locked - Reset the VSI
6574 * @vsi: the VSI being configured
6576 * Rebuild the ring structs after some configuration
6577 * has changed, e.g. MTU size.
6579 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
6581 struct i40e_pf *pf = vsi->back;
6583 WARN_ON(in_interrupt());
6584 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
6585 usleep_range(1000, 2000);
6589 clear_bit(__I40E_CONFIG_BUSY, pf->state);
6593 * i40e_up - Bring the connection back up after being down
6594 * @vsi: the VSI being configured
6596 int i40e_up(struct i40e_vsi *vsi)
6600 err = i40e_vsi_configure(vsi);
6602 err = i40e_up_complete(vsi);
6608 * i40e_force_link_state - Force the link status
6609 * @pf: board private structure
6610 * @is_up: whether the link state should be forced up or down
6612 static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
6614 struct i40e_aq_get_phy_abilities_resp abilities;
6615 struct i40e_aq_set_phy_config config = {0};
6616 struct i40e_hw *hw = &pf->hw;
6621 /* Card might've been put in an unstable state by other drivers
6622 * and applications, which can cause incorrect speed values to be
6623 * set on startup. In order to clear speed registers, we call
6624 * get_phy_capabilities twice, once to get initial state of
6625 * available speeds, and once to get current PHY config.
6627 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
6630 dev_err(&pf->pdev->dev,
6631 "failed to get phy cap., ret = %s last_status = %s\n",
6632 i40e_stat_str(hw, err),
6633 i40e_aq_str(hw, hw->aq.asq_last_status));
6636 speed = abilities.link_speed;
6638 /* Get the current phy config */
6639 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
6642 dev_err(&pf->pdev->dev,
6643 "failed to get phy cap., ret = %s last_status = %s\n",
6644 i40e_stat_str(hw, err),
6645 i40e_aq_str(hw, hw->aq.asq_last_status));
6649 /* If link needs to go up, but was not forced to go down,
6650 * and its speed values are OK, no need for a flap
6652 if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
6653 return I40E_SUCCESS;
6655 /* To force link we need to set bits for all supported PHY types,
6656 * but there are now more than 32, so we need to split the bitmap
6657 * across two fields.
6659 mask = I40E_PHY_TYPES_BITMASK;
6660 config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
6661 config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
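/* The 64-bit I40E_PHY_TYPES_BITMASK is split across two AQ fields above:
 * the low 32 bits go into phy_type (little-endian) and bits 32..39 into
 * the 8-bit phy_type_ext.  When forcing link down both fields are left
 * zero so that no PHY type is requested.
 */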
6662 /* Copy the old settings, except of phy_type */
6663 config.abilities = abilities.abilities;
6664 if (abilities.link_speed != 0)
6665 config.link_speed = abilities.link_speed;
6667 config.link_speed = speed;
6668 config.eee_capability = abilities.eee_capability;
6669 config.eeer = abilities.eeer_val;
6670 config.low_power_ctrl = abilities.d3_lpan;
6671 config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
6672 I40E_AQ_PHY_FEC_CONFIG_MASK;
6673 err = i40e_aq_set_phy_config(hw, &config, NULL);
6676 dev_err(&pf->pdev->dev,
6677 "set phy config ret = %s last_status = %s\n",
6678 i40e_stat_str(&pf->hw, err),
6679 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6683 /* Update the link info */
6684 err = i40e_update_link_info(hw);
6686 /* Wait a little bit (on 40G cards it sometimes takes a really
6687 * long time for link to come back from the atomic reset)
6691 i40e_update_link_info(hw);
6694 i40e_aq_set_link_restart_an(hw, true, NULL);
6696 return I40E_SUCCESS;
6700 * i40e_down - Shutdown the connection processing
6701 * @vsi: the VSI being stopped
6703 void i40e_down(struct i40e_vsi *vsi)
6707 /* It is assumed that the caller of this function
6708 * sets the vsi->state __I40E_VSI_DOWN bit.
6711 netif_carrier_off(vsi->netdev);
6712 netif_tx_disable(vsi->netdev);
6714 i40e_vsi_disable_irq(vsi);
6715 i40e_vsi_stop_rings(vsi);
6716 if (vsi->type == I40E_VSI_MAIN &&
6717 vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
6718 i40e_force_link_state(vsi->back, false);
6719 i40e_napi_disable_all(vsi);
6721 for (i = 0; i < vsi->num_queue_pairs; i++) {
6722 i40e_clean_tx_ring(vsi->tx_rings[i]);
6723 if (i40e_enabled_xdp_vsi(vsi))
6724 i40e_clean_tx_ring(vsi->xdp_rings[i]);
6725 i40e_clean_rx_ring(vsi->rx_rings[i]);
6731 * i40e_validate_mqprio_qopt - validate queue mapping info
6732 * @vsi: the VSI being configured
6733 * @mqprio_qopt: queue parameters
6735 static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
6736 struct tc_mqprio_qopt_offload *mqprio_qopt)
6738 u64 sum_max_rate = 0;
6742 if (mqprio_qopt->qopt.offset[0] != 0 ||
6743 mqprio_qopt->qopt.num_tc < 1 ||
6744 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
6746 for (i = 0; ; i++) {
6747 if (!mqprio_qopt->qopt.count[i])
6749 if (mqprio_qopt->min_rate[i]) {
6750 dev_err(&vsi->back->pdev->dev,
6751 "Invalid min tx rate (greater than 0) specified\n");
6754 max_rate = mqprio_qopt->max_rate[i];
6755 do_div(max_rate, I40E_BW_MBPS_DIVISOR);
6756 sum_max_rate += max_rate;
6758 if (i >= mqprio_qopt->qopt.num_tc - 1)
6760 if (mqprio_qopt->qopt.offset[i + 1] !=
6761 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
6764 if (vsi->num_queue_pairs <
6765 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
6766 dev_err(&vsi->back->pdev->dev,
6767 "Failed to create traffic channel, insufficient number of queues.\n");
6770 if (sum_max_rate > i40e_get_link_speed(vsi)) {
6771 dev_err(&vsi->back->pdev->dev,
6772 "Invalid max tx rate specified\n");
6779 * i40e_vsi_set_default_tc_config - set default values for tc configuration
6780 * @vsi: the VSI being configured
6782 static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
6787 /* Only TC0 is enabled */
6788 vsi->tc_config.numtc = 1;
6789 vsi->tc_config.enabled_tc = 1;
6790 qcount = min_t(int, vsi->alloc_queue_pairs,
6791 i40e_pf_get_max_q_per_tc(vsi->back));
6792 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
6793 /* For the TC that is not enabled set the offset to the default
6794 * queue and allocate one queue for the given TC.
6796 vsi->tc_config.tc_info[i].qoffset = 0;
6798 vsi->tc_config.tc_info[i].qcount = qcount;
6800 vsi->tc_config.tc_info[i].qcount = 1;
6801 vsi->tc_config.tc_info[i].netdev_tc = 0;
6806 * i40e_setup_tc - configure multiple traffic classes
6807 * @netdev: net device to configure
6808 * @type_data: tc offload data
6810 static int i40e_setup_tc(struct net_device *netdev, void *type_data)
6812 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
6813 struct i40e_netdev_priv *np = netdev_priv(netdev);
6814 struct i40e_vsi *vsi = np->vsi;
6815 struct i40e_pf *pf = vsi->back;
6816 u8 enabled_tc = 0, num_tc, hw;
6817 bool need_reset = false;
6818 int old_queue_pairs;
6823 old_queue_pairs = vsi->num_queue_pairs;
6824 num_tc = mqprio_qopt->qopt.num_tc;
6825 hw = mqprio_qopt->qopt.hw;
6826 mode = mqprio_qopt->mode;
6828 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6829 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
6833 /* Check if MFP enabled */
6834 if (pf->flags & I40E_FLAG_MFP_ENABLED) {
6836 "Configuring TC not supported in MFP mode\n");
6840 case TC_MQPRIO_MODE_DCB:
6841 pf->flags &= ~I40E_FLAG_TC_MQPRIO;
6843 /* Check if DCB enabled to continue */
6844 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
6846 "DCB is not enabled for adapter\n");
6850 /* Check whether tc count is within enabled limit */
6851 if (num_tc > i40e_pf_get_num_tc(pf)) {
6853 "TC count greater than enabled on link for adapter\n");
6857 case TC_MQPRIO_MODE_CHANNEL:
6858 if (pf->flags & I40E_FLAG_DCB_ENABLED) {
6860 "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
6863 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
6865 ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
6868 memcpy(&vsi->mqprio_qopt, mqprio_qopt,
6869 sizeof(*mqprio_qopt));
6870 pf->flags |= I40E_FLAG_TC_MQPRIO;
6871 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
6878 /* Generate TC map for number of tc requested */
6879 for (i = 0; i < num_tc; i++)
6880 enabled_tc |= BIT(i);
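/* e.g. a request for num_tc = 3 yields enabled_tc = 0x7, i.e. TC0..TC2 */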
6882 /* Requesting same TC configuration as already enabled */
6883 if (enabled_tc == vsi->tc_config.enabled_tc &&
6884 mode != TC_MQPRIO_MODE_CHANNEL)
6887 /* Quiesce VSI queues */
6888 i40e_quiesce_vsi(vsi);
6890 if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
6891 i40e_remove_queue_channels(vsi);
6893 /* Configure VSI for enabled TCs */
6894 ret = i40e_vsi_config_tc(vsi, enabled_tc);
6896 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
6902 if (pf->flags & I40E_FLAG_TC_MQPRIO) {
6903 if (vsi->mqprio_qopt.max_rate[0]) {
6904 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
6906 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
6907 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
6909 u64 credits = max_tx_rate;
6911 do_div(credits, I40E_BW_CREDIT_DIVISOR);
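/* credits express the cap in I40E_BW_CREDIT_DIVISOR units (50 Mbps,
 * per the message below), e.g. a 1000 Mbps cap becomes 20 credits.
 */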
6912 dev_dbg(&vsi->back->pdev->dev,
6913 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
6922 ret = i40e_configure_queue_channels(vsi);
6924 vsi->num_queue_pairs = old_queue_pairs;
6926 "Failed configuring queue channels\n");
6933 /* Reset the configuration data to defaults, only TC0 is enabled */
6935 i40e_vsi_set_default_tc_config(vsi);
6940 i40e_unquiesce_vsi(vsi);
6945 * i40e_set_cld_element - sets cloud filter element data
6946 * @filter: cloud filter rule
6947 * @cld: ptr to cloud filter element data
6949 * This is a helper function to copy data into the cloud filter element
6952 i40e_set_cld_element(struct i40e_cloud_filter *filter,
6953 struct i40e_aqc_cloud_filters_element_data *cld)
6958 memset(cld, 0, sizeof(*cld));
6959 ether_addr_copy(cld->outer_mac, filter->dst_mac);
6960 ether_addr_copy(cld->inner_mac, filter->src_mac);
6962 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
6965 if (filter->n_proto == ETH_P_IPV6) {
6966 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1)
6967 for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
6969 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
6970 ipa = cpu_to_le32(ipa);
6971 memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
6974 ipa = be32_to_cpu(filter->dst_ipv4);
6975 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
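/* The address words are byte-swapped out of network order and, for IPv6,
 * written into the AQ buffer in reverse word order (last 32-bit word
 * first), which appears to be the layout the firmware expects; IPv4 only
 * needs the single swapped word above.
 */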
6978 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));
6980 /* tenant_id is not supported by FW now, once the support is enabled
6981 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
6983 if (filter->tenant_id)
6988 * i40e_add_del_cloud_filter - Add/del cloud filter
6989 * @vsi: pointer to VSI
6990 * @filter: cloud filter rule
6991 * @add: if true, add, if false, delete
6993 * Add or delete a cloud filter for a specific flow spec.
6994 * Returns 0 if the filter was successfully added.
6996 int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
6997 struct i40e_cloud_filter *filter, bool add)
6999 struct i40e_aqc_cloud_filters_element_data cld_filter;
7000 struct i40e_pf *pf = vsi->back;
7002 static const u16 flag_table[128] = {
7003 [I40E_CLOUD_FILTER_FLAGS_OMAC] =
7004 I40E_AQC_ADD_CLOUD_FILTER_OMAC,
7005 [I40E_CLOUD_FILTER_FLAGS_IMAC] =
7006 I40E_AQC_ADD_CLOUD_FILTER_IMAC,
7007 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] =
7008 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
7009 [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
7010 I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
7011 [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
7012 I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
7013 [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
7014 I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
7015 [I40E_CLOUD_FILTER_FLAGS_IIP] =
7016 I40E_AQC_ADD_CLOUD_FILTER_IIP,
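/* flag_table is a sparse lookup keyed by the driver's filter->flags value:
 * listed combinations map to their AQ cloud filter flags, unlisted indices
 * read as zero, and anything beyond the table is rejected as
 * I40E_ERR_CONFIG below.
 */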
7019 if (filter->flags >= ARRAY_SIZE(flag_table))
7020 return I40E_ERR_CONFIG;
7022 memset(&cld_filter, 0, sizeof(cld_filter));
7024 /* copy element needed to add cloud filter from filter */
7025 i40e_set_cld_element(filter, &cld_filter);
7027 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
7028 cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
7029 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);
7031 if (filter->n_proto == ETH_P_IPV6)
7032 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7033 I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7035 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
7036 I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7039 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
7042 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
7045 dev_dbg(&pf->pdev->dev,
7046 "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
7047 add ? "add" : "delete", filter->dst_port, ret,
7048 pf->hw.aq.asq_last_status);
7050 dev_info(&pf->pdev->dev,
7051 "%s cloud filter for VSI: %d\n",
7052 add ? "Added" : "Deleted", filter->seid);
7057 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
7058 * @vsi: pointer to VSI
7059 * @filter: cloud filter rule
7060 * @add: if true, add, if false, delete
7062 * Add or delete a cloud filter for a specific flow spec using big buffer.
7063 * Returns 0 if the filter was successfully added.
7065 int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
7066 struct i40e_cloud_filter *filter,
7069 struct i40e_aqc_cloud_filters_element_bb cld_filter;
7070 struct i40e_pf *pf = vsi->back;
7073 /* Both (src/dst) valid mac_addr are not supported */
7074 if ((is_valid_ether_addr(filter->dst_mac) &&
7075 is_valid_ether_addr(filter->src_mac)) ||
7076 (is_multicast_ether_addr(filter->dst_mac) &&
7077 is_multicast_ether_addr(filter->src_mac)))
7080 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
7081 * ports are not supported via big buffer now.
7083 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
7086 /* adding filter using src_port/src_ip is not supported at this stage */
7087 if (filter->src_port ||
7088 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
7089 !ipv6_addr_any(&filter->ip.v6.src_ip6))
7092 memset(&cld_filter, 0, sizeof(cld_filter));
7094 /* copy element needed to add cloud filter from filter */
7095 i40e_set_cld_element(filter, &cld_filter.element);
7097 if (is_valid_ether_addr(filter->dst_mac) ||
7098 is_valid_ether_addr(filter->src_mac) ||
7099 is_multicast_ether_addr(filter->dst_mac) ||
7100 is_multicast_ether_addr(filter->src_mac)) {
7101 /* MAC + IP : unsupported mode */
7102 if (filter->dst_ipv4)
7105 /* since we validated that L4 port must be valid before
7106 * we get here, start with respective "flags" value
7107 * and update if vlan is present or not
7109 cld_filter.element.flags =
7110 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);
7112 if (filter->vlan_id) {
7113 cld_filter.element.flags =
7114 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
7117 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
7118 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
7119 cld_filter.element.flags =
7120 cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
7121 if (filter->n_proto == ETH_P_IPV6)
7122 cld_filter.element.flags |=
7123 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
7125 cld_filter.element.flags |=
7126 cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
7128 dev_err(&pf->pdev->dev,
7129 "either mac or ip has to be valid for cloud filter\n");
7133 /* Now copy L4 port in Byte 6..7 in general fields */
7134 cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
7135 be16_to_cpu(filter->dst_port);
7138 /* Validate current device switch mode, change if necessary */
7139 ret = i40e_validate_and_set_switch_mode(vsi);
7141 dev_err(&pf->pdev->dev,
7142 "failed to set switch mode, ret %d\n",
7147 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
7150 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
7155 dev_dbg(&pf->pdev->dev,
7156 "Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
7157 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
7159 dev_info(&pf->pdev->dev,
7160 "%s cloud filter for VSI: %d, L4 port: %d\n",
7161 add ? "add" : "delete", filter->seid,
7162 ntohs(filter->dst_port));
7167 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
7168 * @vsi: Pointer to VSI
7169 * @f: Pointer to struct tc_cls_flower_offload
7170 * @filter: Pointer to cloud filter structure
7173 static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
7174 struct tc_cls_flower_offload *f,
7175 struct i40e_cloud_filter *filter)
7177 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
7178 struct i40e_pf *pf = vsi->back;
7181 if (f->dissector->used_keys &
7182 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7183 BIT(FLOW_DISSECTOR_KEY_BASIC) |
7184 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7185 BIT(FLOW_DISSECTOR_KEY_VLAN) |
7186 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7187 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7188 BIT(FLOW_DISSECTOR_KEY_PORTS) |
7189 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
7190 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
7191 f->dissector->used_keys);
7195 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
7196 struct flow_dissector_key_keyid *key =
7197 skb_flow_dissector_target(f->dissector,
7198 FLOW_DISSECTOR_KEY_ENC_KEYID,
7201 struct flow_dissector_key_keyid *mask =
7202 skb_flow_dissector_target(f->dissector,
7203 FLOW_DISSECTOR_KEY_ENC_KEYID,
7206 if (mask->keyid != 0)
7207 field_flags |= I40E_CLOUD_FIELD_TEN_ID;
7209 filter->tenant_id = be32_to_cpu(key->keyid);
7212 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
7213 struct flow_dissector_key_basic *key =
7214 skb_flow_dissector_target(f->dissector,
7215 FLOW_DISSECTOR_KEY_BASIC,
7218 struct flow_dissector_key_basic *mask =
7219 skb_flow_dissector_target(f->dissector,
7220 FLOW_DISSECTOR_KEY_BASIC,
7223 n_proto_key = ntohs(key->n_proto);
7224 n_proto_mask = ntohs(mask->n_proto);
7226 if (n_proto_key == ETH_P_ALL) {
7230 filter->n_proto = n_proto_key & n_proto_mask;
7231 filter->ip_proto = key->ip_proto;
7234 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7235 struct flow_dissector_key_eth_addrs *key =
7236 skb_flow_dissector_target(f->dissector,
7237 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7240 struct flow_dissector_key_eth_addrs *mask =
7241 skb_flow_dissector_target(f->dissector,
7242 FLOW_DISSECTOR_KEY_ETH_ADDRS,
7245 /* use is_broadcast and is_zero to check for all 0xff or all 0 */
7246 if (!is_zero_ether_addr(mask->dst)) {
7247 if (is_broadcast_ether_addr(mask->dst)) {
7248 field_flags |= I40E_CLOUD_FIELD_OMAC;
7250 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
7252 return I40E_ERR_CONFIG;
7256 if (!is_zero_ether_addr(mask->src)) {
7257 if (is_broadcast_ether_addr(mask->src)) {
7258 field_flags |= I40E_CLOUD_FIELD_IMAC;
7260 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
7262 return I40E_ERR_CONFIG;
7265 ether_addr_copy(filter->dst_mac, key->dst);
7266 ether_addr_copy(filter->src_mac, key->src);
7269 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
7270 struct flow_dissector_key_vlan *key =
7271 skb_flow_dissector_target(f->dissector,
7272 FLOW_DISSECTOR_KEY_VLAN,
7274 struct flow_dissector_key_vlan *mask =
7275 skb_flow_dissector_target(f->dissector,
7276 FLOW_DISSECTOR_KEY_VLAN,
7279 if (mask->vlan_id) {
7280 if (mask->vlan_id == VLAN_VID_MASK) {
7281 field_flags |= I40E_CLOUD_FIELD_IVLAN;
7284 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
7286 return I40E_ERR_CONFIG;
7290 filter->vlan_id = cpu_to_be16(key->vlan_id);
7293 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
7294 struct flow_dissector_key_control *key =
7295 skb_flow_dissector_target(f->dissector,
7296 FLOW_DISSECTOR_KEY_CONTROL,
7299 addr_type = key->addr_type;
7302 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7303 struct flow_dissector_key_ipv4_addrs *key =
7304 skb_flow_dissector_target(f->dissector,
7305 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7307 struct flow_dissector_key_ipv4_addrs *mask =
7308 skb_flow_dissector_target(f->dissector,
7309 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
7313 if (mask->dst == cpu_to_be32(0xffffffff)) {
7314 field_flags |= I40E_CLOUD_FIELD_IIP;
7316 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
7318 return I40E_ERR_CONFIG;
7323 if (mask->src == cpu_to_be32(0xffffffff)) {
7324 field_flags |= I40E_CLOUD_FIELD_IIP;
7326 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
7328 return I40E_ERR_CONFIG;
7332 if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
7333 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
7334 return I40E_ERR_CONFIG;
7336 filter->dst_ipv4 = key->dst;
7337 filter->src_ipv4 = key->src;
7340 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7341 struct flow_dissector_key_ipv6_addrs *key =
7342 skb_flow_dissector_target(f->dissector,
7343 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7345 struct flow_dissector_key_ipv6_addrs *mask =
7346 skb_flow_dissector_target(f->dissector,
7347 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
7350 /* src and dest IPV6 address should not be LOOPBACK
7351 * (0:0:0:0:0:0:0:1), which can be represented as ::1
7353 if (ipv6_addr_loopback(&key->dst) ||
7354 ipv6_addr_loopback(&key->src)) {
7355 dev_err(&pf->pdev->dev,
7356 "Bad ipv6, addr is LOOPBACK\n");
7357 return I40E_ERR_CONFIG;
7359 if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
7360 field_flags |= I40E_CLOUD_FIELD_IIP;
7362 memcpy(&filter->src_ipv6, &key->src.s6_addr32,
7363 sizeof(filter->src_ipv6));
7364 memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
7365 sizeof(filter->dst_ipv6));
7368 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
7369 struct flow_dissector_key_ports *key =
7370 skb_flow_dissector_target(f->dissector,
7371 FLOW_DISSECTOR_KEY_PORTS,
7373 struct flow_dissector_key_ports *mask =
7374 skb_flow_dissector_target(f->dissector,
7375 FLOW_DISSECTOR_KEY_PORTS,
7379 if (mask->src == cpu_to_be16(0xffff)) {
7380 field_flags |= I40E_CLOUD_FIELD_IIP;
7382 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
7383 be16_to_cpu(mask->src));
7384 return I40E_ERR_CONFIG;
7389 if (mask->dst == cpu_to_be16(0xffff)) {
7390 field_flags |= I40E_CLOUD_FIELD_IIP;
7392 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
7393 be16_to_cpu(mask->dst));
7394 return I40E_ERR_CONFIG;
7398 filter->dst_port = key->dst;
7399 filter->src_port = key->src;
7401 switch (filter->ip_proto) {
7406 dev_err(&pf->pdev->dev,
7407 "Only UDP and TCP transport are supported\n");
7411 filter->flags = field_flags;
7416 * i40e_handle_tclass - Forward to a traffic class on the device
7417 * @vsi: Pointer to VSI
7418 * @tc: traffic class index on the device
7419 * @filter: Pointer to cloud filter structure
7422 static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
7423 struct i40e_cloud_filter *filter)
7425 struct i40e_channel *ch, *ch_tmp;
7427 /* direct to a traffic class on the same device */
7429 filter->seid = vsi->seid;
7431 } else if (vsi->tc_config.enabled_tc & BIT(tc)) {
7432 if (!filter->dst_port) {
7433 dev_err(&vsi->back->pdev->dev,
7434 "Specify destination port to direct to traffic class that is not default\n");
7437 if (list_empty(&vsi->ch_list))
7439 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
7441 if (ch->seid == vsi->tc_seid_map[tc])
7442 filter->seid = ch->seid;
7446 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
7451 * i40e_configure_clsflower - Configure tc flower filters
7452 * @vsi: Pointer to VSI
7453 * @cls_flower: Pointer to struct tc_cls_flower_offload
7456 static int i40e_configure_clsflower(struct i40e_vsi *vsi,
7457 struct tc_cls_flower_offload *cls_flower)
7459 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
7460 struct i40e_cloud_filter *filter = NULL;
7461 struct i40e_pf *pf = vsi->back;
7465 dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
7469 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
7470 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
7473 if (pf->fdir_pf_active_filters ||
7474 (!hlist_empty(&pf->fdir_filter_list))) {
7475 dev_err(&vsi->back->pdev->dev,
7476 "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
7480 if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
7481 dev_err(&vsi->back->pdev->dev,
7482 "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
7483 vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
7484 vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7487 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
7491 filter->cookie = cls_flower->cookie;
7493 err = i40e_parse_cls_flower(vsi, cls_flower, filter);
7497 err = i40e_handle_tclass(vsi, tc, filter);
7501 /* Add cloud filter */
7502 if (filter->dst_port)
7503 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
7505 err = i40e_add_del_cloud_filter(vsi, filter, true);
7508 dev_err(&pf->pdev->dev,
7509 "Failed to add cloud filter, err %s\n",
7510 i40e_stat_str(&pf->hw, err));
7514 /* add filter to the ordered list */
7515 INIT_HLIST_NODE(&filter->cloud_node);
7517 hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
7519 pf->num_cloud_filters++;
7528 * i40e_find_cloud_filter - Find the cloud filter in the list
7529 * @vsi: Pointer to VSI
7530 * @cookie: filter specific cookie
7533 static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
7534 unsigned long *cookie)
7536 struct i40e_cloud_filter *filter = NULL;
7537 struct hlist_node *node2;
7539 hlist_for_each_entry_safe(filter, node2,
7540 &vsi->back->cloud_filter_list, cloud_node)
7541 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
7547 * i40e_delete_clsflower - Remove tc flower filters
7548 * @vsi: Pointer to VSI
7549 * @cls_flower: Pointer to struct tc_cls_flower_offload
7552 static int i40e_delete_clsflower(struct i40e_vsi *vsi,
7553 struct tc_cls_flower_offload *cls_flower)
7555 struct i40e_cloud_filter *filter = NULL;
7556 struct i40e_pf *pf = vsi->back;
7559 filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
7564 hash_del(&filter->cloud_node);
7566 if (filter->dst_port)
7567 err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
7569 err = i40e_add_del_cloud_filter(vsi, filter, false);
7573 dev_err(&pf->pdev->dev,
7574 "Failed to delete cloud filter, err %s\n",
7575 i40e_stat_str(&pf->hw, err));
7576 return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
7579 pf->num_cloud_filters--;
7580 if (!pf->num_cloud_filters)
7581 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7582 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7583 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7584 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7585 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7591 * i40e_setup_tc_cls_flower - flower classifier offloads
7592 * @np: net device private structure
7593 * @cls_flower: Pointer to struct tc_cls_flower_offload
7595 static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
7596 struct tc_cls_flower_offload *cls_flower)
7598 struct i40e_vsi *vsi = np->vsi;
7600 switch (cls_flower->command) {
7601 case TC_CLSFLOWER_REPLACE:
7602 return i40e_configure_clsflower(vsi, cls_flower);
7603 case TC_CLSFLOWER_DESTROY:
7604 return i40e_delete_clsflower(vsi, cls_flower);
7605 case TC_CLSFLOWER_STATS:
7612 static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
7615 struct i40e_netdev_priv *np = cb_priv;
7617 if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
7621 case TC_SETUP_CLSFLOWER:
7622 return i40e_setup_tc_cls_flower(np, type_data);
7629 static int i40e_setup_tc_block(struct net_device *dev,
7630 struct tc_block_offload *f)
7632 struct i40e_netdev_priv *np = netdev_priv(dev);
7634 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
7637 switch (f->command) {
7639 return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
7641 case TC_BLOCK_UNBIND:
7642 tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
7649 static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
7653 case TC_SETUP_QDISC_MQPRIO:
7654 return i40e_setup_tc(netdev, type_data);
7655 case TC_SETUP_BLOCK:
7656 return i40e_setup_tc_block(netdev, type_data);
7663 * i40e_open - Called when a network interface is made active
7664 * @netdev: network interface device structure
7666 * The open entry point is called when a network interface is made
7667 * active by the system (IFF_UP). At this point all resources needed
7668 * for transmit and receive operations are allocated, the interrupt
7669 * handler is registered with the OS, the netdev watchdog subtask is
7670 * enabled, and the stack is notified that the interface is ready.
7672 * Returns 0 on success, negative value on failure
7674 int i40e_open(struct net_device *netdev)
7676 struct i40e_netdev_priv *np = netdev_priv(netdev);
7677 struct i40e_vsi *vsi = np->vsi;
7678 struct i40e_pf *pf = vsi->back;
7681 /* disallow open during test or if eeprom is broken */
7682 if (test_bit(__I40E_TESTING, pf->state) ||
7683 test_bit(__I40E_BAD_EEPROM, pf->state))
7686 netif_carrier_off(netdev);
7688 if (i40e_force_link_state(pf, true))
7691 err = i40e_vsi_open(vsi);
7695 /* configure global TSO hardware offload settings */
7696 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
7697 TCP_FLAG_FIN) >> 16);
7698 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
7700 TCP_FLAG_CWR) >> 16);
7701 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
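/* The GLLAN_TSOMSK_F/_M/_L writes appear to program per-segment TCP flag
 * masks for the TSO engine (the suffixes presumably meaning first, middle
 * and last segment); the exact semantics are hardware-defined.
 */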
7703 udp_tunnel_get_rx_info(netdev);
7710 * @vsi: the VSI to open
7712 * Finish initialization of the VSI.
7714 * Returns 0 on success, negative value on failure
7716 * Note: expects to be called while under rtnl_lock()
7718 int i40e_vsi_open(struct i40e_vsi *vsi)
7720 struct i40e_pf *pf = vsi->back;
7721 char int_name[I40E_INT_NAME_STR_LEN];
7724 /* allocate descriptors */
7725 err = i40e_vsi_setup_tx_resources(vsi);
7728 err = i40e_vsi_setup_rx_resources(vsi);
7732 err = i40e_vsi_configure(vsi);
7737 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
7738 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
7739 err = i40e_vsi_request_irq(vsi, int_name);
7743 /* Notify the stack of the actual queue counts. */
7744 err = netif_set_real_num_tx_queues(vsi->netdev,
7745 vsi->num_queue_pairs);
7747 goto err_set_queues;
7749 err = netif_set_real_num_rx_queues(vsi->netdev,
7750 vsi->num_queue_pairs);
7752 goto err_set_queues;
7754 } else if (vsi->type == I40E_VSI_FDIR) {
7755 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
7756 dev_driver_string(&pf->pdev->dev),
7757 dev_name(&pf->pdev->dev));
7758 err = i40e_vsi_request_irq(vsi, int_name);
7767 err = i40e_up_complete(vsi);
7769 goto err_up_complete;
7776 i40e_vsi_free_irq(vsi);
7778 i40e_vsi_free_rx_resources(vsi);
7780 i40e_vsi_free_tx_resources(vsi);
7781 if (vsi == pf->vsi[pf->lan_vsi])
7782 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
7788 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
7789 * @pf: Pointer to PF
7791 * This function destroys the hlist where all the Flow Director
7792 * filters were saved.
7794 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
7796 struct i40e_fdir_filter *filter;
7797 struct i40e_flex_pit *pit_entry, *tmp;
7798 struct hlist_node *node2;
7800 hlist_for_each_entry_safe(filter, node2,
7801 &pf->fdir_filter_list, fdir_node) {
7802 hlist_del(&filter->fdir_node);
7806 list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
7807 list_del(&pit_entry->list);
7810 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
7812 list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
7813 list_del(&pit_entry->list);
7816 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
7818 pf->fdir_pf_active_filters = 0;
7819 pf->fd_tcp4_filter_cnt = 0;
7820 pf->fd_udp4_filter_cnt = 0;
7821 pf->fd_sctp4_filter_cnt = 0;
7822 pf->fd_ip4_filter_cnt = 0;
7824 /* Reprogram the default input set for TCP/IPv4 */
7825 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
7826 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7827 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7829 /* Reprogram the default input set for UDP/IPv4 */
7830 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
7831 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7832 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7834 /* Reprogram the default input set for SCTP/IPv4 */
7835 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
7836 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
7837 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
7839 /* Reprogram the default input set for Other/IPv4 */
7840 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
7841 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
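/* Fragmented IPv4 likewise matches on L3 source/destination only, since
 * fragments cannot be matched reliably on L4 ports.
 */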
7843 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
7844 I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
7848 * i40e_cloud_filter_exit - Cleans up the cloud filters
7849 * @pf: Pointer to PF
7851 * This function destroys the hlist where all the cloud filters were saved.
7854 static void i40e_cloud_filter_exit(struct i40e_pf *pf)
7856 struct i40e_cloud_filter *cfilter;
7857 struct hlist_node *node;
7859 hlist_for_each_entry_safe(cfilter, node,
7860 &pf->cloud_filter_list, cloud_node) {
7861 hlist_del(&cfilter->cloud_node);
7864 pf->num_cloud_filters = 0;
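/* If sideband Flow Director had been converted to cloud-filter use (the
 * FD_SB_TO_CLOUD_FILTER transition made when tc-flower filters were
 * added), dropping all cloud filters hands FD SB/ntuple back to the user.
 */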
7866 if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
7867 !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
7868 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
7869 pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
7870 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
7875 * i40e_close - Disables a network interface
7876 * @netdev: network interface device structure
7878 * The close entry point is called when an interface is de-activated
7879 * by the OS. The hardware is still under the driver's control, but
7880 * this netdev interface is disabled.
7882 * Returns 0, this is not allowed to fail
7884 int i40e_close(struct net_device *netdev)
7886 struct i40e_netdev_priv *np = netdev_priv(netdev);
7887 struct i40e_vsi *vsi = np->vsi;
7889 i40e_vsi_close(vsi);
7895 * i40e_do_reset - Start a PF or Core Reset sequence
7896 * @pf: board private structure
7897 * @reset_flags: which reset is requested
7898 * @lock_acquired: indicates whether or not the lock has been acquired
7899 * before this function was called.
7901 * The essential difference in resets is that the PF Reset
7902 * doesn't clear the packet buffers, doesn't reset the PE
7903 * firmware, and doesn't bother the other PFs on the chip.
7905 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
7909 WARN_ON(in_interrupt());
7912 /* do the biggest reset indicated */
7913 if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
7915 /* Request a Global Reset
7917 * This will start the chip's countdown to the actual full
7918 * chip reset event, and a warning interrupt to be sent
7919 * to all PFs, including the requestor. Our handler
7920 * for the warning interrupt will deal with the shutdown
7921 * and recovery of the switch setup.
7923 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
7924 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7925 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
7926 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7928 } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
7930 /* Request a Core Reset
7932 * Same as Global Reset, except does *not* include the MAC/PHY
7934 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
7935 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
7936 val |= I40E_GLGEN_RTRIG_CORER_MASK;
7937 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
7938 i40e_flush(&pf->hw);
7940 } else if (reset_flags & I40E_PF_RESET_FLAG) {
7942 /* Request a PF Reset
7944 * Resets only the PF-specific registers
7946 * This goes directly to the tear-down and rebuild of
7947 * the switch, since we need to do all the recovery as
7948 * for the Core Reset.
7950 dev_dbg(&pf->pdev->dev, "PFR requested\n");
7951 i40e_handle_reset_warning(pf, lock_acquired);
7953 } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
7954 /* Request a PF Reset
7956 * Resets PF and reinitializes PFs VSI.
7958 i40e_prep_for_reset(pf, lock_acquired);
7959 i40e_reset_and_rebuild(pf, true, lock_acquired);
7961 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
7964 /* Find the VSI(s) that requested a re-init */
7965 dev_info(&pf->pdev->dev,
7966 "VSI reinit requested\n");
7967 for (v = 0; v < pf->num_alloc_vsi; v++) {
7968 struct i40e_vsi *vsi = pf->vsi[v];
7971 test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
7973 i40e_vsi_reinit_locked(pf->vsi[v]);
7975 } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
7978 /* Find the VSI(s) that needs to be brought down */
7979 dev_info(&pf->pdev->dev, "VSI down requested\n");
7980 for (v = 0; v < pf->num_alloc_vsi; v++) {
7981 struct i40e_vsi *vsi = pf->vsi[v];
7984 test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
7986 set_bit(__I40E_VSI_DOWN, vsi->state);
7991 dev_info(&pf->pdev->dev,
7992 "bad reset request 0x%08x\n", reset_flags);
7996 #ifdef CONFIG_I40E_DCB
7998 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
7999 * @pf: board private structure
8000 * @old_cfg: current DCB config
8001 * @new_cfg: new DCB config
8003 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
8004 struct i40e_dcbx_config *old_cfg,
8005 struct i40e_dcbx_config *new_cfg)
8007 bool need_reconfig = false;
8009 /* Check if ETS configuration has changed */
8010 if (memcmp(&new_cfg->etscfg,
8012 sizeof(new_cfg->etscfg))) {
8013 /* If Priority Table has changed reconfig is needed */
8014 if (memcmp(&new_cfg->etscfg.prioritytable,
8015 &old_cfg->etscfg.prioritytable,
8016 sizeof(new_cfg->etscfg.prioritytable))) {
8017 need_reconfig = true;
8018 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
8021 if (memcmp(&new_cfg->etscfg.tcbwtable,
8022 &old_cfg->etscfg.tcbwtable,
8023 sizeof(new_cfg->etscfg.tcbwtable)))
8024 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
8026 if (memcmp(&new_cfg->etscfg.tsatable,
8027 &old_cfg->etscfg.tsatable,
8028 sizeof(new_cfg->etscfg.tsatable)))
8029 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
8032 /* Check if PFC configuration has changed */
8033 if (memcmp(&new_cfg->pfc,
8035 sizeof(new_cfg->pfc))) {
8036 need_reconfig = true;
8037 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
8040 /* Check if APP Table has changed */
8041 if (memcmp(&new_cfg->app,
8043 sizeof(new_cfg->app))) {
8044 need_reconfig = true;
8045 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
8048 dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
8049 return need_reconfig;
8053 * i40e_handle_lldp_event - Handle LLDP Change MIB event
8054 * @pf: board private structure
8055 * @e: event info posted on ARQ
8057 static int i40e_handle_lldp_event(struct i40e_pf *pf,
8058 struct i40e_arq_event_info *e)
8060 struct i40e_aqc_lldp_get_mib *mib =
8061 (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
8062 struct i40e_hw *hw = &pf->hw;
8063 struct i40e_dcbx_config tmp_dcbx_cfg;
8064 bool need_reconfig = false;
8068 /* Not DCB capable or capability disabled */
8069 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
8072 /* Ignore if event is not for Nearest Bridge */
8073 type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
8074 & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
8075 dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
8076 if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
8079 /* Check MIB Type and return if event for Remote MIB update */
8080 type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
8081 dev_dbg(&pf->pdev->dev,
8082 "LLDP event mib type %s\n", type ? "remote" : "local");
8083 if (type == I40E_AQ_LLDP_MIB_REMOTE) {
8084 /* Update the remote cached instance and return */
8085 ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
8086 I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
8087 &hw->remote_dcbx_config);
8091 /* Store the old configuration */
8092 tmp_dcbx_cfg = hw->local_dcbx_config;
8094 /* Reset the old DCBx configuration data */
8095 memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
8096 /* Get updated DCBX data from firmware */
8097 ret = i40e_get_dcb_config(&pf->hw);
8099 dev_info(&pf->pdev->dev,
8100 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
8101 i40e_stat_str(&pf->hw, ret),
8102 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8106 /* No change detected in DCBX configs */
8107 if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
8108 sizeof(tmp_dcbx_cfg))) {
8109 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
8113 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
8114 &hw->local_dcbx_config);
8116 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
8121 /* Enable DCB tagging only when more than one TC */
8122 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
8123 pf->flags |= I40E_FLAG_DCB_ENABLED;
8125 pf->flags &= ~I40E_FLAG_DCB_ENABLED;
8127 set_bit(__I40E_PORT_SUSPENDED, pf->state);
8128 /* Reconfiguration needed: quiesce all VSIs */
8129 i40e_pf_quiesce_all_vsi(pf);
8131 /* Configuration has changed, so update VEB/VSI */
8132 i40e_dcb_reconfigure(pf);
8134 ret = i40e_resume_port_tx(pf);
8136 clear_bit(__I40E_PORT_SUSPENDED, pf->state);
8137 /* In case of error no point in resuming VSIs */
8141 /* Wait for the PF's queues to be disabled */
8142 ret = i40e_pf_wait_queues_disabled(pf);
8144 /* Schedule PF reset to recover */
8145 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8146 i40e_service_event_schedule(pf);
8148 i40e_pf_unquiesce_all_vsi(pf);
8149 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
8150 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
8156 #endif /* CONFIG_I40E_DCB */
8159 * i40e_do_reset_safe - Protected reset path for userland calls.
8160 * @pf: board private structure
8161 * @reset_flags: which reset is requested
8164 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
8167 i40e_do_reset(pf, reset_flags, true);
8172 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
8173 * @pf: board private structure
8174 * @e: event info posted on ARQ
8176 * Handler for LAN Queue Overflow Event generated by the firmware for PF
8179 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
8180 struct i40e_arq_event_info *e)
8182 struct i40e_aqc_lan_overflow *data =
8183 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
8184 u32 queue = le32_to_cpu(data->prtdcb_rupto);
8185 u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
8186 struct i40e_hw *hw = &pf->hw;
8190 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
8193 /* Queue belongs to VF, find the VF and issue VF reset */
8194 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
8195 >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
8196 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
8197 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
8198 vf_id -= hw->func_caps.vf_base_id;
8199 vf = &pf->vf[vf_id];
8200 i40e_vc_notify_vf_reset(vf);
8201 /* Allow VF to process pending reset notification */
8203 i40e_reset_vf(vf, false);
8208 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
8209 * @pf: board private structure
8211 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
8215 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8216 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
8221 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
8222 * @pf: board private structure
8224 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
8228 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
8229 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
8230 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
8231 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
8236 * i40e_get_global_fd_count - Get total FD filters programmed on device
8237 * @pf: board private structure
8239 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
8243 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
8244 fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
8245 ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
8246 I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
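/* Unlike i40e_get_current_fd_count() above, which reads the per-PF
 * PFQF_FDSTAT register, this sums the device-wide guaranteed and
 * best-effort counts from GLQF_FDCNT_0.
 */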
8251 * i40e_reenable_fdir_sb - Restore FDir SB capability
8252 * @pf: board private structure
8254 static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
8256 if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
8257 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
8258 (I40E_DEBUG_FD & pf->hw.debug_mask))
8259 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
8263 * i40e_reenable_fdir_atr - Restore FDir ATR capability
8264 * @pf: board private structure
8266 static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
8268 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
8269 /* ATR uses the same filtering logic as SB rules. It only
8270 * functions properly if the input set mask is at the default
8271 * settings. It is safe to restore the default input set
8272 * because there are no active TCPv4 filter rules.
8274 i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
8275 I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
8276 I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
8278 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8279 (I40E_DEBUG_FD & pf->hw.debug_mask))
8280 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
8285 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
8286 * @pf: board private structure
8287 * @filter: FDir filter to remove
8289 static void i40e_delete_invalid_filter(struct i40e_pf *pf,
8290 struct i40e_fdir_filter *filter)
8292 /* Update counters */
8293 pf->fdir_pf_active_filters--;
8296 switch (filter->flow_type) {
8298 pf->fd_tcp4_filter_cnt--;
8301 pf->fd_udp4_filter_cnt--;
8304 pf->fd_sctp4_filter_cnt--;
8307 switch (filter->ip4_proto) {
8309 pf->fd_tcp4_filter_cnt--;
8312 pf->fd_udp4_filter_cnt--;
8315 pf->fd_sctp4_filter_cnt--;
8318 pf->fd_ip4_filter_cnt--;
8324 /* Remove the filter from the list and free memory */
8325 hlist_del(&filter->fdir_node);
8330 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
8331 * @pf: board private structure
8333 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
8335 struct i40e_fdir_filter *filter;
8336 u32 fcnt_prog, fcnt_avail;
8337 struct hlist_node *node;
8339 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8342 /* Check if we have enough room to re-enable FDir SB capability. */
8343 fcnt_prog = i40e_get_global_fd_count(pf);
8344 fcnt_avail = pf->fdir_pf_filter_count;
8345 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
8346 (pf->fd_add_err == 0) ||
8347 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
8348 i40e_reenable_fdir_sb(pf);
8350 /* We should wait for even more space before re-enabling ATR.
8351 * Additionally, we cannot enable ATR as long as we still have TCP SB rules.
 */
8354 if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
8355 (pf->fd_tcp4_filter_cnt == 0))
8356 i40e_reenable_fdir_atr(pf);
8358 /* if hw had a problem adding a filter, delete it */
8359 if (pf->fd_inv > 0) {
8360 hlist_for_each_entry_safe(filter, node,
8361 &pf->fdir_filter_list, fdir_node)
8362 if (filter->fd_id == pf->fd_inv)
8363 i40e_delete_invalid_filter(pf, filter);
8367 #define I40E_MIN_FD_FLUSH_INTERVAL 10
8368 #define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
8370 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
8371 * @pf: board private structure
8373 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
8375 unsigned long min_flush_time;
8376 int flush_wait_retry = 50;
8377 bool disable_atr = false;
8381 if (!time_after(jiffies, pf->fd_flush_timestamp +
8382 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
8385 /* If the flush is happening too quickly and we have mostly SB rules we
8386 * should not re-enable ATR for some time.
8388 min_flush_time = pf->fd_flush_timestamp +
8389 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
8390 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
8392 if (!(time_after(jiffies, min_flush_time)) &&
8393 (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
8394 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8395 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
8399 pf->fd_flush_timestamp = jiffies;
8400 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8401 /* flush all filters */
8402 wr32(&pf->hw, I40E_PFQF_CTL_1,
8403 I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
8404 i40e_flush(&pf->hw);
8408 /* Check FD flush status every 5-6msec */
8409 usleep_range(5000, 6000);
8410 reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
8411 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
8413 } while (flush_wait_retry--);
8414 if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
8415 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
8417 /* replay sideband filters */
8418 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
8419 if (!disable_atr && !pf->fd_tcp4_filter_cnt)
8420 clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
8421 clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
8422 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8423 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
8428 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
8429 * @pf: board private structure
8431 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
8433 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
8436 /* We can see up to 256 filter programming descriptors in transit if the
8437 * filters are being applied very quickly, before we see the first
8438 * filter miss error on Rx queue 0. Accumulating enough error messages
8439 * before reacting makes sure we don't trigger a flush too often.
8441 #define I40E_MAX_FD_PROGRAM_ERROR 256
8444 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
8445 * @pf: board private structure
8447 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
8450 /* if interface is down do nothing */
8451 if (test_bit(__I40E_DOWN, pf->state))
8454 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
8455 i40e_fdir_flush_and_replay(pf);
8457 i40e_fdir_check_and_reenable(pf);
8462 * i40e_vsi_link_event - notify VSI of a link event
8463 * @vsi: vsi to be notified
8464 * @link_up: link up or down
8466 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
8468 if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
8471 switch (vsi->type) {
8473 if (!vsi->netdev || !vsi->netdev_registered)
8477 netif_carrier_on(vsi->netdev);
8478 netif_tx_wake_all_queues(vsi->netdev);
8480 netif_carrier_off(vsi->netdev);
8481 netif_tx_stop_all_queues(vsi->netdev);
8485 case I40E_VSI_SRIOV:
8486 case I40E_VSI_VMDQ2:
8488 case I40E_VSI_IWARP:
8489 case I40E_VSI_MIRROR:
8491 /* there is no notification for other VSIs */
8497 * i40e_veb_link_event - notify elements on the veb of a link event
8498 * @veb: veb to be notified
8499 * @link_up: link up or down
8501 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
8506 if (!veb || !veb->pf)
8510 /* depth first... */
8511 for (i = 0; i < I40E_MAX_VEB; i++)
8512 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
8513 i40e_veb_link_event(pf->veb[i], link_up);
8515 /* ... now the local VSIs */
8516 for (i = 0; i < pf->num_alloc_vsi; i++)
8517 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
8518 i40e_vsi_link_event(pf->vsi[i], link_up);
8522 * i40e_link_event - Update netif_carrier status
8523 * @pf: board private structure
8525 static void i40e_link_event(struct i40e_pf *pf)
8527 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8528 u8 new_link_speed, old_link_speed;
8530 bool new_link, old_link;
8532 /* save off old link status information */
8533 pf->hw.phy.link_info_old = pf->hw.phy.link_info;
8535 /* set this to force the get_link_status call to refresh state */
8536 pf->hw.phy.get_link_info = true;
8538 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
8540 status = i40e_get_link_status(&pf->hw, &new_link);
8542 /* On success, disable temp link polling */
8543 if (status == I40E_SUCCESS) {
8544 clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
8546 /* Enable link polling temporarily until i40e_get_link_status
8547 * returns I40E_SUCCESS
8549 set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
8550 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
8555 old_link_speed = pf->hw.phy.link_info_old.link_speed;
8556 new_link_speed = pf->hw.phy.link_info.link_speed;
8558 if (new_link == old_link &&
8559 new_link_speed == old_link_speed &&
8560 (test_bit(__I40E_VSI_DOWN, vsi->state) ||
8561 new_link == netif_carrier_ok(vsi->netdev)))
8564 i40e_print_link_message(vsi, new_link);
8566 /* Notify the base of the switch tree connected to
8567 * the link. Floating VEBs are not notified.
8569 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
8570 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
8572 i40e_vsi_link_event(vsi, new_link);
8575 i40e_vc_notify_link_state(pf);
8577 if (pf->flags & I40E_FLAG_PTP)
8578 i40e_ptp_set_increment(pf);
8582 * i40e_watchdog_subtask - periodic checks not using event driven response
8583 * @pf: board private structure
8585 static void i40e_watchdog_subtask(struct i40e_pf *pf)
8589 /* if interface is down do nothing */
8590 if (test_bit(__I40E_DOWN, pf->state) ||
8591 test_bit(__I40E_CONFIG_BUSY, pf->state))
8594 /* make sure we don't do these things too often */
8595 if (time_before(jiffies, (pf->service_timer_previous +
8596 pf->service_timer_period)))
8598 pf->service_timer_previous = jiffies;
8600 if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
8601 test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
8602 i40e_link_event(pf);
8604 /* Update the stats for active netdevs so the network stack
8605 * can look at updated numbers whenever it cares to
8607 for (i = 0; i < pf->num_alloc_vsi; i++)
8608 if (pf->vsi[i] && pf->vsi[i]->netdev)
8609 i40e_update_stats(pf->vsi[i]);
8611 if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
8612 /* Update the stats for the active switching components */
8613 for (i = 0; i < I40E_MAX_VEB; i++)
8615 i40e_update_veb_stats(pf->veb[i]);
8618 i40e_ptp_rx_hang(pf);
8619 i40e_ptp_tx_hang(pf);
8623 * i40e_reset_subtask - Set up for resetting the device and driver
8624 * @pf: board private structure
8626 static void i40e_reset_subtask(struct i40e_pf *pf)
8628 u32 reset_flags = 0;
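/* Collect every outstanding reset request into a single flags word and
 * clear the request bits so each request is serviced only once.
 */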
8630 if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
8631 reset_flags |= BIT(__I40E_REINIT_REQUESTED);
8632 clear_bit(__I40E_REINIT_REQUESTED, pf->state);
8634 if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
8635 reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
8636 clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
8638 if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
8639 reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
8640 clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
8642 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
8643 reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
8644 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
8646 if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
8647 reset_flags |= BIT(__I40E_DOWN_REQUESTED);
8648 clear_bit(__I40E_DOWN_REQUESTED, pf->state);
8651 /* If there's a recovery already waiting, it takes
8652 * precedence before starting a new reset sequence.
8654 if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
8655 i40e_prep_for_reset(pf, false);
8657 i40e_rebuild(pf, false, false);
8660 /* If we're already down or resetting, just bail */
8662 !test_bit(__I40E_DOWN, pf->state) &&
8663 !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
8664 i40e_do_reset(pf, reset_flags, false);
8669 * i40e_handle_link_event - Handle link event
8670 * @pf: board private structure
8671 * @e: event info posted on ARQ
8673 static void i40e_handle_link_event(struct i40e_pf *pf,
8674 struct i40e_arq_event_info *e)
8676 struct i40e_aqc_get_link_status *status =
8677 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
8679 /* Do a new status request to re-enable LSE reporting
8680 * and load new status information into the hw struct
8681 * This completely ignores any state information
8682 * in the ARQ event info, instead choosing to always
8683 * issue the AQ update link status command.
8685 i40e_link_event(pf);
8687 /* Check if module meets thermal requirements */
8688 if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
8689 dev_err(&pf->pdev->dev,
8690 "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
8691 dev_err(&pf->pdev->dev,
8692 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8694 /* check for an unqualified module; if the link is down, suppress
8695 * the message if the link was forced down.
8697 if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
8698 (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
8699 (!(status->link_info & I40E_AQ_LINK_UP)) &&
8700 (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
8701 dev_err(&pf->pdev->dev,
8702 "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
8703 dev_err(&pf->pdev->dev,
8704 "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
8710 * i40e_clean_adminq_subtask - Clean the AdminQ rings
8711 * @pf: board private structure
8713 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
8715 struct i40e_arq_event_info event;
8716 struct i40e_hw *hw = &pf->hw;
8723 /* Do not run clean AQ when PF reset fails */
8724 if (test_bit(__I40E_RESET_FAILED, pf->state))
8727 /* check for error indications */
8728 val = rd32(&pf->hw, pf->hw.aq.arq.len);
8730 if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
8731 if (hw->debug_mask & I40E_DEBUG_AQ)
8732 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
8733 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
8735 if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
8736 if (hw->debug_mask & I40E_DEBUG_AQ)
8737 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
8738 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
8739 pf->arq_overflows++;
8741 if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
8742 if (hw->debug_mask & I40E_DEBUG_AQ)
8743 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
8744 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
8747 wr32(&pf->hw, pf->hw.aq.arq.len, val);
8749 val = rd32(&pf->hw, pf->hw.aq.asq.len);
8751 if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
8752 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8753 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
8754 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
8756 if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
8757 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8758 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
8759 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
8761 if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
8762 if (pf->hw.debug_mask & I40E_DEBUG_AQ)
8763 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
8764 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
8767 wr32(&pf->hw, pf->hw.aq.asq.len, val);
8769 event.buf_len = I40E_MAX_AQ_BUF_SIZE;
8770 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
8775 ret = i40e_clean_arq_element(hw, &event, &pending);
8776 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
8779 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
8783 opcode = le16_to_cpu(event.desc.opcode);
8786 case i40e_aqc_opc_get_link_status:
8787 i40e_handle_link_event(pf, &event);
8789 case i40e_aqc_opc_send_msg_to_pf:
8790 ret = i40e_vc_process_vf_msg(pf,
8791 le16_to_cpu(event.desc.retval),
8792 le32_to_cpu(event.desc.cookie_high),
8793 le32_to_cpu(event.desc.cookie_low),
8797 case i40e_aqc_opc_lldp_update_mib:
8798 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
8799 #ifdef CONFIG_I40E_DCB
8801 ret = i40e_handle_lldp_event(pf, &event);
8803 #endif /* CONFIG_I40E_DCB */
8805 case i40e_aqc_opc_event_lan_overflow:
8806 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
8807 i40e_handle_lan_overflow_event(pf, &event);
8809 case i40e_aqc_opc_send_msg_to_peer:
8810 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
8812 case i40e_aqc_opc_nvm_erase:
8813 case i40e_aqc_opc_nvm_update:
8814 case i40e_aqc_opc_oem_post_update:
8815 i40e_debug(&pf->hw, I40E_DEBUG_NVM,
8816 "ARQ NVM operation 0x%04x completed\n",
8820 dev_info(&pf->pdev->dev,
8821 "ARQ: Unknown event 0x%04x ignored\n",
8825 } while (i++ < pf->adminq_work_limit);
8827 if (i < pf->adminq_work_limit)
8828 clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
8830 /* re-enable Admin queue interrupt cause */
8831 val = rd32(hw, I40E_PFINT_ICR0_ENA);
8832 val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
8833 wr32(hw, I40E_PFINT_ICR0_ENA, val);
8836 kfree(event.msg_buf);
8840 * i40e_verify_eeprom - make sure eeprom is good to use
8841 * @pf: board private structure
8843 static void i40e_verify_eeprom(struct i40e_pf *pf)
8847 err = i40e_diag_eeprom_test(&pf->hw);
8849 /* retry in case of garbage read */
8850 err = i40e_diag_eeprom_test(&pf->hw);
8852 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
8854 set_bit(__I40E_BAD_EEPROM, pf->state);
8858 if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
8859 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
8860 clear_bit(__I40E_BAD_EEPROM, pf->state);
8865 * i40e_enable_pf_switch_lb
8866 * @pf: pointer to the PF structure
8868 * enable switch loop back or die - no point in a return value
8870 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
8872 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8873 struct i40e_vsi_context ctxt;
8876 ctxt.seid = pf->main_vsi_seid;
8877 ctxt.pf_num = pf->hw.pf_id;
8879 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8881 dev_info(&pf->pdev->dev,
8882 "couldn't get PF vsi config, err %s aq_err %s\n",
8883 i40e_stat_str(&pf->hw, ret),
8884 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8887 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8888 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8889 ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8891 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8893 dev_info(&pf->pdev->dev,
8894 "update vsi switch failed, err %s aq_err %s\n",
8895 i40e_stat_str(&pf->hw, ret),
8896 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8901 * i40e_disable_pf_switch_lb
8902 * @pf: pointer to the PF structure
8904 * disable switch loop back or die - no point in a return value
8906 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
8908 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
8909 struct i40e_vsi_context ctxt;
8912 ctxt.seid = pf->main_vsi_seid;
8913 ctxt.pf_num = pf->hw.pf_id;
8915 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
8917 dev_info(&pf->pdev->dev,
8918 "couldn't get PF vsi config, err %s aq_err %s\n",
8919 i40e_stat_str(&pf->hw, ret),
8920 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8923 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
8924 ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
8925 ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
8927 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
8929 dev_info(&pf->pdev->dev,
8930 "update vsi switch failed, err %s aq_err %s\n",
8931 i40e_stat_str(&pf->hw, ret),
8932 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
8937 * i40e_config_bridge_mode - Configure the HW bridge mode
8938 * @veb: pointer to the bridge instance
8940 * Configure the loop back mode for the LAN VSI that is downlink to the
8941 * specified HW bridge instance. It is expected this function is called
8942 * when a new HW bridge is instantiated.
8944 static void i40e_config_bridge_mode(struct i40e_veb *veb)
8946 struct i40e_pf *pf = veb->pf;
8948 if (pf->hw.debug_mask & I40E_DEBUG_LAN)
8949 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
8950 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
8951 if (veb->bridge_mode & BRIDGE_MODE_VEPA)
8952 i40e_disable_pf_switch_lb(pf);
8954 i40e_enable_pf_switch_lb(pf);
8958 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
8959 * @veb: pointer to the VEB instance
8961 * This is a recursive function that first builds the attached VSIs then
8962 * recurses to build the next layer of VEBs. We track the connections
8963 * through our own index numbers because the SEIDs from the HW could
8964 * change across the reset.
8966 static int i40e_reconstitute_veb(struct i40e_veb *veb)
8968 struct i40e_vsi *ctl_vsi = NULL;
8969 struct i40e_pf *pf = veb->pf;
8973 /* build VSI that owns this VEB, temporarily attached to base VEB */
8974 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
8976 pf->vsi[v]->veb_idx == veb->idx &&
8977 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
8978 ctl_vsi = pf->vsi[v];
8983 dev_info(&pf->pdev->dev,
8984 "missing owner VSI for veb_idx %d\n", veb->idx);
8986 goto end_reconstitute;
8988 if (ctl_vsi != pf->vsi[pf->lan_vsi])
8989 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
8990 ret = i40e_add_vsi(ctl_vsi);
8992 dev_info(&pf->pdev->dev,
8993 "rebuild of veb_idx %d owner VSI failed: %d\n",
8995 goto end_reconstitute;
8997 i40e_vsi_reset_stats(ctl_vsi);
8999 /* create the VEB in the switch and move the VSI onto the VEB */
9000 ret = i40e_add_veb(veb, ctl_vsi);
9002 goto end_reconstitute;
9004 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
9005 veb->bridge_mode = BRIDGE_MODE_VEB;
9007 veb->bridge_mode = BRIDGE_MODE_VEPA;
9008 i40e_config_bridge_mode(veb);
9010 /* create the remaining VSIs attached to this VEB */
9011 for (v = 0; v < pf->num_alloc_vsi; v++) {
9012 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
9015 if (pf->vsi[v]->veb_idx == veb->idx) {
9016 struct i40e_vsi *vsi = pf->vsi[v];
9018 vsi->uplink_seid = veb->seid;
9019 ret = i40e_add_vsi(vsi);
9021 dev_info(&pf->pdev->dev,
9022 "rebuild of vsi_idx %d failed: %d\n",
9024 goto end_reconstitute;
9026 i40e_vsi_reset_stats(vsi);
9030 /* create any VEBs attached to this VEB - RECURSION */
9031 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9032 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
9033 pf->veb[veb_idx]->uplink_seid = veb->seid;
9034 ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
9045 * i40e_get_capabilities - get info about the HW
9046 * @pf: the PF struct
 * @list_type: AQ opcode selecting function or device capabilities
9048 static int i40e_get_capabilities(struct i40e_pf *pf,
9049 enum i40e_admin_queue_opc list_type)
9051 struct i40e_aqc_list_capabilities_element_resp *cap_buf;
9056 buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
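/* Start with room for 40 capability records; if the firmware reports
 * ENOMEM, the retry below grows the buffer to the size it reported.
 */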
9058 cap_buf = kzalloc(buf_len, GFP_KERNEL);
9062 /* this loads the data into the hw struct for us */
9063 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
9064 &data_size, list_type,
9066 /* data loaded, buffer no longer needed */
9069 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
9070 /* retry with a larger buffer */
9071 buf_len = data_size;
9072 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
9073 dev_info(&pf->pdev->dev,
9074 "capability discovery failed, err %s aq_err %s\n",
9075 i40e_stat_str(&pf->hw, err),
9076 i40e_aq_str(&pf->hw,
9077 pf->hw.aq.asq_last_status));
9082 if (pf->hw.debug_mask & I40E_DEBUG_USER) {
9083 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9084 dev_info(&pf->pdev->dev,
9085 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
9086 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
9087 pf->hw.func_caps.num_msix_vectors,
9088 pf->hw.func_caps.num_msix_vectors_vf,
9089 pf->hw.func_caps.fd_filters_guaranteed,
9090 pf->hw.func_caps.fd_filters_best_effort,
9091 pf->hw.func_caps.num_tx_qp,
9092 pf->hw.func_caps.num_vsis);
9093 } else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
9094 dev_info(&pf->pdev->dev,
9095 "switch_mode=0x%04x, function_valid=0x%08x\n",
9096 pf->hw.dev_caps.switch_mode,
9097 pf->hw.dev_caps.valid_functions);
9098 dev_info(&pf->pdev->dev,
9099 "SR-IOV=%d, num_vfs for all function=%u\n",
9100 pf->hw.dev_caps.sr_iov_1_1,
9101 pf->hw.dev_caps.num_vfs);
9102 dev_info(&pf->pdev->dev,
9103 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
9104 pf->hw.dev_caps.num_vsis,
9105 pf->hw.dev_caps.num_rx_qp,
9106 pf->hw.dev_caps.num_tx_qp);
9109 if (list_type == i40e_aqc_opc_list_func_capabilities) {
9110 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
9111 + pf->hw.func_caps.num_vfs)
9112 if (pf->hw.revision_id == 0 &&
9113 pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
9114 dev_info(&pf->pdev->dev,
9115 "got num_vsis %d, setting num_vsis to %d\n",
9116 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
9117 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
9123 static int i40e_vsi_clear(struct i40e_vsi *vsi);
9126 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
9127 * @pf: board private structure
9129 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
9131 struct i40e_vsi *vsi;
9133 /* quick workaround for an NVM issue that leaves a critical register
 * uninitialized
 */
9136 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
9137 static const u32 hkey[] = {
9138 0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
9139 0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
9140 0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
9144 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
9145 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
9148 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
9151 /* find existing VSI and see if it needs configuring */
9152 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9154 /* create a new VSI if none exists */
9156 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
9157 pf->vsi[pf->lan_vsi]->seid, 0);
9159 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
9160 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9161 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
9166 i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
9170 * i40e_fdir_teardown - release the Flow Director resources
9171 * @pf: board private structure
9173 static void i40e_fdir_teardown(struct i40e_pf *pf)
9175 struct i40e_vsi *vsi;
9177 i40e_fdir_filter_exit(pf);
9178 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
9180 i40e_vsi_release(vsi);
9184 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
 * @vsi: PF main VSI
9186 * @seid: seid of main or channel VSIs
9188 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
9189 * existed before reset
9191 static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
9193 struct i40e_cloud_filter *cfilter;
9194 struct i40e_pf *pf = vsi->back;
9195 struct hlist_node *node;
9198 /* Add cloud filters back if they exist */
9199 hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
9201 if (cfilter->seid != seid)
9204 if (cfilter->dst_port)
9205 ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
9208 ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
9211 dev_dbg(&pf->pdev->dev,
9212 "Failed to rebuild cloud filter, err %s aq_err %s\n",
9213 i40e_stat_str(&pf->hw, ret),
9214 i40e_aq_str(&pf->hw,
9215 pf->hw.aq.asq_last_status));
9223 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
 * @vsi: PF main VSI
 *
9226 * Rebuilds channel VSIs if they existed before reset
9228 static int i40e_rebuild_channels(struct i40e_vsi *vsi)
9230 struct i40e_channel *ch, *ch_tmp;
9233 if (list_empty(&vsi->ch_list))
9236 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
9237 if (!ch->initialized)
9239 /* Proceed with creation of channel (VMDq2) VSI */
9240 ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
9242 dev_info(&vsi->back->pdev->dev,
9243 "failed to rebuild channels using uplink_seid %u\n",
9247 /* Reconfigure TX queues using QTX_CTL register */
9248 ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
9250 dev_info(&vsi->back->pdev->dev,
9251 "failed to configure TX rings for channel %u\n",
9255 /* update 'next_base_queue' */
9256 vsi->next_base_queue = vsi->next_base_queue +
9257 ch->num_queue_pairs;
9258 if (ch->max_tx_rate) {
9259 u64 credits = ch->max_tx_rate;
9261 if (i40e_set_bw_limit(vsi, ch->seid,
9265 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9266 dev_dbg(&vsi->back->pdev->dev,
9267 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9272 ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
9274 dev_dbg(&vsi->back->pdev->dev,
9275 "Failed to rebuild cloud filters for channel VSI %u\n",
9284 * i40e_prep_for_reset - prep for the core to reset
9285 * @pf: board private structure
9286 * @lock_acquired: indicates whether or not the lock has been acquired
9287 * before this function was called.
9289 * Close up the VFs and other things in prep for PF Reset.
9291 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
9293 struct i40e_hw *hw = &pf->hw;
9294 i40e_status ret = 0;
9297 clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
9298 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9300 if (i40e_check_asq_alive(&pf->hw))
9301 i40e_vc_notify_reset(pf);
9303 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
9305 /* quiesce the VSIs and their queues that are not already DOWN */
9306 /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
9309 i40e_pf_quiesce_all_vsi(pf);
9313 for (v = 0; v < pf->num_alloc_vsi; v++) {
9315 pf->vsi[v]->seid = 0;
9318 i40e_shutdown_adminq(&pf->hw);
9320 /* call shutdown HMC */
9321 if (hw->hmc.hmc_obj) {
9322 ret = i40e_shutdown_lan_hmc(hw);
9324 dev_warn(&pf->pdev->dev,
9325 "shutdown_lan_hmc failed: %d\n", ret);
9330 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
9333 static void i40e_send_version(struct i40e_pf *pf)
9335 struct i40e_driver_version dv;
9337 dv.major_version = DRV_VERSION_MAJOR;
9338 dv.minor_version = DRV_VERSION_MINOR;
9339 dv.build_version = DRV_VERSION_BUILD;
9340 dv.subbuild_version = 0;
9341 strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
9342 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
9346 * i40e_get_oem_version - get OEM specific version information
9347 * @hw: pointer to the hardware structure
9349 static void i40e_get_oem_version(struct i40e_hw *hw)
9351 u16 block_offset = 0xffff;
9352 u16 block_length = 0;
9353 u16 capabilities = 0;
9357 #define I40E_SR_NVM_OEM_VERSION_PTR 0x1B
9358 #define I40E_NVM_OEM_LENGTH_OFFSET 0x00
9359 #define I40E_NVM_OEM_CAPABILITIES_OFFSET 0x01
9360 #define I40E_NVM_OEM_GEN_OFFSET 0x02
9361 #define I40E_NVM_OEM_RELEASE_OFFSET 0x03
9362 #define I40E_NVM_OEM_CAPABILITIES_MASK 0x000F
9363 #define I40E_NVM_OEM_LENGTH 3
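/* The OEM version block in the NVM is located via a pointer word and
 * holds, at the offsets defined above, a length word, a capabilities
 * word, and generation/snapshot and release words; the last two are
 * combined into hw->nvm.oem_ver below.
 */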
9365 /* Check if pointer to OEM version block is valid. */
9366 i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
9367 if (block_offset == 0xffff)
9370 /* Check if OEM version block has correct length. */
9371 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
9373 if (block_length < I40E_NVM_OEM_LENGTH)
9376 /* Check if OEM version format is as expected. */
9377 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
9379 if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
9382 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
9384 i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
9386 hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
9387 hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
9391 * i40e_reset - wait for a core reset to finish; reset the PF if no core reset is seen
9392 * @pf: board private structure
9394 static int i40e_reset(struct i40e_pf *pf)
9396 struct i40e_hw *hw = &pf->hw;
9399 ret = i40e_pf_reset(hw);
9401 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
9402 set_bit(__I40E_RESET_FAILED, pf->state);
9403 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9411 * i40e_rebuild - rebuild using a saved config
9412 * @pf: board private structure
9413 * @reinit: if the Main VSI needs to be re-initialized.
9414 * @lock_acquired: indicates whether or not the lock has been acquired
9415 * before this function was called.
9417 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
9419 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
9420 struct i40e_hw *hw = &pf->hw;
9425 if (test_bit(__I40E_DOWN, pf->state))
9426 goto clear_recovery;
9427 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
9429 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
9430 ret = i40e_init_adminq(&pf->hw);
9432 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
9433 i40e_stat_str(&pf->hw, ret),
9434 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9435 goto clear_recovery;
9437 i40e_get_oem_version(&pf->hw);
9439 if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
9440 ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
9441 hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
9442 /* The following delay is necessary for 4.33 firmware and older
9443 * to recover after EMP reset. 200 ms should suffice, but we
9444 * wait 300 ms here to be sure that FW is ready to operate
9450 /* re-verify the eeprom if we just had an EMP reset */
9451 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
9452 i40e_verify_eeprom(pf);
9454 i40e_clear_pxe_mode(hw);
9455 ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
9457 goto end_core_reset;
9459 ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
9460 hw->func_caps.num_rx_qp, 0, 0);
9462 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
9463 goto end_core_reset;
9465 ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
9467 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
9468 goto end_core_reset;
9471 /* Enable FW to write a default DCB config on link-up */
9472 i40e_aq_set_dcb_parameters(hw, true, NULL);
9474 #ifdef CONFIG_I40E_DCB
9475 ret = i40e_init_pf_dcb(pf);
9477 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
9478 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9479 /* Continue without DCB enabled */
9481 #endif /* CONFIG_I40E_DCB */
9482 /* do basic switch setup */
9485 ret = i40e_setup_pf_switch(pf, reinit);
9489 /* The driver only wants link up/down and module qualification
9490 * reports from firmware. Note the negative logic.
9492 ret = i40e_aq_set_phy_int_mask(&pf->hw,
9493 ~(I40E_AQ_EVENT_LINK_UPDOWN |
9494 I40E_AQ_EVENT_MEDIA_NA |
9495 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
9497 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
9498 i40e_stat_str(&pf->hw, ret),
9499 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9501 /* Rebuild the VSIs and VEBs that existed before reset.
9502 * They are still in our local switch element arrays, so only
9503 * need to rebuild the switch model in the HW.
9505 * If there were VEBs but the reconstitution failed, we'll try
9506 * to recover minimal use by getting the basic PF VSI working.
9508 if (vsi->uplink_seid != pf->mac_seid) {
9509 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
9510 /* find the one VEB connected to the MAC, and find orphans */
9511 for (v = 0; v < I40E_MAX_VEB; v++) {
9515 if (pf->veb[v]->uplink_seid == pf->mac_seid ||
9516 pf->veb[v]->uplink_seid == 0) {
9517 ret = i40e_reconstitute_veb(pf->veb[v]);
9522 /* If Main VEB failed, we're in deep doodoo,
9523 * so give up rebuilding the switch and set up
9524 * for minimal rebuild of PF VSI.
9525 * If orphan failed, we'll report the error
9526 * but try to keep going.
9528 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
9529 dev_info(&pf->pdev->dev,
9530 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
9532 vsi->uplink_seid = pf->mac_seid;
9534 } else if (pf->veb[v]->uplink_seid == 0) {
9535 dev_info(&pf->pdev->dev,
9536 "rebuild of orphan VEB failed: %d\n",
9543 if (vsi->uplink_seid == pf->mac_seid) {
9544 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
9545 /* no VEB, so rebuild only the Main VSI */
9546 ret = i40e_add_vsi(vsi);
9548 dev_info(&pf->pdev->dev,
9549 "rebuild of Main VSI failed: %d\n", ret);
9554 if (vsi->mqprio_qopt.max_rate[0]) {
9555 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
9558 do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
9559 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
9563 credits = max_tx_rate;
9564 do_div(credits, I40E_BW_CREDIT_DIVISOR);
9565 dev_dbg(&vsi->back->pdev->dev,
9566 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
9572 ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
9576 /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
9577 * for this main VSI if they exist
9579 ret = i40e_rebuild_channels(vsi);
9583 /* Reconfigure hardware for allowing smaller MSS in the case
9584 * of TSO, so that we avoid the MDD being fired and causing
9585 * a reset in the case of small MSS+TSO.
9587 #define I40E_REG_MSS 0x000E64DC
9588 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
9589 #define I40E_64BYTE_MSS 0x400000
9590 val = rd32(hw, I40E_REG_MSS);
9591 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
9592 val &= ~I40E_REG_MSS_MIN_MASK;
9593 val |= I40E_64BYTE_MSS;
9594 wr32(hw, I40E_REG_MSS, val);
9597 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
9599 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
9601 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
9602 i40e_stat_str(&pf->hw, ret),
9603 i40e_aq_str(&pf->hw,
9604 pf->hw.aq.asq_last_status));
9606 /* reinit the misc interrupt */
9607 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9608 ret = i40e_setup_misc_vector(pf);
9610 /* Add a filter to stop all Flow control frames from any VSI from being
9611 * transmitted. By doing so we stop a malicious VF from sending out
9612 * PAUSE or PFC frames and potentially controlling traffic for other
 * PF/VF VSIs.
9614 * The FW can still send Flow control frames if enabled.
9616 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
9619 /* restart the VSIs that were rebuilt and running before the reset */
9620 i40e_pf_unquiesce_all_vsi(pf);
9622 /* Release the RTNL lock before we start resetting VFs */
9626 /* Restore promiscuous settings */
9627 ret = i40e_set_promiscuous(pf, pf->cur_promisc);
9629 dev_warn(&pf->pdev->dev,
9630 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
9631 pf->cur_promisc ? "on" : "off",
9632 i40e_stat_str(&pf->hw, ret),
9633 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9635 i40e_reset_all_vfs(pf, true);
9637 /* tell the firmware that we're starting */
9638 i40e_send_version(pf);
9640 /* We've already released the lock, so don't do it again */
9641 goto end_core_reset;
9647 clear_bit(__I40E_RESET_FAILED, pf->state);
9649 clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
9650 clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
9654 * i40e_reset_and_rebuild - reset and rebuild using a saved config
9655 * @pf: board private structure
9656 * @reinit: if the Main VSI needs to be re-initialized.
9657 * @lock_acquired: indicates whether or not the lock has been acquired
9658 * before this function was called.
9660 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
9664 /* Now we wait for GRST to settle out.
9665 * We don't have to delete the VEBs or VSIs from the hw switch
9666 * because the reset will make them disappear.
9668 ret = i40e_reset(pf);
9670 i40e_rebuild(pf, reinit, lock_acquired);
9674 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
9675 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
9677 * Close up the VFs and other things in prep for a Core Reset,
9678 * then get ready to rebuild the world.
9682 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
9684 i40e_prep_for_reset(pf, lock_acquired);
9685 i40e_reset_and_rebuild(pf, false, lock_acquired);
9689 * i40e_handle_mdd_event
9690 * @pf: pointer to the PF structure
9692 * Called from the MDD irq handler to identify possibly malicious VFs
9694 static void i40e_handle_mdd_event(struct i40e_pf *pf)
9696 struct i40e_hw *hw = &pf->hw;
9697 bool mdd_detected = false;
9698 bool pf_mdd_detected = false;
9703 if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
9706 /* find what triggered the MDD event */
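/* The global MDET registers latch the offending function and queue;
 * they are written with all ones below once the event has been logged.
 */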
9707 reg = rd32(hw, I40E_GL_MDET_TX);
9708 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
9709 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
9710 I40E_GL_MDET_TX_PF_NUM_SHIFT;
9711 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
9712 I40E_GL_MDET_TX_VF_NUM_SHIFT;
9713 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
9714 I40E_GL_MDET_TX_EVENT_SHIFT;
9715 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
9716 I40E_GL_MDET_TX_QUEUE_SHIFT) -
9717 pf->hw.func_caps.base_queue;
9718 if (netif_msg_tx_err(pf))
9719 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
9720 event, queue, pf_num, vf_num);
9721 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
9722 mdd_detected = true;
9724 reg = rd32(hw, I40E_GL_MDET_RX);
9725 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
9726 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
9727 I40E_GL_MDET_RX_FUNCTION_SHIFT;
9728 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
9729 I40E_GL_MDET_RX_EVENT_SHIFT;
9730 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
9731 I40E_GL_MDET_RX_QUEUE_SHIFT) -
9732 pf->hw.func_caps.base_queue;
9733 if (netif_msg_rx_err(pf))
9734 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
9735 event, queue, func);
9736 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
9737 mdd_detected = true;
9741 reg = rd32(hw, I40E_PF_MDET_TX);
9742 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
9743 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
9744 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
9745 pf_mdd_detected = true;
9747 reg = rd32(hw, I40E_PF_MDET_RX);
9748 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
9749 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
9750 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
9751 pf_mdd_detected = true;
9753 /* Queue belongs to the PF, initiate a reset */
9754 if (pf_mdd_detected) {
9755 set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
9756 i40e_service_event_schedule(pf);
9760 /* see if one of the VFs needs its hand slapped */
9761 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
9763 reg = rd32(hw, I40E_VP_MDET_TX(i));
9764 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
9765 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
9766 vf->num_mdd_events++;
9767 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
9771 reg = rd32(hw, I40E_VP_MDET_RX(i));
9772 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
9773 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
9774 vf->num_mdd_events++;
9775 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
9779 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
9780 dev_info(&pf->pdev->dev,
9781 "Too many MDD events on VF %d, disabled\n", i);
9782 dev_info(&pf->pdev->dev,
9783 "Use PF Control I/F to re-enable the VF\n");
9784 set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
9788 /* re-enable mdd interrupt cause */
9789 clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
9790 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
9791 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
9792 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
9796 static const char *i40e_tunnel_name(u8 type)
9799 case UDP_TUNNEL_TYPE_VXLAN:
9801 case UDP_TUNNEL_TYPE_GENEVE:
9809 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
9810 * @pf: board private structure
9812 static void i40e_sync_udp_filters(struct i40e_pf *pf)
9816 /* loop through and set pending bit for all active UDP filters */
9817 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9818 if (pf->udp_ports[i].port)
9819 pf->pending_udp_bitmap |= BIT_ULL(i);
9822 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
9826 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
9827 * @pf: board private structure
9829 static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
9831 struct i40e_hw *hw = &pf->hw;
9832 u8 filter_index, type;
9836 if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
9839 /* acquire RTNL to maintain state of flags and port requests */
9842 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
9843 if (pf->pending_udp_bitmap & BIT_ULL(i)) {
9844 struct i40e_udp_port_config *udp_port;
9845 i40e_status ret = 0;
9847 udp_port = &pf->udp_ports[i];
9848 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9850 port = READ_ONCE(udp_port->port);
9851 type = READ_ONCE(udp_port->type);
9852 filter_index = READ_ONCE(udp_port->filter_index);
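/* A non-zero port requests adding a tunnel filter; a zero port with a
 * previously recorded filter index requests deleting it.
 */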
9854 /* release RTNL while we wait on AQ command */
9858 ret = i40e_aq_add_udp_tunnel(hw, port,
9862 else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
9863 ret = i40e_aq_del_udp_tunnel(hw, filter_index,
9866 /* reacquire RTNL so we can update filter_index */
9870 dev_info(&pf->pdev->dev,
9871 "%s %s port %d, index %d failed, err %s aq_err %s\n",
9872 i40e_tunnel_name(type),
9873 port ? "add" : "delete",
9876 i40e_stat_str(&pf->hw, ret),
9877 i40e_aq_str(&pf->hw,
9878 pf->hw.aq.asq_last_status));
9880 /* failed to add, just reset port,
9881 * drop pending bit for any deletion
9884 pf->pending_udp_bitmap &= ~BIT_ULL(i);
9887 /* record filter index on success */
9888 udp_port->filter_index = filter_index;
9897 * i40e_service_task - Run the driver's async subtasks
9898 * @work: pointer to work_struct containing our data
9900 static void i40e_service_task(struct work_struct *work)
9902 struct i40e_pf *pf = container_of(work,
9905 unsigned long start_time = jiffies;
9907 /* don't bother with service tasks if a reset is in progress */
9908 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
9911 if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
9914 i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
9915 i40e_sync_filters_subtask(pf);
9916 i40e_reset_subtask(pf);
9917 i40e_handle_mdd_event(pf);
9918 i40e_vc_process_vflr_event(pf);
9919 i40e_watchdog_subtask(pf);
9920 i40e_fdir_reinit_subtask(pf);
9921 if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
9922 /* Client subtask will reopen next time through. */
9923 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
9925 i40e_client_subtask(pf);
9926 if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
9928 i40e_notify_client_of_l2_param_changes(
9929 pf->vsi[pf->lan_vsi]);
9931 i40e_sync_filters_subtask(pf);
9932 i40e_sync_udp_filters_subtask(pf);
9933 i40e_clean_adminq_subtask(pf);
9935 /* flush memory to make sure state is correct before next watchdog */
9936 smp_mb__before_atomic();
9937 clear_bit(__I40E_SERVICE_SCHED, pf->state);
9939 /* If the tasks have taken longer than one timer cycle or there
9940 * is more work to be done, reschedule the service task now
9941 * rather than wait for the timer to tick again.
9943 if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
9944 test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
9945 test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
9946 test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
9947 i40e_service_event_schedule(pf);
9951 * i40e_service_timer - timer callback
9952 * @t: pointer to the timer_list structure
9954 static void i40e_service_timer(struct timer_list *t)
9956 struct i40e_pf *pf = from_timer(pf, t, service_timer);
9958 mod_timer(&pf->service_timer,
9959 round_jiffies(jiffies + pf->service_timer_period));
9960 i40e_service_event_schedule(pf);
9964 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
9965 * @vsi: the VSI being configured
9967 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
9969 struct i40e_pf *pf = vsi->back;
9971 switch (vsi->type) {
9973 vsi->alloc_queue_pairs = pf->num_lan_qps;
9974 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9975 I40E_REQ_DESCRIPTOR_MULTIPLE);
9976 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
9977 vsi->num_q_vectors = pf->num_lan_msix;
9979 vsi->num_q_vectors = 1;
9984 vsi->alloc_queue_pairs = 1;
9985 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
9986 I40E_REQ_DESCRIPTOR_MULTIPLE);
9987 vsi->num_q_vectors = pf->num_fdsb_msix;
9990 case I40E_VSI_VMDQ2:
9991 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
9992 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
9993 I40E_REQ_DESCRIPTOR_MULTIPLE);
9994 vsi->num_q_vectors = pf->num_vmdq_msix;
9997 case I40E_VSI_SRIOV:
9998 vsi->alloc_queue_pairs = pf->num_vf_qps;
9999 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
10000 I40E_REQ_DESCRIPTOR_MULTIPLE);
10012 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
10013 * @vsi: VSI pointer
10014 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
10016 * On error: returns error code (negative)
10017 * On success: returns 0
10019 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
10021 struct i40e_ring **next_rings;
10025 /* allocate memory for both Tx, XDP Tx and Rx ring pointers */
10026 size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
10027 (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
10028 vsi->tx_rings = kzalloc(size, GFP_KERNEL);
10029 if (!vsi->tx_rings)
10031 next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
10032 if (i40e_enabled_xdp_vsi(vsi)) {
10033 vsi->xdp_rings = next_rings;
10034 next_rings += vsi->alloc_queue_pairs;
10036 vsi->rx_rings = next_rings;
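/* tx_rings, the optional xdp_rings, and rx_rings all point into the
 * single pointer array allocated above, in that order.
 */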
10038 if (alloc_qvectors) {
10039 /* allocate memory for q_vector pointers */
10040 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
10041 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
10042 if (!vsi->q_vectors) {
10050 kfree(vsi->tx_rings);
10055 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
10056 * @pf: board private structure
10057 * @type: type of VSI
10059 * On error: returns error code (negative)
10060 * On success: returns vsi index in PF (positive)
10062 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10065 struct i40e_vsi *vsi;
10069 /* Need to protect the allocation of the VSIs at the PF level */
10070 mutex_lock(&pf->switch_mutex);
10072 /* VSI list may be fragmented if VSI creation/destruction has
10073 * been happening. We can afford to do a quick scan to look
10074 * for any free VSIs in the list.
10076 * find next empty vsi slot, looping back around if necessary
10079 while (i < pf->num_alloc_vsi && pf->vsi[i])
10081 if (i >= pf->num_alloc_vsi) {
10083 while (i < pf->next_vsi && pf->vsi[i])
10087 if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
10088 vsi_idx = i; /* Found one! */
10091 goto unlock_pf; /* out of VSI slots! */
10093 pf->next_vsi = ++i;
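/* Remember where this search ended so the next allocation starts just
 * past the slot we used.
 */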
10095 vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
10102 set_bit(__I40E_VSI_DOWN, vsi->state);
10104 vsi->idx = vsi_idx;
10105 vsi->int_rate_limit = 0;
10106 vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
10107 pf->rss_table_size : 64;
10108 vsi->netdev_registered = false;
10109 vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
10110 hash_init(vsi->mac_filter_hash);
10111 vsi->irqs_ready = false;
10113 ret = i40e_set_num_rings_in_vsi(vsi);
10117 ret = i40e_vsi_alloc_arrays(vsi, true);
10121 /* Setup default MSIX irq handler for VSI */
10122 i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
10124 /* Initialize VSI lock */
10125 spin_lock_init(&vsi->mac_filter_hash_lock);
10126 pf->vsi[vsi_idx] = vsi;
10131 pf->next_vsi = i - 1;
10134 mutex_unlock(&pf->switch_mutex);
10139 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
10140 * @vsi: VSI pointer
10141 * @free_qvectors: a bool to specify if q_vectors need to be freed.
10146 static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
10148 /* free the ring and vector containers */
10149 if (free_qvectors) {
10150 kfree(vsi->q_vectors);
10151 vsi->q_vectors = NULL;
10153 kfree(vsi->tx_rings);
10154 vsi->tx_rings = NULL;
10155 vsi->rx_rings = NULL;
10156 vsi->xdp_rings = NULL;
10160 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
10162 * @vsi: Pointer to VSI structure
10164 static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
10169 kfree(vsi->rss_hkey_user);
10170 vsi->rss_hkey_user = NULL;
10172 kfree(vsi->rss_lut_user);
10173 vsi->rss_lut_user = NULL;
10177 * i40e_vsi_clear - Deallocate the VSI provided
10178 * @vsi: the VSI being un-configured
10180 static int i40e_vsi_clear(struct i40e_vsi *vsi)
10182 struct i40e_pf *pf;
10191 mutex_lock(&pf->switch_mutex);
10192 if (!pf->vsi[vsi->idx]) {
10193 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
10194 vsi->idx, vsi->idx, vsi->type);
10198 if (pf->vsi[vsi->idx] != vsi) {
10199 dev_err(&pf->pdev->dev,
10200 "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
10201 pf->vsi[vsi->idx]->idx,
10202 pf->vsi[vsi->idx]->type,
10203 vsi->idx, vsi->type);
10207 /* updates the PF for this cleared vsi */
10208 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10209 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10211 i40e_vsi_free_arrays(vsi, true);
10212 i40e_clear_rss_config_user(vsi);
10214 pf->vsi[vsi->idx] = NULL;
10215 if (vsi->idx < pf->next_vsi)
10216 pf->next_vsi = vsi->idx;
10219 mutex_unlock(&pf->switch_mutex);
10227 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
10228 * @vsi: the VSI being cleaned
10230 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
10234 if (vsi->tx_rings && vsi->tx_rings[0]) {
10235 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10236 kfree_rcu(vsi->tx_rings[i], rcu);
10237 WRITE_ONCE(vsi->tx_rings[i], NULL);
10238 WRITE_ONCE(vsi->rx_rings[i], NULL);
10239 if (vsi->xdp_rings)
10240 WRITE_ONCE(vsi->xdp_rings[i], NULL);
10246 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
10247 * @vsi: the VSI being configured
10249 static int i40e_alloc_rings(struct i40e_vsi *vsi)
10251 int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
10252 struct i40e_pf *pf = vsi->back;
10253 struct i40e_ring *ring;
10255 /* Set basic values in the rings to be used later during open() */
10256 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
10257 /* allocate space for both Tx and Rx in one shot */
10258 ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
10262 ring->queue_index = i;
10263 ring->reg_idx = vsi->base_queue + i;
10264 ring->ring_active = false;
10266 ring->netdev = vsi->netdev;
10267 ring->dev = &pf->pdev->dev;
10268 ring->count = vsi->num_desc;
10271 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10272 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10273 ring->itr_setting = pf->tx_itr_default;
10274 WRITE_ONCE(vsi->tx_rings[i], ring++);
10276 if (!i40e_enabled_xdp_vsi(vsi))
10279 ring->queue_index = vsi->alloc_queue_pairs + i;
10280 ring->reg_idx = vsi->base_queue + ring->queue_index;
10281 ring->ring_active = false;
10283 ring->netdev = NULL;
10284 ring->dev = &pf->pdev->dev;
10285 ring->count = vsi->num_desc;
10288 if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
10289 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
10290 set_ring_xdp(ring);
10291 ring->itr_setting = pf->tx_itr_default;
10292 WRITE_ONCE(vsi->xdp_rings[i], ring++);
10295 ring->queue_index = i;
10296 ring->reg_idx = vsi->base_queue + i;
10297 ring->ring_active = false;
10299 ring->netdev = vsi->netdev;
10300 ring->dev = &pf->pdev->dev;
10301 ring->count = vsi->num_desc;
10304 ring->itr_setting = pf->rx_itr_default;
10305 WRITE_ONCE(vsi->rx_rings[i], ring);
10311 i40e_vsi_clear_rings(vsi);
10316 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
10317 * @pf: board private structure
10318 * @vectors: the number of MSI-X vectors to request
10320 * Returns the number of vectors reserved, or error
10322 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
10324 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
10325 I40E_MIN_MSIX, vectors);
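/* pci_enable_msix_range() grants anywhere from I40E_MIN_MSIX up to the
 * requested count and returns the number actually reserved, or a
 * negative errno on failure.
 */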
10327 dev_info(&pf->pdev->dev,
10328 "MSI-X vector reservation failed: %d\n", vectors);
10336 * i40e_init_msix - Setup the MSIX capability
10337 * @pf: board private structure
10339 * Work with the OS to set up the MSIX vectors needed.
10341 * Returns the number of vectors reserved or negative on failure
10343 static int i40e_init_msix(struct i40e_pf *pf)
10345 struct i40e_hw *hw = &pf->hw;
10346 int cpus, extra_vectors;
10350 int iwarp_requested = 0;
10352 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
10355 /* The number of vectors we'll request will be comprised of:
10356 * - Add 1 for "other" cause for Admin Queue events, etc.
10357 * - The number of LAN queue pairs
10358 * - Queues being used for RSS.
10359 * We don't need as many as max_rss_size vectors;
10360 * use rss_size instead in the calculation since that
10361 * is governed by the number of CPUs in the system.
10362 * - assumes symmetric Tx/Rx pairing
10363 * - The number of VMDq pairs
10364 * - The CPU count within the NUMA node if iWARP is enabled
10365 * Once we count this up, try the request.
10367 * If we can't get what we want, we'll simplify to nearly nothing
10368 * and try again. If that still fails, we punt.
10370 vectors_left = hw->func_caps.num_msix_vectors;
10373 /* reserve one vector for miscellaneous handler */
10374 if (vectors_left) {
10379 /* reserve some vectors for the main PF traffic queues. Initially we
10380 * only reserve at most 50% of the available vectors, in the case that
10381 * the number of online CPUs is large. This ensures that we can enable
10382 * extra features as well. Once we've enabled the other features, we
10383 * will use any remaining vectors to reach as close as we can to the
10384 * number of online CPUs.
10386 cpus = num_online_cpus();
10387 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
10388 vectors_left -= pf->num_lan_msix;
10390 /* reserve one vector for sideband flow director */
10391 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10392 if (vectors_left) {
10393 pf->num_fdsb_msix = 1;
10397 pf->num_fdsb_msix = 0;
10401 /* can we reserve enough for iWARP? */
10402 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10403 iwarp_requested = pf->num_iwarp_msix;
10406 pf->num_iwarp_msix = 0;
10407 else if (vectors_left < pf->num_iwarp_msix)
10408 pf->num_iwarp_msix = 1;
10409 v_budget += pf->num_iwarp_msix;
10410 vectors_left -= pf->num_iwarp_msix;
10413 /* any vectors left over go for VMDq support */
10414 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
10415 if (!vectors_left) {
10416 pf->num_vmdq_msix = 0;
10417 pf->num_vmdq_qps = 0;
10419 int vmdq_vecs_wanted =
10420 pf->num_vmdq_vsis * pf->num_vmdq_qps;
10422 min_t(int, vectors_left, vmdq_vecs_wanted);
10424 /* if we're short on vectors for what's desired, we limit
10425 * the queues per vmdq. If this is still more than are
10426 * available, the user will need to change the number of
10427 * queues/vectors used by the PF later with the ethtool
 * channels command.
10430 if (vectors_left < vmdq_vecs_wanted) {
10431 pf->num_vmdq_qps = 1;
10432 vmdq_vecs_wanted = pf->num_vmdq_vsis;
10433 vmdq_vecs = min_t(int,
10437 pf->num_vmdq_msix = pf->num_vmdq_qps;
10439 v_budget += vmdq_vecs;
10440 vectors_left -= vmdq_vecs;
10444 /* On systems with a large number of SMP cores, we previously limited
10445 * the number of vectors for num_lan_msix to be at most 50% of the
10446 * available vectors, to allow for other features. Now, we add back
10447 * the remaining vectors. However, we ensure that the total
10448 * num_lan_msix will not exceed num_online_cpus(). To do this, we
10449 * calculate the number of vectors we can add without going over the
10450 * cap of CPUs. For systems with a small number of CPUs this will be
10453 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
10454 pf->num_lan_msix += extra_vectors;
10455 vectors_left -= extra_vectors;
10457 WARN(vectors_left < 0,
10458 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
10460 v_budget += pf->num_lan_msix;
10461 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
10463 if (!pf->msix_entries)
10466 for (i = 0; i < v_budget; i++)
10467 pf->msix_entries[i].entry = i;
10468 v_actual = i40e_reserve_msix_vectors(pf, v_budget);
10470 if (v_actual < I40E_MIN_MSIX) {
10471 pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
10472 kfree(pf->msix_entries);
10473 pf->msix_entries = NULL;
10474 pci_disable_msix(pf->pdev);
10477 } else if (v_actual == I40E_MIN_MSIX) {
10478 /* Adjust for minimal MSIX use */
10479 pf->num_vmdq_vsis = 0;
10480 pf->num_vmdq_qps = 0;
10481 pf->num_lan_qps = 1;
10482 pf->num_lan_msix = 1;
10484 } else if (v_actual != v_budget) {
10485 /* If we have limited resources, we will start with no vectors
10486 * for the special features and then allocate vectors to some
10487 * of these features based on the policy and at the end disable
10488 * the features that did not get any vectors.
10492 dev_info(&pf->pdev->dev,
10493 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
10494 v_actual, v_budget);
10495 /* reserve the misc vector */
10496 vec = v_actual - 1;
10498 /* Scale vector usage down */
10499 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
10500 pf->num_vmdq_vsis = 1;
10501 pf->num_vmdq_qps = 1;
10503 /* partition out the remaining vectors */
10506 pf->num_lan_msix = 1;
10509 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10510 pf->num_lan_msix = 1;
10511 pf->num_iwarp_msix = 1;
10513 pf->num_lan_msix = 2;
10517 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
10518 pf->num_iwarp_msix = min_t(int, (vec / 3),
10520 pf->num_vmdq_vsis = min_t(int, (vec / 3),
10521 I40E_DEFAULT_NUM_VMDQ_VSI);
10523 pf->num_vmdq_vsis = min_t(int, (vec / 2),
10524 I40E_DEFAULT_NUM_VMDQ_VSI);
10526 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10527 pf->num_fdsb_msix = 1;
10530 pf->num_lan_msix = min_t(int,
10531 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
10533 pf->num_lan_qps = pf->num_lan_msix;
10538 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
10539 (pf->num_fdsb_msix == 0)) {
10540 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
10541 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10542 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10544 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10545 (pf->num_vmdq_msix == 0)) {
10546 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
10547 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
10550 if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
10551 (pf->num_iwarp_msix == 0)) {
10552 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
10553 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
10555 i40e_debug(&pf->hw, I40E_DEBUG_INIT,
10556 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
10558 pf->num_vmdq_msix * pf->num_vmdq_vsis,
10560 pf->num_iwarp_msix);
10566 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
10567 * @vsi: the VSI being configured
10568 * @v_idx: index of the vector in the vsi struct
10569 * @cpu: cpu to be used on affinity_mask
10571 * We allocate one q_vector. If allocation fails we return -ENOMEM.
10573 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
10575 struct i40e_q_vector *q_vector;
10577 /* allocate q_vector */
10578 q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
10582 q_vector->vsi = vsi;
10583 q_vector->v_idx = v_idx;
10584 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
10587 netif_napi_add(vsi->netdev, &q_vector->napi,
10588 i40e_napi_poll, NAPI_POLL_WEIGHT);
10590 /* tie q_vector and vsi together */
10591 vsi->q_vectors[v_idx] = q_vector;
10597 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
10598 * @vsi: the VSI being configured
10600 * We allocate one q_vector per queue interrupt. If allocation fails we
10603 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
10605 struct i40e_pf *pf = vsi->back;
10606 int err, v_idx, num_q_vectors, current_cpu;
10608 /* if not MSIX, give the one vector only to the LAN VSI */
10609 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
10610 num_q_vectors = vsi->num_q_vectors;
10611 else if (vsi == pf->vsi[pf->lan_vsi])
10616 current_cpu = cpumask_first(cpu_online_mask);
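/* Spread the q_vectors across online CPUs round robin; each vector is
 * handed the next online CPU, wrapping back to the first when needed.
 */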
10618 for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
10619 err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
10622 current_cpu = cpumask_next(current_cpu, cpu_online_mask);
10623 if (unlikely(current_cpu >= nr_cpu_ids))
10624 current_cpu = cpumask_first(cpu_online_mask);
10631 i40e_free_q_vector(vsi, v_idx);
10637 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
10638 * @pf: board private structure to initialize
10640 static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
10645 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10646 vectors = i40e_init_msix(pf);
10648 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
10649 I40E_FLAG_IWARP_ENABLED |
10650 I40E_FLAG_RSS_ENABLED |
10651 I40E_FLAG_DCB_CAPABLE |
10652 I40E_FLAG_DCB_ENABLED |
10653 I40E_FLAG_SRIOV_ENABLED |
10654 I40E_FLAG_FD_SB_ENABLED |
10655 I40E_FLAG_FD_ATR_ENABLED |
10656 I40E_FLAG_VMDQ_ENABLED);
10657 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
10659 /* rework the queue expectations without MSIX */
10660 i40e_determine_queue_usage(pf);
10664 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10665 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
10666 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
10667 vectors = pci_enable_msi(pf->pdev);
10669 dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
10671 pf->flags &= ~I40E_FLAG_MSI_ENABLED;
10673 vectors = 1; /* one MSI or Legacy vector */
10676 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
10677 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
10679 /* set up vector assignment tracking */
10680 size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
10681 pf->irq_pile = kzalloc(size, GFP_KERNEL);
10685 pf->irq_pile->num_entries = vectors;
10686 pf->irq_pile->search_hint = 0;
10688 /* track first vector for misc interrupts, ignore return */
10689 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
10695 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
10696 * @pf: private board data structure
10698 * Restore the interrupt scheme that was cleared when we suspended the
10699 * device. This should be called during resume to re-allocate the q_vectors
10700 * and reacquire IRQs.
10702 static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
10706 /* We cleared the MSI and MSI-X flags when disabling the old interrupt
10707 * scheme. We need to re-enable them here in order to attempt to
10708 * re-acquire the MSI or MSI-X vectors
10710 pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
10712 err = i40e_init_interrupt_scheme(pf);
10716 /* Now that we've re-acquired IRQs, we need to remap the vectors and
10717 * rings together again.
10719 for (i = 0; i < pf->num_alloc_vsi; i++) {
10721 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
10724 i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
10728 err = i40e_setup_misc_vector(pf);
10732 if (pf->flags & I40E_FLAG_IWARP_ENABLED)
10733 i40e_client_update_msix_info(pf);
10740 i40e_vsi_free_q_vectors(pf->vsi[i]);
10747 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
10748 * @pf: board private structure
10750 * This sets up the handler for MSIX 0, which is used to manage the
10751 * non-queue interrupts, e.g. AdminQ and errors. This is not used
10752 * when in MSI or Legacy interrupt mode.
10754 static int i40e_setup_misc_vector(struct i40e_pf *pf)
10756 struct i40e_hw *hw = &pf->hw;
10759 /* Only request the IRQ once, the first time through. */
10760 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
10761 err = request_irq(pf->msix_entries[0].vector,
10762 i40e_intr, 0, pf->int_name, pf);
10764 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
10765 dev_info(&pf->pdev->dev,
10766 "request_irq for %s failed: %d\n",
10767 pf->int_name, err);
10772 i40e_enable_misc_int_causes(pf);
10774 /* associate no queues to the misc vector */
10775 wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
10776 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K >> 1);
10780 i40e_irq_dynamic_enable_icr0(pf);
10786 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
10787 * @vsi: Pointer to vsi structure
10788 * @seed: Buffer to store the hash keys
10789 * @lut: Buffer to store the lookup table entries
10790 * @lut_size: Size of buffer to store the lookup table entries
10792 * Return 0 on success, negative on failure
10794 static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
10795 u8 *lut, u16 lut_size)
10797 struct i40e_pf *pf = vsi->back;
10798 struct i40e_hw *hw = &pf->hw;
10802 ret = i40e_aq_get_rss_key(hw, vsi->id,
10803 (struct i40e_aqc_get_set_rss_key_data *)seed);
10805 dev_info(&pf->pdev->dev,
10806 "Cannot get RSS key, err %s aq_err %s\n",
10807 i40e_stat_str(&pf->hw, ret),
10808 i40e_aq_str(&pf->hw,
10809 pf->hw.aq.asq_last_status));
10815 bool pf_lut = vsi->type == I40E_VSI_MAIN;
10817 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
10819 dev_info(&pf->pdev->dev,
10820 "Cannot get RSS lut, err %s aq_err %s\n",
10821 i40e_stat_str(&pf->hw, ret),
10822 i40e_aq_str(&pf->hw,
10823 pf->hw.aq.asq_last_status));
10832 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
10833 * @vsi: Pointer to vsi structure
10834 * @seed: RSS hash seed
10835 * @lut: Lookup table
10836 * @lut_size: Lookup table size
10838 * Returns 0 on success, negative on failure
10840 static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
10841 const u8 *lut, u16 lut_size)
10843 struct i40e_pf *pf = vsi->back;
10844 struct i40e_hw *hw = &pf->hw;
10845 u16 vf_id = vsi->vf_id;
10848 /* Fill out hash function seed */
10850 u32 *seed_dw = (u32 *)seed;
10852 if (vsi->type == I40E_VSI_MAIN) {
10853 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10854 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
10855 } else if (vsi->type == I40E_VSI_SRIOV) {
10856 for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
10857 wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
10859 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
10864 u32 *lut_dw = (u32 *)lut;
10866 if (vsi->type == I40E_VSI_MAIN) {
10867 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10869 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10870 wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
10871 } else if (vsi->type == I40E_VSI_SRIOV) {
10872 if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
10874 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
10875 wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
10877 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
10886 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
10887 * @vsi: Pointer to VSI structure
10888 * @seed: Buffer to store the keys
10889 * @lut: Buffer to store the lookup table entries
10890 * @lut_size: Size of buffer to store the lookup table entries
10892 * Returns 0 on success, negative on failure
10894 static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
10895 u8 *lut, u16 lut_size)
10897 struct i40e_pf *pf = vsi->back;
10898 struct i40e_hw *hw = &pf->hw;
10902 u32 *seed_dw = (u32 *)seed;
10904 for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
10905 seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
10908 u32 *lut_dw = (u32 *)lut;
10910 if (lut_size != I40E_HLUT_ARRAY_SIZE)
10912 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
10913 lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
10920 * i40e_config_rss - Configure RSS keys and lut
10921 * @vsi: Pointer to VSI structure
10922 * @seed: RSS hash seed
10923 * @lut: Lookup table
10924 * @lut_size: Lookup table size
10926 * Returns 0 on success, negative on failure
10928 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10930 struct i40e_pf *pf = vsi->back;
10932 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
10933 return i40e_config_rss_aq(vsi, seed, lut, lut_size);
10935 return i40e_config_rss_reg(vsi, seed, lut, lut_size);
10939 * i40e_get_rss - Get RSS keys and lut
10940 * @vsi: Pointer to VSI structure
10941 * @seed: Buffer to store the keys
10942 * @lut: Buffer to store the lookup table entries
10943 * @lut_size: Size of buffer to store the lookup table entries
10945 * Returns 0 on success, negative on failure
10947 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
10949 struct i40e_pf *pf = vsi->back;
10951 if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
10952 return i40e_get_rss_aq(vsi, seed, lut, lut_size);
10954 return i40e_get_rss_reg(vsi, seed, lut, lut_size);
10958 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
10959 * @pf: Pointer to board private structure
10960 * @lut: Lookup table
10961 * @rss_table_size: Lookup table size
10962 * @rss_size: Range of queue number for hashing
10964 void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
10965 u16 rss_table_size, u16 rss_size)
10969 for (i = 0; i < rss_table_size; i++)
10970 lut[i] = i % rss_size;
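/* For illustration: with rss_table_size = 128 and rss_size = 4 the
 * default LUT round-robins queue indices,
 *
 *   lut[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... };
 *
 * so hashed flows are spread evenly across the first rss_size queues.
 */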
10974 * i40e_pf_config_rss - Prepare for RSS if used
10975 * @pf: board private structure
10977 static int i40e_pf_config_rss(struct i40e_pf *pf)
10979 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
10980 u8 seed[I40E_HKEY_ARRAY_SIZE];
10982 struct i40e_hw *hw = &pf->hw;
10987 /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
10988 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
10989 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
10990 hena |= i40e_pf_get_default_rss_hena(pf);
10992 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
10993 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
10995 /* Determine the RSS table size based on the hardware capabilities */
10996 reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
10997 reg_val = (pf->rss_table_size == 512) ?
10998 (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
10999 (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
11000 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
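/* Note: the HASHLUTSIZE bit written above selects the 512-entry PF
 * lookup table only when pf->rss_table_size reports 512 entries;
 * otherwise the smaller 128-entry table is assumed to be in use.
 */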
11002 /* Determine the RSS size of the VSI */
11003 if (!vsi->rss_size) {
11005 /* If the firmware does something weird during VSI init, we
11006 * could end up with zero TCs. Check for that to avoid
11007 * divide-by-zero. It probably won't pass traffic, but it also
11010 qcount = vsi->num_queue_pairs /
11011 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11012 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11014 if (!vsi->rss_size)
11017 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
11021 /* Use user configured lut if there is one, otherwise use default */
11022 if (vsi->rss_lut_user)
11023 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
11025 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
11027 /* Use user configured hash key if there is one, otherwise
11030 if (vsi->rss_hkey_user)
11031 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
11033 netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
11034 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
11041 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
11042 * @pf: board private structure
11043 * @queue_count: the requested queue count for rss.
11045 * returns 0 if rss is not enabled; if enabled, returns the final rss queue
11046 * count which may be different from the requested queue count.
11047 * Note: expects to be called while under rtnl_lock()
11049 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
11051 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
11054 if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
11057 new_rss_size = min_t(int, queue_count, pf->rss_size_max);
11059 if (queue_count != vsi->num_queue_pairs) {
11062 vsi->req_queue_pairs = queue_count;
11063 i40e_prep_for_reset(pf, true);
11065 pf->alloc_rss_size = new_rss_size;
11067 i40e_reset_and_rebuild(pf, true, true);
11069 /* Discard the user configured hash keys and lut, if fewer
11070 * queues are enabled.
11072 if (queue_count < vsi->rss_size) {
11073 i40e_clear_rss_config_user(vsi);
11074 dev_dbg(&pf->pdev->dev,
11075 "discard user configured hash keys and lut\n");
11078 /* Reset vsi->rss_size, as number of enabled queues changed */
11079 qcount = vsi->num_queue_pairs / (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
11080 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
11082 i40e_pf_config_rss(pf);
11084 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
11085 vsi->req_queue_pairs, pf->rss_size_max);
11086 return pf->alloc_rss_size;
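/* Context sketch (assuming the usual caller): this path is normally
 * reached from the ethtool channel-count request, e.g.
 * "ethtool -L <ifname> combined 8", which is why rtnl_lock() is
 * expected to be held and why a prep-for-reset/rebuild cycle runs
 * whenever the requested queue count differs from the current one.
 */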
11090 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
11091 * @pf: board private structure
11093 i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
11095 i40e_status status;
11096 bool min_valid, max_valid;
11097 u32 max_bw, min_bw;
11099 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
11100 &min_valid, &max_valid);
11104 pf->min_bw = min_bw;
11106 pf->max_bw = max_bw;
11113 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
11114 * @pf: board private structure
11116 i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
11118 struct i40e_aqc_configure_partition_bw_data bw_data;
11119 i40e_status status;
11121 memset(&bw_data, 0, sizeof(bw_data));
11123 /* Set the valid bit for this PF */
11124 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
11125 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
11126 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;
11128 /* Set the new bandwidths */
11129 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
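/* Note: this call only programs the partition bandwidth in the
 * temporary (alt RAM) location; i40e_commit_partition_bw_setting()
 * below is what makes it persistent by rewriting the NVM control word
 * so the shadow RAM contents are flushed to NVM.
 */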
11135 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
11136 * @pf: board private structure
11138 i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
11140 /* Commit temporary BW setting to permanent NVM image */
11141 enum i40e_admin_queue_err last_aq_status;
11145 if (pf->hw.partition_id != 1) {
11146 dev_info(&pf->pdev->dev,
11147 "Commit BW only works on partition 1! This is partition %d",
11148 pf->hw.partition_id);
11149 ret = I40E_NOT_SUPPORTED;
11150 goto bw_commit_out;
11153 /* Acquire NVM for read access */
11154 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
11155 last_aq_status = pf->hw.aq.asq_last_status;
11157 dev_info(&pf->pdev->dev,
11158 "Cannot acquire NVM for read access, err %s aq_err %s\n",
11159 i40e_stat_str(&pf->hw, ret),
11160 i40e_aq_str(&pf->hw, last_aq_status));
11161 goto bw_commit_out;
11164 /* Read word 0x10 of NVM - SW compatibility word 1 */
11165 ret = i40e_aq_read_nvm(&pf->hw,
11166 I40E_SR_NVM_CONTROL_WORD,
11167 0x10, sizeof(nvm_word), &nvm_word,
11169 /* Save off last admin queue command status before releasing
11172 last_aq_status = pf->hw.aq.asq_last_status;
11173 i40e_release_nvm(&pf->hw);
11175 dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
11176 i40e_stat_str(&pf->hw, ret),
11177 i40e_aq_str(&pf->hw, last_aq_status));
11178 goto bw_commit_out;
11181 /* Wait a bit for NVM release to complete */
11184 /* Acquire NVM for write access */
11185 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
11186 last_aq_status = pf->hw.aq.asq_last_status;
11188 dev_info(&pf->pdev->dev,
11189 "Cannot acquire NVM for write access, err %s aq_err %s\n",
11190 i40e_stat_str(&pf->hw, ret),
11191 i40e_aq_str(&pf->hw, last_aq_status));
11192 goto bw_commit_out;
11194 /* Write it back out unchanged to initiate update NVM,
11195 * which will force a write of the shadow (alt) RAM to
11196 * the NVM - thus storing the bandwidth values permanently.
11198 ret = i40e_aq_update_nvm(&pf->hw,
11199 I40E_SR_NVM_CONTROL_WORD,
11200 0x10, sizeof(nvm_word),
11201 &nvm_word, true, 0, NULL);
11202 /* Save off last admin queue command status before releasing
11205 last_aq_status = pf->hw.aq.asq_last_status;
11206 i40e_release_nvm(&pf->hw);
11208 dev_info(&pf->pdev->dev,
11209 "BW settings NOT SAVED, err %s aq_err %s\n",
11210 i40e_stat_str(&pf->hw, ret),
11211 i40e_aq_str(&pf->hw, last_aq_status));
11218 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
11219 * @pf: board private structure to initialize
11221 * i40e_sw_init initializes the Adapter private data structure.
11222 * Fields are initialized based on PCI device information and
11223 * OS network device settings (MTU size).
11225 static int i40e_sw_init(struct i40e_pf *pf)
11231 /* Set default capability flags */
11232 pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
11233 I40E_FLAG_MSI_ENABLED |
11234 I40E_FLAG_MSIX_ENABLED;
11236 /* Set default ITR */
11237 pf->rx_itr_default = I40E_ITR_RX_DEF;
11238 pf->tx_itr_default = I40E_ITR_TX_DEF;
11240 /* Depending on PF configurations, it is possible that the RSS
11241 * maximum might end up larger than the available queues
11243 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
11244 pf->alloc_rss_size = 1;
11245 pf->rss_table_size = pf->hw.func_caps.rss_table_size;
11246 pf->rss_size_max = min_t(int, pf->rss_size_max,
11247 pf->hw.func_caps.num_tx_qp);
11249 /* find the next higher power-of-2 of num cpus */
11250 pow = roundup_pow_of_two(num_online_cpus());
11251 pf->rss_size_max = min_t(int, pf->rss_size_max, pow);
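/* Worked example (hypothetical numbers): with rss_table_entry_width = 7,
 * num_tx_qp = 64 and 12 online CPUs,
 *
 *   rss_size_max = BIT(7)                          = 128
 *   rss_size_max = min(128, num_tx_qp = 64)        = 64
 *   rss_size_max = min(64, roundup_pow_of_two(12)) = 16
 *
 * and, when RSS is enabled below, alloc_rss_size = min(16, 12) = 12.
 */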
11253 if (pf->hw.func_caps.rss) {
11254 pf->flags |= I40E_FLAG_RSS_ENABLED;
11255 pf->alloc_rss_size = min_t(int, pf->rss_size_max,
11256 num_online_cpus());
11259 /* MFP mode enabled */
11260 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
11261 pf->flags |= I40E_FLAG_MFP_ENABLED;
11262 dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
11263 if (i40e_get_partition_bw_setting(pf)) {
11264 dev_warn(&pf->pdev->dev,
11265 "Could not get partition bw settings\n");
11267 dev_info(&pf->pdev->dev,
11268 "Partition BW Min = %8.8x, Max = %8.8x\n",
11269 pf->min_bw, pf->max_bw);
11271 /* nudge the Tx scheduler */
11272 i40e_set_partition_bw_setting(pf);
11276 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
11277 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
11278 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
11279 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
11280 if (pf->flags & I40E_FLAG_MFP_ENABLED &&
11281 pf->hw.num_partitions > 1)
11282 dev_info(&pf->pdev->dev,
11283 "Flow Director Sideband mode Disabled in MFP mode\n");
11285 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11286 pf->fdir_pf_filter_count =
11287 pf->hw.func_caps.fd_filters_guaranteed;
11288 pf->hw.fdir_shared_filter_count =
11289 pf->hw.func_caps.fd_filters_best_effort;
11292 if (pf->hw.mac.type == I40E_MAC_X722) {
11293 pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
11294 I40E_HW_128_QP_RSS_CAPABLE |
11295 I40E_HW_ATR_EVICT_CAPABLE |
11296 I40E_HW_WB_ON_ITR_CAPABLE |
11297 I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
11298 I40E_HW_NO_PCI_LINK_CHECK |
11299 I40E_HW_USE_SET_LLDP_MIB |
11300 I40E_HW_GENEVE_OFFLOAD_CAPABLE |
11301 I40E_HW_PTP_L4_CAPABLE |
11302 I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
11303 I40E_HW_OUTER_UDP_CSUM_CAPABLE);
11305 #define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
11306 if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
11307 I40E_FDEVICT_PCTYPE_DEFAULT) {
11308 dev_warn(&pf->pdev->dev,
11309 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
11310 pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
11312 } else if ((pf->hw.aq.api_maj_ver > 1) ||
11313 ((pf->hw.aq.api_maj_ver == 1) &&
11314 (pf->hw.aq.api_min_ver > 4))) {
11315 /* Supported in FW API version higher than 1.4 */
11316 pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
11319 /* Enable HW ATR eviction if possible */
11320 if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
11321 pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;
11323 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11324 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
11325 (pf->hw.aq.fw_maj_ver < 4))) {
11326 pf->hw_features |= I40E_HW_RESTART_AUTONEG;
11327 /* No DCB support for FW < v4.33 */
11328 pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
11331 /* Disable FW LLDP if FW < v4.3 */
11332 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11333 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
11334 (pf->hw.aq.fw_maj_ver < 4)))
11335 pf->hw_features |= I40E_HW_STOP_FW_LLDP;
11337 /* Use the FW Set LLDP MIB API if FW > v4.40 */
11338 if ((pf->hw.mac.type == I40E_MAC_XL710) &&
11339 (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
11340 (pf->hw.aq.fw_maj_ver >= 5)))
11341 pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
11343 /* Enable PTP L4 if FW > v6.0 */
11344 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11345 pf->hw.aq.fw_maj_ver >= 6)
11346 pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
11348 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
11349 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
11350 pf->flags |= I40E_FLAG_VMDQ_ENABLED;
11351 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
11354 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
11355 pf->flags |= I40E_FLAG_IWARP_ENABLED;
11356 /* IWARP needs one extra vector for CQP just like MISC. */
11357 pf->num_iwarp_msix = (int)num_online_cpus() + 1;
11359 /* Stopping the FW LLDP engine is only supported on the
11360 * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP
11361 * engine is not supported if NPAR is functioning on this
11364 if (pf->hw.mac.type == I40E_MAC_XL710 &&
11365 !pf->hw.func_caps.npar_enable &&
11366 (pf->hw.aq.api_maj_ver > 1 ||
11367 (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
11368 pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;
11370 #ifdef CONFIG_PCI_IOV
11371 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
11372 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
11373 pf->flags |= I40E_FLAG_SRIOV_ENABLED;
11374 pf->num_req_vfs = min_t(int,
11375 pf->hw.func_caps.num_vfs,
11376 I40E_MAX_VF_COUNT);
11378 #endif /* CONFIG_PCI_IOV */
11379 pf->eeprom_version = 0xDEAD;
11380 pf->lan_veb = I40E_NO_VEB;
11381 pf->lan_vsi = I40E_NO_VSI;
11383 /* By default FW has this off for performance reasons */
11384 pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
11386 /* set up queue assignment tracking */
11387 size = sizeof(struct i40e_lump_tracking)
11388 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
11389 pf->qp_pile = kzalloc(size, GFP_KERNEL);
11390 if (!pf->qp_pile) {
11394 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
11395 pf->qp_pile->search_hint = 0;
11397 pf->tx_timeout_recovery_level = 1;
11399 mutex_init(&pf->switch_mutex);
11406 * i40e_set_ntuple - set the ntuple feature flag and take action
11407 * @pf: board private structure to initialize
11408 * @features: the feature set that the stack is suggesting
11410 * returns a bool to indicate if reset needs to happen
11412 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
11414 bool need_reset = false;
11416 /* Check if Flow Director n-tuple support was enabled or disabled. If
11417 * the state changed, we need to reset.
11419 if (features & NETIF_F_NTUPLE) {
11420 /* Enable filters and mark for reset */
11421 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
11423 /* enable FD_SB only if there is MSI-X vector and no cloud
11426 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
11427 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
11428 pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
11431 /* turn off filters, mark for reset and clear SW filter list */
11432 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
11434 i40e_fdir_filter_exit(pf);
11436 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
11437 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
11438 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
11440 /* reset fd counters */
11441 pf->fd_add_err = 0;
11442 pf->fd_atr_cnt = 0;
11443 /* if ATR was auto disabled it can be re-enabled. */
11444 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
11445 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
11446 (I40E_DEBUG_FD & pf->hw.debug_mask))
11447 dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
11453 * i40e_clear_rss_lut - clear the rx hash lookup table
11454 * @vsi: the VSI being configured
11456 static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
11458 struct i40e_pf *pf = vsi->back;
11459 struct i40e_hw *hw = &pf->hw;
11460 u16 vf_id = vsi->vf_id;
11463 if (vsi->type == I40E_VSI_MAIN) {
11464 for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
11465 wr32(hw, I40E_PFQF_HLUT(i), 0);
11466 } else if (vsi->type == I40E_VSI_SRIOV) {
11467 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
11468 i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
11470 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
11475 * i40e_set_features - set the netdev feature flags
11476 * @netdev: ptr to the netdev being adjusted
11477 * @features: the feature set that the stack is suggesting
11478 * Note: expects to be called while under rtnl_lock()
11480 static int i40e_set_features(struct net_device *netdev,
11481 netdev_features_t features)
11483 struct i40e_netdev_priv *np = netdev_priv(netdev);
11484 struct i40e_vsi *vsi = np->vsi;
11485 struct i40e_pf *pf = vsi->back;
11488 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
11489 i40e_pf_config_rss(pf);
11490 else if (!(features & NETIF_F_RXHASH) &&
11491 netdev->features & NETIF_F_RXHASH)
11492 i40e_clear_rss_lut(vsi);
11494 if (features & NETIF_F_HW_VLAN_CTAG_RX)
11495 i40e_vlan_stripping_enable(vsi);
11497 i40e_vlan_stripping_disable(vsi);
11499 if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
11500 dev_err(&pf->pdev->dev,
11501 "Offloaded tc filters active, can't turn hw_tc_offload off");
11505 need_reset = i40e_set_ntuple(pf, features);
11508 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11514 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
11515 * @pf: board private structure
11516 * @port: The UDP port to look up
11518 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
11520 static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
11524 for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
11525 /* Do not report ports with pending deletions as
11528 if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
11530 if (pf->udp_ports[i].port == port)
11538 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
11539 * @netdev: This physical port's netdev
11540 * @ti: Tunnel endpoint information
11542 static void i40e_udp_tunnel_add(struct net_device *netdev,
11543 struct udp_tunnel_info *ti)
11545 struct i40e_netdev_priv *np = netdev_priv(netdev);
11546 struct i40e_vsi *vsi = np->vsi;
11547 struct i40e_pf *pf = vsi->back;
11548 u16 port = ntohs(ti->port);
11552 idx = i40e_get_udp_port_idx(pf, port);
11554 /* Check if port already exists */
11555 if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11556 netdev_info(netdev, "port %d already offloaded\n", port);
11560 /* Now check if there is space to add the new port */
11561 next_idx = i40e_get_udp_port_idx(pf, 0);
11563 if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
11564 netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
11569 switch (ti->type) {
11570 case UDP_TUNNEL_TYPE_VXLAN:
11571 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
11573 case UDP_TUNNEL_TYPE_GENEVE:
11574 if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
11576 pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
11582 /* New port: add it and mark its index in the bitmap */
11583 pf->udp_ports[next_idx].port = port;
11584 pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
11585 pf->pending_udp_bitmap |= BIT_ULL(next_idx);
11586 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
11590 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
11591 * @netdev: This physical port's netdev
11592 * @ti: Tunnel endpoint information
11594 static void i40e_udp_tunnel_del(struct net_device *netdev,
11595 struct udp_tunnel_info *ti)
11597 struct i40e_netdev_priv *np = netdev_priv(netdev);
11598 struct i40e_vsi *vsi = np->vsi;
11599 struct i40e_pf *pf = vsi->back;
11600 u16 port = ntohs(ti->port);
11603 idx = i40e_get_udp_port_idx(pf, port);
11605 /* Check if port already exists */
11606 if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
11609 switch (ti->type) {
11610 case UDP_TUNNEL_TYPE_VXLAN:
11611 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
11614 case UDP_TUNNEL_TYPE_GENEVE:
11615 if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
11622 /* if port exists, set it to 0 (mark for deletion)
11623 * and make it pending
11625 pf->udp_ports[idx].port = 0;
11627 /* Toggle pending bit instead of setting it. This way if we are
11628 * deleting a port that has yet to be added we just clear the pending
11629 * bit and don't have to worry about it.
11631 pf->pending_udp_bitmap ^= BIT_ULL(idx);
11632 set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
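/* For illustration: because the pending bit is XOR-toggled rather than
 * set, adding and then deleting the same port before the filter sync
 * subtask runs leaves the bit clear again:
 *
 *   add:    pending_udp_bitmap ^= BIT_ULL(idx);   bit -> 1
 *   delete: pending_udp_bitmap ^= BIT_ULL(idx);   bit -> 0
 *
 * so no admin queue command is issued for a port that never reached
 * the hardware.
 */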
11636 netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
11640 static int i40e_get_phys_port_id(struct net_device *netdev,
11641 struct netdev_phys_item_id *ppid)
11643 struct i40e_netdev_priv *np = netdev_priv(netdev);
11644 struct i40e_pf *pf = np->vsi->back;
11645 struct i40e_hw *hw = &pf->hw;
11647 if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
11648 return -EOPNOTSUPP;
11650 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
11651 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
11657 * i40e_ndo_fdb_add - add an entry to the hardware database
11658 * @ndm: the input from the stack
11659 * @tb: pointer to array of nladdr (unused)
11660 * @dev: the net device pointer
11661 * @addr: the MAC address entry being added
11663 * @flags: instructions from stack about fdb operation
11665 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
11666 struct net_device *dev,
11667 const unsigned char *addr, u16 vid,
11670 struct i40e_netdev_priv *np = netdev_priv(dev);
11671 struct i40e_pf *pf = np->vsi->back;
11674 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
11675 return -EOPNOTSUPP;
11678 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
11682 /* Hardware does not support aging addresses, so if a
11683 * ndm_state is given, only allow permanent addresses
11685 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
11686 netdev_info(dev, "FDB only supports static addresses\n");
11690 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
11691 err = dev_uc_add_excl(dev, addr);
11692 else if (is_multicast_ether_addr(addr))
11693 err = dev_mc_add_excl(dev, addr);
11697 /* Only return duplicate errors if NLM_F_EXCL is set */
11698 if (err == -EEXIST && !(flags & NLM_F_EXCL))
11705 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
11706 * @dev: the netdev being configured
11707 * @nlh: RTNL message
11708 * @flags: bridge flags
11710 * Inserts a new hardware bridge if not already created and
11711 * enables the bridging mode requested (VEB or VEPA). If the
11712 * hardware bridge has already been inserted and the request
11713 * is to change the mode then that requires a PF reset to
11714 * allow rebuild of the components with required hardware
11715 * bridge mode enabled.
11717 * Note: expects to be called while under rtnl_lock()
11719 static int i40e_ndo_bridge_setlink(struct net_device *dev,
11720 struct nlmsghdr *nlh,
11723 struct i40e_netdev_priv *np = netdev_priv(dev);
11724 struct i40e_vsi *vsi = np->vsi;
11725 struct i40e_pf *pf = vsi->back;
11726 struct i40e_veb *veb = NULL;
11727 struct nlattr *attr, *br_spec;
11730 /* Only for PF VSI for now */
11731 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11732 return -EOPNOTSUPP;
11734 /* Find the HW bridge for PF VSI */
11735 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11736 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11740 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11742 nla_for_each_nested(attr, br_spec, rem) {
11745 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11748 mode = nla_get_u16(attr);
11749 if ((mode != BRIDGE_MODE_VEPA) &&
11750 (mode != BRIDGE_MODE_VEB))
11753 /* Insert a new HW bridge */
11755 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
11756 vsi->tc_config.enabled_tc);
11758 veb->bridge_mode = mode;
11759 i40e_config_bridge_mode(veb);
11761 /* No Bridge HW offload available */
11765 } else if (mode != veb->bridge_mode) {
11766 /* Existing HW bridge but different mode needs reset */
11767 veb->bridge_mode = mode;
11768 /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
11769 if (mode == BRIDGE_MODE_VEB)
11770 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
11772 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
11773 i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
11782 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
11785 * @seq: RTNL message seq #
11786 * @dev: the netdev being configured
11787 * @filter_mask: unused
11788 * @nlflags: netlink flags passed in
11790 * Return the mode in which the hardware bridge is operating
11793 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11794 struct net_device *dev,
11795 u32 __always_unused filter_mask,
11798 struct i40e_netdev_priv *np = netdev_priv(dev);
11799 struct i40e_vsi *vsi = np->vsi;
11800 struct i40e_pf *pf = vsi->back;
11801 struct i40e_veb *veb = NULL;
11804 /* Only for PF VSI for now */
11805 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
11806 return -EOPNOTSUPP;
11808 /* Find the HW bridge for the PF VSI */
11809 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
11810 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
11817 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
11818 0, 0, nlflags, filter_mask, NULL);
11822 * i40e_features_check - Validate encapsulated packet conforms to limits
11824 * @dev: This physical port's netdev
11825 * @features: Offload features that the stack believes apply
11827 static netdev_features_t i40e_features_check(struct sk_buff *skb,
11828 struct net_device *dev,
11829 netdev_features_t features)
11833 /* No point in doing any of this if neither checksum nor GSO are
11834 * being requested for this frame. We can rule out both by just
11835 * checking for CHECKSUM_PARTIAL
11837 if (skb->ip_summed != CHECKSUM_PARTIAL)
11840 /* We cannot support GSO if the MSS is going to be less than
11841 * 64 bytes. If it is then we need to drop support for GSO.
11843 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
11844 features &= ~NETIF_F_GSO_MASK;
11846 /* MACLEN can support at most 63 words */
11847 len = skb_network_header(skb) - skb->data;
11848 if (len & ~(63 * 2))
11851 /* IPLEN and EIPLEN can support at most 127 dwords */
11852 len = skb_transport_header(skb) - skb_network_header(skb);
11853 if (len & ~(127 * 4))
11856 if (skb->encapsulation) {
11857 /* L4TUNLEN can support 127 words */
11858 len = skb_inner_network_header(skb) - skb_transport_header(skb);
11859 if (len & ~(127 * 2))
11862 /* IPLEN can support at most 127 dwords */
11863 len = skb_inner_transport_header(skb) -
11864 skb_inner_network_header(skb);
11865 if (len & ~(127 * 4))
11869 /* No need to validate L4LEN as TCP is the only protocol with
11870 * a flexible value and we support all possible values supported
11871 * by TCP, which is at most 15 dwords
11876 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
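/* Worked example of the limits checked above:
 *
 *   MACLEN:   63 x 2-byte words   -> L2 headers up to 126 bytes
 *   IPLEN:    127 x 4-byte dwords -> L3 headers up to 508 bytes
 *   L4TUNLEN: 127 x 2-byte words  -> tunnel headers up to 254 bytes
 *
 * A test such as "len & ~(63 * 2)" is non-zero whenever len is odd or
 * larger than 126, i.e. whenever it cannot be expressed as whole
 * 2-byte words within the field, and checksum/GSO offloads are then
 * stripped from the returned feature mask.
 */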
11880 * i40e_xdp_setup - add/remove an XDP program
11881 * @vsi: VSI to be changed
11882 * @prog: XDP program
11884 static int i40e_xdp_setup(struct i40e_vsi *vsi,
11885 struct bpf_prog *prog)
11887 int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
11888 struct i40e_pf *pf = vsi->back;
11889 struct bpf_prog *old_prog;
11893 /* Don't allow frames that span over multiple buffers */
11894 if (frame_size > vsi->rx_buf_len)
11897 if (!i40e_enabled_xdp_vsi(vsi) && !prog)
11900 /* When turning XDP on->off/off->on we reset and rebuild the rings. */
11901 need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
11904 i40e_prep_for_reset(pf, true);
11906 old_prog = xchg(&vsi->xdp_prog, prog);
11909 i40e_reset_and_rebuild(pf, true, true);
11911 for (i = 0; i < vsi->num_queue_pairs; i++)
11912 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
11915 bpf_prog_put(old_prog);
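/* Behaviour sketch, following the logic above:
 *
 *   no prog -> prog   : need_reset, rings are rebuilt with XDP resources
 *   prog   -> no prog : need_reset, rings are rebuilt without them
 *   prog A -> prog B  : no reset; the pointer is swapped with xchg() and
 *                       propagated to each Rx ring via WRITE_ONCE()
 *
 * A program is also rejected up front when a worst-case frame
 * (MTU + Ethernet header + FCS + one VLAN tag) would not fit in a
 * single Rx buffer, since XDP frames must not span multiple buffers.
 */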
11921 * i40e_xdp - implements ndo_bpf for i40e
11923 * @xdp: XDP command
11925 static int i40e_xdp(struct net_device *dev,
11926 struct netdev_bpf *xdp)
11928 struct i40e_netdev_priv *np = netdev_priv(dev);
11929 struct i40e_vsi *vsi = np->vsi;
11931 if (vsi->type != I40E_VSI_MAIN)
11934 switch (xdp->command) {
11935 case XDP_SETUP_PROG:
11936 return i40e_xdp_setup(vsi, xdp->prog);
11937 case XDP_QUERY_PROG:
11938 xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
11945 static const struct net_device_ops i40e_netdev_ops = {
11946 .ndo_open = i40e_open,
11947 .ndo_stop = i40e_close,
11948 .ndo_start_xmit = i40e_lan_xmit_frame,
11949 .ndo_get_stats64 = i40e_get_netdev_stats_struct,
11950 .ndo_set_rx_mode = i40e_set_rx_mode,
11951 .ndo_validate_addr = eth_validate_addr,
11952 .ndo_set_mac_address = i40e_set_mac,
11953 .ndo_change_mtu = i40e_change_mtu,
11954 .ndo_do_ioctl = i40e_ioctl,
11955 .ndo_tx_timeout = i40e_tx_timeout,
11956 .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid,
11957 .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid,
11958 #ifdef CONFIG_NET_POLL_CONTROLLER
11959 .ndo_poll_controller = i40e_netpoll,
11961 .ndo_setup_tc = __i40e_setup_tc,
11962 .ndo_set_features = i40e_set_features,
11963 .ndo_set_vf_mac = i40e_ndo_set_vf_mac,
11964 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
11965 .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
11966 .ndo_get_vf_config = i40e_ndo_get_vf_config,
11967 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
11968 .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
11969 .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
11970 .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
11971 .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
11972 .ndo_get_phys_port_id = i40e_get_phys_port_id,
11973 .ndo_fdb_add = i40e_ndo_fdb_add,
11974 .ndo_features_check = i40e_features_check,
11975 .ndo_bridge_getlink = i40e_ndo_bridge_getlink,
11976 .ndo_bridge_setlink = i40e_ndo_bridge_setlink,
11977 .ndo_bpf = i40e_xdp,
11978 .ndo_xdp_xmit = i40e_xdp_xmit,
11982 * i40e_config_netdev - Setup the netdev flags
11983 * @vsi: the VSI being configured
11985 * Returns 0 on success, negative value on failure
11987 static int i40e_config_netdev(struct i40e_vsi *vsi)
11989 struct i40e_pf *pf = vsi->back;
11990 struct i40e_hw *hw = &pf->hw;
11991 struct i40e_netdev_priv *np;
11992 struct net_device *netdev;
11993 u8 broadcast[ETH_ALEN];
11994 u8 mac_addr[ETH_ALEN];
11996 netdev_features_t hw_enc_features;
11997 netdev_features_t hw_features;
11999 etherdev_size = sizeof(struct i40e_netdev_priv);
12000 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
12004 vsi->netdev = netdev;
12005 np = netdev_priv(netdev);
12008 hw_enc_features = NETIF_F_SG |
12010 NETIF_F_IPV6_CSUM |
12012 NETIF_F_SOFT_FEATURES |
12017 NETIF_F_GSO_GRE_CSUM |
12018 NETIF_F_GSO_PARTIAL |
12019 NETIF_F_GSO_IPXIP4 |
12020 NETIF_F_GSO_IPXIP6 |
12021 NETIF_F_GSO_UDP_TUNNEL |
12022 NETIF_F_GSO_UDP_TUNNEL_CSUM |
12028 if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
12029 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
12031 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
12033 netdev->hw_enc_features |= hw_enc_features;
12035 /* record features VLANs can make use of */
12036 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
12038 if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
12039 netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
12041 hw_features = hw_enc_features |
12042 NETIF_F_HW_VLAN_CTAG_TX |
12043 NETIF_F_HW_VLAN_CTAG_RX;
12045 netdev->hw_features |= hw_features;
12047 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
12048 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
12050 if (vsi->type == I40E_VSI_MAIN) {
12051 SET_NETDEV_DEV(netdev, &pf->pdev->dev);
12052 ether_addr_copy(mac_addr, hw->mac.perm_addr);
12053 /* The following steps are necessary for two reasons. First,
12054 * some older NVM configurations load a default MAC-VLAN
12055 * filter that will accept any tagged packet, and we want to
12056 * replace this with a normal filter. Additionally, it is
12057 * possible our MAC address was provided by the platform using
12058 * Open Firmware or similar.
12060 * Thus, we need to remove the default filter and install one
12061 * specific to the MAC address.
12063 i40e_rm_default_mac_filter(vsi, mac_addr);
12064 spin_lock_bh(&vsi->mac_filter_hash_lock);
12065 i40e_add_mac_filter(vsi, mac_addr);
12066 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12068 /* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
12069 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
12070 * the end, which is 4 bytes long, so force truncation of the
12071 * original name by IFNAMSIZ - 4
12073 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
12075 pf->vsi[pf->lan_vsi]->netdev->name);
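/* For illustration: if the main VSI netdev is "eth0", a VMDq VSI
 * registers as "eth0v%d" and the stack expands it to "eth0v0",
 * "eth0v1", ...; the "%.*s" precision above keeps the result within
 * IFNAMSIZ.
 */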
12076 eth_random_addr(mac_addr);
12078 spin_lock_bh(&vsi->mac_filter_hash_lock);
12079 i40e_add_mac_filter(vsi, mac_addr);
12080 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12083 /* Add the broadcast filter so that we initially will receive
12084 * broadcast packets. Note that when a new VLAN is first added the
12085 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
12086 * specific filters as part of transitioning into "vlan" operation.
12087 * When more VLANs are added, the driver will copy each existing MAC
12088 * filter and add it for the new VLAN.
12090 * Broadcast filters are handled specially by
12091 * i40e_sync_filters_subtask, as the driver must set the broadcast
12092 * promiscuous bit instead of adding this directly as a MAC/VLAN
12093 * filter. The subtask will update the correct broadcast promiscuous
12094 * bits as VLANs become active or inactive.
12096 eth_broadcast_addr(broadcast);
12097 spin_lock_bh(&vsi->mac_filter_hash_lock);
12098 i40e_add_mac_filter(vsi, broadcast);
12099 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12101 ether_addr_copy(netdev->dev_addr, mac_addr);
12102 ether_addr_copy(netdev->perm_addr, mac_addr);
12104 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
12105 netdev->neigh_priv_len = sizeof(u32) * 4;
12107 netdev->priv_flags |= IFF_UNICAST_FLT;
12108 netdev->priv_flags |= IFF_SUPP_NOFCS;
12109 /* Setup netdev TC information */
12110 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);
12112 netdev->netdev_ops = &i40e_netdev_ops;
12113 netdev->watchdog_timeo = 5 * HZ;
12114 i40e_set_ethtool_ops(netdev);
12116 /* MTU range: 68 - 9706 */
12117 netdev->min_mtu = ETH_MIN_MTU;
12118 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;
12124 * i40e_vsi_delete - Delete a VSI from the switch
12125 * @vsi: the VSI being removed
12127 * There is no return value; the admin queue delete status is not checked.
12129 static void i40e_vsi_delete(struct i40e_vsi *vsi)
12131 /* removing the default VSI is not allowed */
12132 if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
12135 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
12139 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
12140 * @vsi: the VSI being queried
12142 * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
12144 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
12146 struct i40e_veb *veb;
12147 struct i40e_pf *pf = vsi->back;
12149 /* Uplink is not a bridge so default to VEB */
12150 if (vsi->veb_idx == I40E_NO_VEB)
12153 veb = pf->veb[vsi->veb_idx];
12155 dev_info(&pf->pdev->dev,
12156 "There is no veb associated with the bridge\n");
12160 /* Uplink is a bridge in VEPA mode */
12161 if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
12164 /* Uplink is a bridge in VEB mode */
12168 /* VEPA is now default bridge, so return 0 */
12173 * i40e_add_vsi - Add a VSI to the switch
12174 * @vsi: the VSI being configured
12176 * This initializes a VSI context depending on the VSI type to be added and
12177 * passes it down to the add_vsi aq command.
12179 static int i40e_add_vsi(struct i40e_vsi *vsi)
12182 struct i40e_pf *pf = vsi->back;
12183 struct i40e_hw *hw = &pf->hw;
12184 struct i40e_vsi_context ctxt;
12185 struct i40e_mac_filter *f;
12186 struct hlist_node *h;
12189 u8 enabled_tc = 0x1; /* TC0 enabled */
12192 memset(&ctxt, 0, sizeof(ctxt));
12193 switch (vsi->type) {
12194 case I40E_VSI_MAIN:
12195 /* The PF's main VSI is already setup as part of the
12196 * device initialization, so we'll not bother with
12197 * the add_vsi call, but we will retrieve the current
12200 ctxt.seid = pf->main_vsi_seid;
12201 ctxt.pf_num = pf->hw.pf_id;
12203 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
12204 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12206 dev_info(&pf->pdev->dev,
12207 "couldn't get PF vsi config, err %s aq_err %s\n",
12208 i40e_stat_str(&pf->hw, ret),
12209 i40e_aq_str(&pf->hw,
12210 pf->hw.aq.asq_last_status));
12213 vsi->info = ctxt.info;
12214 vsi->info.valid_sections = 0;
12216 vsi->seid = ctxt.seid;
12217 vsi->id = ctxt.vsi_number;
12219 enabled_tc = i40e_pf_get_tc_map(pf);
12221 /* Source pruning is enabled by default, so the flag is
12222 * negative logic - if it's set, we need to fiddle with
12223 * the VSI to disable source pruning.
12225 if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
12226 memset(&ctxt, 0, sizeof(ctxt));
12227 ctxt.seid = pf->main_vsi_seid;
12228 ctxt.pf_num = pf->hw.pf_id;
12230 ctxt.info.valid_sections |=
12231 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12232 ctxt.info.switch_id =
12233 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
12234 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12236 dev_info(&pf->pdev->dev,
12237 "update vsi failed, err %s aq_err %s\n",
12238 i40e_stat_str(&pf->hw, ret),
12239 i40e_aq_str(&pf->hw,
12240 pf->hw.aq.asq_last_status));
12246 /* MFP mode setup queue map and update VSI */
12247 if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
12248 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
12249 memset(&ctxt, 0, sizeof(ctxt));
12250 ctxt.seid = pf->main_vsi_seid;
12251 ctxt.pf_num = pf->hw.pf_id;
12253 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
12254 ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
12256 dev_info(&pf->pdev->dev,
12257 "update vsi failed, err %s aq_err %s\n",
12258 i40e_stat_str(&pf->hw, ret),
12259 i40e_aq_str(&pf->hw,
12260 pf->hw.aq.asq_last_status));
12264 /* update the local VSI info queue map */
12265 i40e_vsi_update_queue_map(vsi, &ctxt);
12266 vsi->info.valid_sections = 0;
12268 /* Default/Main VSI is only enabled for TC0;
12269 * reconfigure it to enable all TCs that are
12270 * available on the port in SFP mode.
12271 * For MFP case the iSCSI PF would use this
12272 * flow to enable LAN+iSCSI TC.
12274 ret = i40e_vsi_config_tc(vsi, enabled_tc);
12276 /* Single TC condition is not fatal;
12277 * log a message and continue
12279 dev_info(&pf->pdev->dev,
12280 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
12282 i40e_stat_str(&pf->hw, ret),
12283 i40e_aq_str(&pf->hw,
12284 pf->hw.aq.asq_last_status));
12289 case I40E_VSI_FDIR:
12290 ctxt.pf_num = hw->pf_id;
12292 ctxt.uplink_seid = vsi->uplink_seid;
12293 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12294 ctxt.flags = I40E_AQ_VSI_TYPE_PF;
12295 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
12296 (i40e_is_vsi_uplink_mode_veb(vsi))) {
12297 ctxt.info.valid_sections |=
12298 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12299 ctxt.info.switch_id =
12300 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12302 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12305 case I40E_VSI_VMDQ2:
12306 ctxt.pf_num = hw->pf_id;
12308 ctxt.uplink_seid = vsi->uplink_seid;
12309 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12310 ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;
12312 /* This VSI is connected to VEB so the switch_id
12313 * should be set to zero by default.
12315 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12316 ctxt.info.valid_sections |=
12317 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12318 ctxt.info.switch_id =
12319 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12322 /* Setup the VSI tx/rx queue map for TC0 only for now */
12323 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12326 case I40E_VSI_SRIOV:
12327 ctxt.pf_num = hw->pf_id;
12328 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
12329 ctxt.uplink_seid = vsi->uplink_seid;
12330 ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
12331 ctxt.flags = I40E_AQ_VSI_TYPE_VF;
12333 /* This VSI is connected to VEB so the switch_id
12334 * should be set to zero by default.
12336 if (i40e_is_vsi_uplink_mode_veb(vsi)) {
12337 ctxt.info.valid_sections |=
12338 cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
12339 ctxt.info.switch_id =
12340 cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
12343 if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
12344 ctxt.info.valid_sections |=
12345 cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
12346 ctxt.info.queueing_opt_flags |=
12347 (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
12348 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
12351 ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
12352 ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
12353 if (pf->vf[vsi->vf_id].spoofchk) {
12354 ctxt.info.valid_sections |=
12355 cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
12356 ctxt.info.sec_flags |=
12357 (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
12358 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
12360 /* Setup the VSI tx/rx queue map for TC0 only for now */
12361 i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
12364 case I40E_VSI_IWARP:
12365 /* send down message to iWARP */
12372 if (vsi->type != I40E_VSI_MAIN) {
12373 ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
12375 dev_info(&vsi->back->pdev->dev,
12376 "add vsi failed, err %s aq_err %s\n",
12377 i40e_stat_str(&pf->hw, ret),
12378 i40e_aq_str(&pf->hw,
12379 pf->hw.aq.asq_last_status));
12383 vsi->info = ctxt.info;
12384 vsi->info.valid_sections = 0;
12385 vsi->seid = ctxt.seid;
12386 vsi->id = ctxt.vsi_number;
12389 vsi->active_filters = 0;
12390 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
12391 spin_lock_bh(&vsi->mac_filter_hash_lock);
12392 /* If macvlan filters already exist, force them to get loaded */
12393 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
12394 f->state = I40E_FILTER_NEW;
12397 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12400 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
12401 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
12404 /* Update VSI BW information */
12405 ret = i40e_vsi_get_bw_info(vsi);
12407 dev_info(&pf->pdev->dev,
12408 "couldn't get vsi bw info, err %s aq_err %s\n",
12409 i40e_stat_str(&pf->hw, ret),
12410 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
12411 /* VSI is already added so not tearing that up */
12420 * i40e_vsi_release - Delete a VSI and free its resources
12421 * @vsi: the VSI being removed
12423 * Returns 0 on success or < 0 on error
12425 int i40e_vsi_release(struct i40e_vsi *vsi)
12427 struct i40e_mac_filter *f;
12428 struct hlist_node *h;
12429 struct i40e_veb *veb = NULL;
12430 struct i40e_pf *pf;
12436 /* release of a VEB-owner or last VSI is not allowed */
12437 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
12438 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
12439 vsi->seid, vsi->uplink_seid);
12442 if (vsi == pf->vsi[pf->lan_vsi] &&
12443 !test_bit(__I40E_DOWN, pf->state)) {
12444 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
12448 uplink_seid = vsi->uplink_seid;
12449 if (vsi->type != I40E_VSI_SRIOV) {
12450 if (vsi->netdev_registered) {
12451 vsi->netdev_registered = false;
12453 /* results in a call to i40e_close() */
12454 unregister_netdev(vsi->netdev);
12457 i40e_vsi_close(vsi);
12459 i40e_vsi_disable_irq(vsi);
12462 spin_lock_bh(&vsi->mac_filter_hash_lock);
12464 /* clear the sync flag on all filters */
12466 __dev_uc_unsync(vsi->netdev, NULL);
12467 __dev_mc_unsync(vsi->netdev, NULL);
12470 /* make sure any remaining filters are marked for deletion */
12471 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
12472 __i40e_del_filter(vsi, f);
12474 spin_unlock_bh(&vsi->mac_filter_hash_lock);
12476 i40e_sync_vsi_filters(vsi);
12478 i40e_vsi_delete(vsi);
12479 i40e_vsi_free_q_vectors(vsi);
12481 free_netdev(vsi->netdev);
12482 vsi->netdev = NULL;
12484 i40e_vsi_clear_rings(vsi);
12485 i40e_vsi_clear(vsi);
12487 /* If this was the last thing on the VEB, except for the
12488 * controlling VSI, remove the VEB, which puts the controlling
12489 * VSI onto the next level down in the switch.
12491 * Well, okay, there's one more exception here: don't remove
12492 * the orphan VEBs yet. We'll wait for an explicit remove request
12493 * from up the network stack.
12495 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
12497 pf->vsi[i]->uplink_seid == uplink_seid &&
12498 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12499 n++; /* count the VSIs */
12502 for (i = 0; i < I40E_MAX_VEB; i++) {
12505 if (pf->veb[i]->uplink_seid == uplink_seid)
12506 n++; /* count the VEBs */
12507 if (pf->veb[i]->seid == uplink_seid)
12510 if (n == 0 && veb && veb->uplink_seid != 0)
12511 i40e_veb_release(veb);
12517 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
12518 * @vsi: ptr to the VSI
12520 * This should only be called after i40e_vsi_mem_alloc() which allocates the
12521 * corresponding SW VSI structure and initializes num_queue_pairs for the
12522 * newly allocated VSI.
12524 * Returns 0 on success or negative on failure
12526 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
12529 struct i40e_pf *pf = vsi->back;
12531 if (vsi->q_vectors[0]) {
12532 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
12537 if (vsi->base_vector) {
12538 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
12539 vsi->seid, vsi->base_vector);
12543 ret = i40e_vsi_alloc_q_vectors(vsi);
12545 dev_info(&pf->pdev->dev,
12546 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
12547 vsi->num_q_vectors, vsi->seid, ret);
12548 vsi->num_q_vectors = 0;
12549 goto vector_setup_out;
12552 /* In Legacy mode, we do not have to get any other vector since we
12553 * piggyback on the misc/ICR0 for queue interrupts.
12555 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
12557 if (vsi->num_q_vectors)
12558 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
12559 vsi->num_q_vectors, vsi->idx);
12560 if (vsi->base_vector < 0) {
12561 dev_info(&pf->pdev->dev,
12562 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
12563 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
12564 i40e_vsi_free_q_vectors(vsi);
12566 goto vector_setup_out;
12574 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
12575 * @vsi: pointer to the vsi.
12577 * This re-allocates a vsi's queue resources.
12579 * Returns pointer to the successfully allocated and configured VSI sw struct
12580 * on success, otherwise returns NULL on failure.
12582 static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
12584 u16 alloc_queue_pairs;
12585 struct i40e_pf *pf;
12594 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
12595 i40e_vsi_clear_rings(vsi);
12597 i40e_vsi_free_arrays(vsi, false);
12598 i40e_set_num_rings_in_vsi(vsi);
12599 ret = i40e_vsi_alloc_arrays(vsi, false);
12603 alloc_queue_pairs = vsi->alloc_queue_pairs *
12604 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12606 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
12608 dev_info(&pf->pdev->dev,
12609 "failed to get tracking for %d queues for VSI %d err %d\n",
12610 alloc_queue_pairs, vsi->seid, ret);
12613 vsi->base_queue = ret;
12615 /* Update the FW view of the VSI. Force a reset of TC and queue
12616 * layout configurations.
12618 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
12619 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
12620 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
12621 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
12622 if (vsi->type == I40E_VSI_MAIN)
12623 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
12625 /* assign it some queues */
12626 ret = i40e_alloc_rings(vsi);
12630 /* map all of the rings to the q_vectors */
12631 i40e_vsi_map_rings_to_vectors(vsi);
12635 i40e_vsi_free_q_vectors(vsi);
12636 if (vsi->netdev_registered) {
12637 vsi->netdev_registered = false;
12638 unregister_netdev(vsi->netdev);
12639 free_netdev(vsi->netdev);
12640 vsi->netdev = NULL;
12642 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
12644 i40e_vsi_clear(vsi);
12649 * i40e_vsi_setup - Set up a VSI by a given type
12650 * @pf: board private structure
12652 * @uplink_seid: the switch element to link to
12653 * @param1: usage depends upon VSI type. For VF types, indicates VF id
12655 * This allocates the sw VSI structure and its queue resources, then adds the VSI
12656 * to the identified VEB.
12658 * Returns pointer to the successfully allocated and configured VSI sw struct on
12659 * success, otherwise returns NULL on failure.
12661 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
12662 u16 uplink_seid, u32 param1)
12664 struct i40e_vsi *vsi = NULL;
12665 struct i40e_veb *veb = NULL;
12666 u16 alloc_queue_pairs;
12670 /* The requested uplink_seid must be either
12671 * - the PF's port seid
12672 * no VEB is needed because this is the PF
12673 * or this is a Flow Director special case VSI
12674 * - seid of an existing VEB
12675 * - seid of a VSI that owns an existing VEB
12676 * - seid of a VSI that doesn't own a VEB
12677 * a new VEB is created and the VSI becomes the owner
12678 * - seid of the PF VSI, which is what creates the first VEB
12679 * this is a special case of the previous
12681 * Find which uplink_seid we were given and create a new VEB if needed
12683 for (i = 0; i < I40E_MAX_VEB; i++) {
12684 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
12690 if (!veb && uplink_seid != pf->mac_seid) {
12692 for (i = 0; i < pf->num_alloc_vsi; i++) {
12693 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
12699 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
12704 if (vsi->uplink_seid == pf->mac_seid)
12705 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
12706 vsi->tc_config.enabled_tc);
12707 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
12708 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
12709 vsi->tc_config.enabled_tc);
12711 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
12712 dev_info(&vsi->back->pdev->dev,
12713 "New VSI creation error, uplink seid of LAN VSI expected.\n");
12716 /* We come up by default in VEPA mode if SRIOV is not
12717 * already enabled, in which case we can't force VEPA
12720 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
12721 veb->bridge_mode = BRIDGE_MODE_VEPA;
12722 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
12724 i40e_config_bridge_mode(veb);
12726 for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
12727 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
12731 dev_info(&pf->pdev->dev, "couldn't add VEB\n");
12735 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
12736 uplink_seid = veb->seid;
12739 /* get vsi sw struct */
12740 v_idx = i40e_vsi_mem_alloc(pf, type);
12743 vsi = pf->vsi[v_idx];
12747 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);
12749 if (type == I40E_VSI_MAIN)
12750 pf->lan_vsi = v_idx;
12751 else if (type == I40E_VSI_SRIOV)
12752 vsi->vf_id = param1;
12753 /* assign it some queues */
12754 alloc_queue_pairs = vsi->alloc_queue_pairs *
12755 (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
12757 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
12759 dev_info(&pf->pdev->dev,
12760 "failed to get tracking for %d queues for VSI %d err=%d\n",
12761 alloc_queue_pairs, vsi->seid, ret);
12764 vsi->base_queue = ret;
12766 /* get a VSI from the hardware */
12767 vsi->uplink_seid = uplink_seid;
12768 ret = i40e_add_vsi(vsi);
12772 switch (vsi->type) {
12773 /* setup the netdev if needed */
12774 case I40E_VSI_MAIN:
12775 case I40E_VSI_VMDQ2:
12776 ret = i40e_config_netdev(vsi);
12779 ret = register_netdev(vsi->netdev);
12782 vsi->netdev_registered = true;
12783 netif_carrier_off(vsi->netdev);
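/* Leave carrier off for now; it is raised later once the VSI is brought up
 * and the firmware reports link.
 */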
12784 #ifdef CONFIG_I40E_DCB
12785 /* Setup DCB netlink interface */
12786 i40e_dcbnl_setup(vsi);
12787 #endif /* CONFIG_I40E_DCB */
12790 case I40E_VSI_FDIR:
12791 /* set up vectors and rings if needed */
12792 ret = i40e_vsi_setup_vectors(vsi);
12796 ret = i40e_alloc_rings(vsi);
12800 /* map all of the rings to the q_vectors */
12801 i40e_vsi_map_rings_to_vectors(vsi);
12803 i40e_vsi_reset_stats(vsi);
12807 /* no netdev or rings for the other VSI types */
12811 if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
12812 (vsi->type == I40E_VSI_VMDQ2)) {
12813 ret = i40e_vsi_config_rss(vsi);
12818 i40e_vsi_free_q_vectors(vsi);
12820 if (vsi->netdev_registered) {
12821 vsi->netdev_registered = false;
12822 unregister_netdev(vsi->netdev);
12823 free_netdev(vsi->netdev);
12824 vsi->netdev = NULL;
12827 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
12829 i40e_vsi_clear(vsi);
12835 * i40e_veb_get_bw_info - Query VEB BW information
12836 * @veb: the veb to query
12838 * Query the Tx scheduler BW configuration data for the given VEB.
12840 static int i40e_veb_get_bw_info(struct i40e_veb *veb)
12842 struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
12843 struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
12844 struct i40e_pf *pf = veb->pf;
12845 struct i40e_hw *hw = &pf->hw;
12850 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
12853 dev_info(&pf->pdev->dev,
12854 "query veb bw config failed, err %s aq_err %s\n",
12855 i40e_stat_str(&pf->hw, ret),
12856 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
12860 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
12863 dev_info(&pf->pdev->dev,
12864 "query veb bw ets config failed, err %s aq_err %s\n",
12865 i40e_stat_str(&pf->hw, ret),
12866 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
12870 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
12871 veb->bw_max_quanta = ets_data.tc_bw_max;
12872 veb->is_abs_credits = bw_data.absolute_credits_enable;
12873 veb->enabled_tc = ets_data.tc_valid_bits;
12874 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
12875 (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
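/* tc_bw_max is reported as two 16-bit words; combine them here so the
 * per-TC max-quanta fields can be extracted in the loop below.
 */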
12876 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
12877 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
12878 veb->bw_tc_limit_credits[i] =
12879 le16_to_cpu(bw_data.tc_bw_limits[i]);
12880 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
12888 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
12889 * @pf: board private structure
12891 * On error: returns error code (negative)
12892 * On success: returns veb index in PF (positive)
12894 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
12897 struct i40e_veb *veb;
12900 /* Need to protect the allocation of switch elements at the PF level */
12901 mutex_lock(&pf->switch_mutex);
12903 /* VEB list may be fragmented if VEB creation/destruction has
12904 * been happening. We can afford to do a quick scan to look
12905 * for any free slots in the list.
12907 * find next empty veb slot, looping back around if necessary
12910 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
12912 if (i >= I40E_MAX_VEB) {
12914 goto err_alloc_veb; /* out of VEB slots! */
12917 veb = kzalloc(sizeof(*veb), GFP_KERNEL);
12920 goto err_alloc_veb;
12924 veb->enabled_tc = 1;
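/* Start with only TC0 enabled; i40e_veb_setup() replaces this with the
 * caller's requested TC map.
 */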
12929 mutex_unlock(&pf->switch_mutex);
12934 * i40e_switch_branch_release - Delete a branch of the switch tree
12935 * @branch: where to start deleting
12937 * This uses recursion to find the tips of the branch to be
12938 * removed, deleting until we get back to and can delete this VEB.
12940 static void i40e_switch_branch_release(struct i40e_veb *branch)
12942 struct i40e_pf *pf = branch->pf;
12943 u16 branch_seid = branch->seid;
12944 u16 veb_idx = branch->idx;
12947 /* release any VEBs on this VEB - RECURSION */
12948 for (i = 0; i < I40E_MAX_VEB; i++) {
12951 if (pf->veb[i]->uplink_seid == branch->seid)
12952 i40e_switch_branch_release(pf->veb[i]);
12955 /* Release the VSIs on this VEB, but not the owner VSI.
12957 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
12958 * the VEB itself, so don't use (*branch) after this loop.
12960 for (i = 0; i < pf->num_alloc_vsi; i++) {
12963 if (pf->vsi[i]->uplink_seid == branch_seid &&
12964 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
12965 i40e_vsi_release(pf->vsi[i]);
12969 /* There's one corner case where the VEB might not have been
12970 * removed, so double check it here and remove it if needed.
12971 * This case happens if the veb was created from the debugfs
12972 * commands and no VSIs were added to it.
12974 if (pf->veb[veb_idx])
12975 i40e_veb_release(pf->veb[veb_idx]);
12979 * i40e_veb_clear - remove veb struct
12980 * @veb: the veb to remove
12982 static void i40e_veb_clear(struct i40e_veb *veb)
12988 struct i40e_pf *pf = veb->pf;
12990 mutex_lock(&pf->switch_mutex);
12991 if (pf->veb[veb->idx] == veb)
12992 pf->veb[veb->idx] = NULL;
12993 mutex_unlock(&pf->switch_mutex);
13000 * i40e_veb_release - Delete a VEB and free its resources
13001 * @veb: the VEB being removed
13003 void i40e_veb_release(struct i40e_veb *veb)
13005 struct i40e_vsi *vsi = NULL;
13006 struct i40e_pf *pf;
13011 /* find the remaining VSI and check for extras */
13012 for (i = 0; i < pf->num_alloc_vsi; i++) {
13013 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
13019 dev_info(&pf->pdev->dev,
13020 "can't remove VEB %d with %d VSIs left\n",
13025 /* move the remaining VSI to uplink veb */
13026 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
13027 if (veb->uplink_seid) {
13028 vsi->uplink_seid = veb->uplink_seid;
13029 if (veb->uplink_seid == pf->mac_seid)
13030 vsi->veb_idx = I40E_NO_VEB;
13032 vsi->veb_idx = veb->veb_idx;
13035 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
13036 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
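/* If the VEB being released was floating (no uplink), the remaining VSI is
 * re-homed under the main LAN VSI's uplink instead.
 */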
13039 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13040 i40e_veb_clear(veb);
13044 * i40e_add_veb - create the VEB in the switch
13045 * @veb: the VEB to be instantiated
13046 * @vsi: the controlling VSI
13048 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
13050 struct i40e_pf *pf = veb->pf;
13051 bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
13054 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
13055 veb->enabled_tc, false,
13056 &veb->seid, enable_stats, NULL);
13058 /* get a VEB from the hardware */
13060 dev_info(&pf->pdev->dev,
13061 "couldn't add VEB, err %s aq_err %s\n",
13062 i40e_stat_str(&pf->hw, ret),
13063 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13067 /* get statistics counter */
13068 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
13069 &veb->stats_idx, NULL, NULL, NULL);
13071 dev_info(&pf->pdev->dev,
13072 "couldn't get VEB statistics idx, err %s aq_err %s\n",
13073 i40e_stat_str(&pf->hw, ret),
13074 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13077 ret = i40e_veb_get_bw_info(veb);
13079 dev_info(&pf->pdev->dev,
13080 "couldn't get VEB bw info, err %s aq_err %s\n",
13081 i40e_stat_str(&pf->hw, ret),
13082 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13083 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
13087 vsi->uplink_seid = veb->seid;
13088 vsi->veb_idx = veb->idx;
13089 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
13095 * i40e_veb_setup - Set up a VEB
13096 * @pf: board private structure
13097 * @flags: VEB setup flags
13098 * @uplink_seid: the switch element to link to
13099 * @vsi_seid: the initial VSI seid
13100 * @enabled_tc: Enabled TC bit-map
13102 * This allocates the sw VEB structure and links it into the switch
13103 * It is possible and legal for this to be a duplicate of an already
13104 * existing VEB. It is also possible for both uplink and vsi seids
13105 * to be zero, in order to create a floating VEB.
13107 * Returns pointer to the successfully allocated VEB sw struct on
13108 * success, otherwise returns NULL on failure.
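 *
 * For example, i40e_vsi_setup() creates the first VEB under the MAC with:
 *   veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
 *                        vsi->tc_config.enabled_tc);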
13110 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
13111 u16 uplink_seid, u16 vsi_seid,
13114 struct i40e_veb *veb, *uplink_veb = NULL;
13115 int vsi_idx, veb_idx;
13118 /* if one seid is 0, the other must be 0 to create a floating relay */
13119 if ((uplink_seid == 0 || vsi_seid == 0) &&
13120 (uplink_seid + vsi_seid != 0)) {
13121 dev_info(&pf->pdev->dev,
13122 "one, not both seids are 0: uplink=%d vsi=%d\n",
13123 uplink_seid, vsi_seid);
13127 /* make sure there is such a vsi and uplink */
13128 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
13129 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
13131 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
13132 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
13137 if (uplink_seid && uplink_seid != pf->mac_seid) {
13138 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
13139 if (pf->veb[veb_idx] &&
13140 pf->veb[veb_idx]->seid == uplink_seid) {
13141 uplink_veb = pf->veb[veb_idx];
13146 dev_info(&pf->pdev->dev,
13147 "uplink seid %d not found\n", uplink_seid);
13152 /* get veb sw struct */
13153 veb_idx = i40e_veb_mem_alloc(pf);
13156 veb = pf->veb[veb_idx];
13157 veb->flags = flags;
13158 veb->uplink_seid = uplink_seid;
13159 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
13160 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
13162 /* create the VEB in the switch */
13163 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
13166 if (vsi_idx == pf->lan_vsi)
13167 pf->lan_veb = veb->idx;
13172 i40e_veb_clear(veb);
13178 * i40e_setup_pf_switch_element - set PF vars based on switch type
13179 * @pf: board private structure
13180 * @ele: element we are building info from
13181 * @num_reported: total number of elements
13182 * @printconfig: should we print the contents
13184 * helper function to assist in extracting a few useful SEID values.
13186 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
13187 struct i40e_aqc_switch_config_element_resp *ele,
13188 u16 num_reported, bool printconfig)
13190 u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
13191 u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
13192 u8 element_type = ele->element_type;
13193 u16 seid = le16_to_cpu(ele->seid);
13196 dev_info(&pf->pdev->dev,
13197 "type=%d seid=%d uplink=%d downlink=%d\n",
13198 element_type, seid, uplink_seid, downlink_seid);
13200 switch (element_type) {
13201 case I40E_SWITCH_ELEMENT_TYPE_MAC:
13202 pf->mac_seid = seid;
13204 case I40E_SWITCH_ELEMENT_TYPE_VEB:
13206 if (uplink_seid != pf->mac_seid)
13208 if (pf->lan_veb == I40E_NO_VEB) {
13211 /* find existing or else empty VEB */
13212 for (v = 0; v < I40E_MAX_VEB; v++) {
13213 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
13218 if (pf->lan_veb == I40E_NO_VEB) {
13219 v = i40e_veb_mem_alloc(pf);
13226 pf->veb[pf->lan_veb]->seid = seid;
13227 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
13228 pf->veb[pf->lan_veb]->pf = pf;
13229 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
13231 case I40E_SWITCH_ELEMENT_TYPE_VSI:
13232 if (num_reported != 1)
13234 /* This is immediately after a reset so we can assume this is the PF's main VSI. */
13237 pf->mac_seid = uplink_seid;
13238 pf->pf_seid = downlink_seid;
13239 pf->main_vsi_seid = seid;
13241 dev_info(&pf->pdev->dev,
13242 "pf_seid=%d main_vsi_seid=%d\n",
13243 pf->pf_seid, pf->main_vsi_seid);
13245 case I40E_SWITCH_ELEMENT_TYPE_PF:
13246 case I40E_SWITCH_ELEMENT_TYPE_VF:
13247 case I40E_SWITCH_ELEMENT_TYPE_EMP:
13248 case I40E_SWITCH_ELEMENT_TYPE_BMC:
13249 case I40E_SWITCH_ELEMENT_TYPE_PE:
13250 case I40E_SWITCH_ELEMENT_TYPE_PA:
13251 /* ignore these for now */
13254 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
13255 element_type, seid);
13261 * i40e_fetch_switch_configuration - Get switch config from firmware
13262 * @pf: board private structure
13263 * @printconfig: should we print the contents
13265 * Get the current switch configuration from the device and
13266 * extract a few useful SEID values.
13268 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
13270 struct i40e_aqc_get_switch_config_resp *sw_config;
13276 aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
13280 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
13282 u16 num_reported, num_total;
13284 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
13288 dev_info(&pf->pdev->dev,
13289 "get switch config failed err %s aq_err %s\n",
13290 i40e_stat_str(&pf->hw, ret),
13291 i40e_aq_str(&pf->hw,
13292 pf->hw.aq.asq_last_status));
13297 num_reported = le16_to_cpu(sw_config->header.num_reported);
13298 num_total = le16_to_cpu(sw_config->header.num_total);
13301 dev_info(&pf->pdev->dev,
13302 "header: %d reported %d total\n",
13303 num_reported, num_total);
13305 for (i = 0; i < num_reported; i++) {
13306 struct i40e_aqc_switch_config_element_resp *ele =
13307 &sw_config->element[i];
13309 i40e_setup_pf_switch_element(pf, ele, num_reported,
13312 } while (next_seid != 0);
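/* The firmware returns the switch configuration one buffer at a time;
 * next_seid carries the resume point, so keep fetching until it comes back
 * as 0.
 */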
13319 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
13320 * @pf: board private structure
13321 * @reinit: if the Main VSI needs to be re-initialized.
13323 * Returns 0 on success, negative value on failure
13325 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
13330 /* find out what's out there already */
13331 ret = i40e_fetch_switch_configuration(pf, false);
13333 dev_info(&pf->pdev->dev,
13334 "couldn't fetch switch config, err %s aq_err %s\n",
13335 i40e_stat_str(&pf->hw, ret),
13336 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13339 i40e_pf_reset_stats(pf);
13341 /* set the switch config bit for the whole device to
13342 * support limited promisc or true promisc
13343 * when the user requests promisc. The default is limited promisc. */
13347 if ((pf->hw.pf_id == 0) &&
13348 !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
13349 flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13350 pf->last_sw_conf_flags = flags;
13353 if (pf->hw.pf_id == 0) {
13356 valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
13357 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
13359 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
13360 dev_info(&pf->pdev->dev,
13361 "couldn't set switch config bits, err %s aq_err %s\n",
13362 i40e_stat_str(&pf->hw, ret),
13363 i40e_aq_str(&pf->hw,
13364 pf->hw.aq.asq_last_status));
13365 /* not a fatal problem, just keep going */
13367 pf->last_sw_conf_valid_flags = valid_flags;
13370 /* first time setup */
13371 if (pf->lan_vsi == I40E_NO_VSI || reinit) {
13372 struct i40e_vsi *vsi = NULL;
13375 /* Set up the PF VSI associated with the PF's main VSI
13376 * that is already in the HW switch
13378 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
13379 uplink_seid = pf->veb[pf->lan_veb]->seid;
13381 uplink_seid = pf->mac_seid;
13382 if (pf->lan_vsi == I40E_NO_VSI)
13383 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
13385 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
13387 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
13388 i40e_cloud_filter_exit(pf);
13389 i40e_fdir_teardown(pf);
13393 /* force a reset of TC and queue layout configurations */
13394 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
13396 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
13397 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
13398 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
13400 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
13402 i40e_fdir_sb_setup(pf);
13404 /* Setup static PF queue filter control settings */
13405 ret = i40e_setup_pf_filter_control(pf);
13407 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
13409 /* Failure here should not stop continuing other steps */
13412 /* enable RSS in the HW, even for only one queue, as the stack can use the hash */
13415 if ((pf->flags & I40E_FLAG_RSS_ENABLED))
13416 i40e_pf_config_rss(pf);
13418 /* fill in link information and enable LSE reporting */
13419 i40e_link_event(pf);
13421 /* Initialize user-specific link properties */
13422 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
13423 I40E_AQ_AN_COMPLETED) ? true : false);
13427 /* repopulate tunnel port filters */
13428 i40e_sync_udp_filters(pf);
13434 * i40e_determine_queue_usage - Work out queue distribution
13435 * @pf: board private structure
13437 static void i40e_determine_queue_usage(struct i40e_pf *pf)
13442 pf->num_lan_qps = 0;
13444 /* Find the max queues to be put into basic use. We'll always be
13445 * using TC0, whether or not DCB is running, and TC0 will get the bulk of the queues. */
13448 queues_left = pf->hw.func_caps.num_tx_qp;
13450 if ((queues_left == 1) ||
13451 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
13452 /* one qp for PF, no queues for anything else */
13454 pf->alloc_rss_size = pf->num_lan_qps = 1;
13456 /* make sure all the fancies are disabled */
13457 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13458 I40E_FLAG_IWARP_ENABLED |
13459 I40E_FLAG_FD_SB_ENABLED |
13460 I40E_FLAG_FD_ATR_ENABLED |
13461 I40E_FLAG_DCB_CAPABLE |
13462 I40E_FLAG_DCB_ENABLED |
13463 I40E_FLAG_SRIOV_ENABLED |
13464 I40E_FLAG_VMDQ_ENABLED);
13465 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13466 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
13467 I40E_FLAG_FD_SB_ENABLED |
13468 I40E_FLAG_FD_ATR_ENABLED |
13469 I40E_FLAG_DCB_CAPABLE))) {
13470 /* one qp for PF */
13471 pf->alloc_rss_size = pf->num_lan_qps = 1;
13472 queues_left -= pf->num_lan_qps;
13474 pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
13475 I40E_FLAG_IWARP_ENABLED |
13476 I40E_FLAG_FD_SB_ENABLED |
13477 I40E_FLAG_FD_ATR_ENABLED |
13478 I40E_FLAG_DCB_ENABLED |
13479 I40E_FLAG_VMDQ_ENABLED);
13480 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13482 /* Not enough queues for all TCs */
13483 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
13484 (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
13485 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
13486 I40E_FLAG_DCB_ENABLED);
13487 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
13490 /* limit lan qps to the smaller of qps, cpus or msix */
13491 q_max = max_t(int, pf->rss_size_max, num_online_cpus());
13492 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
13493 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
13494 pf->num_lan_qps = q_max;
13496 queues_left -= pf->num_lan_qps;
13499 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13500 if (queues_left > 1) {
13501 queues_left -= 1; /* save 1 queue for FD */
13503 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
13504 pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
13505 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
13509 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13510 pf->num_vf_qps && pf->num_req_vfs && queues_left) {
13511 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
13512 (queues_left / pf->num_vf_qps));
13513 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
13516 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
13517 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
13518 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
13519 (queues_left / pf->num_vmdq_qps));
13520 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
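/* Queue pairs are carved out in priority order: LAN first, then one queue
 * for the Flow Director sideband filters, then VF queue pairs, then VMDq;
 * the remainder is recorded below in pf->queues_left.
 */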
13523 pf->queues_left = queues_left;
13524 dev_dbg(&pf->pdev->dev,
13525 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
13526 pf->hw.func_caps.num_tx_qp,
13527 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
13528 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
13529 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
13534 * i40e_setup_pf_filter_control - Setup PF static filter control
13535 * @pf: PF to be setup
13537 * i40e_setup_pf_filter_control sets up a PF's initial filter control
13538 * settings. If PE/FCoE are enabled then it will also set the per-PF
13539 * filter sizes required for them. It also enables Flow Director,
13540 * ethertype and macvlan type filter settings for the PF.
13542 * Returns 0 on success, negative on failure
13544 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
13546 struct i40e_filter_control_settings *settings = &pf->filter_settings;
13548 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
13550 /* Flow Director is enabled */
13551 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
13552 settings->enable_fdir = true;
13554 /* Ethtype and MACVLAN filters enabled for PF */
13555 settings->enable_ethtype = true;
13556 settings->enable_macvlan = true;
13558 if (i40e_set_filter_control(&pf->hw, settings))
13564 #define INFO_STRING_LEN 255
13565 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
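/* REMAIN() is the space left in the feature string buffer, so the snprintf()
 * chain below can append without overflowing.
 */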
13566 static void i40e_print_features(struct i40e_pf *pf)
13568 struct i40e_hw *hw = &pf->hw;
13572 buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
13576 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
13577 #ifdef CONFIG_PCI_IOV
13578 i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
13580 i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
13581 pf->hw.func_caps.num_vsis,
13582 pf->vsi[pf->lan_vsi]->num_queue_pairs);
13583 if (pf->flags & I40E_FLAG_RSS_ENABLED)
13584 i += snprintf(&buf[i], REMAIN(i), " RSS");
13585 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
13586 i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
13587 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
13588 i += snprintf(&buf[i], REMAIN(i), " FD_SB");
13589 i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
13591 if (pf->flags & I40E_FLAG_DCB_CAPABLE)
13592 i += snprintf(&buf[i], REMAIN(i), " DCB");
13593 i += snprintf(&buf[i], REMAIN(i), " VxLAN");
13594 i += snprintf(&buf[i], REMAIN(i), " Geneve");
13595 if (pf->flags & I40E_FLAG_PTP)
13596 i += snprintf(&buf[i], REMAIN(i), " PTP");
13597 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
13598 i += snprintf(&buf[i], REMAIN(i), " VEB");
13600 i += snprintf(&buf[i], REMAIN(i), " VEPA");
13602 dev_info(&pf->pdev->dev, "%s\n", buf);
13604 WARN_ON(i > INFO_STRING_LEN);
13608 * i40e_get_platform_mac_addr - get platform-specific MAC address
13609 * @pdev: PCI device information struct
13610 * @pf: board private structure
13612 * Look up the MAC address for the device. First we'll try
13613 * eth_platform_get_mac_address, which will check Open Firmware, or arch
13614 * specific fallback. Otherwise, we'll default to the stored value in the firmware. */
13617 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
13619 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
13620 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
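/* eth_platform_get_mac_address() returns 0 on success; a non-zero return
 * means no platform-provided address was found, so fall back to the MAC
 * reported by the firmware.
 */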
13624 * i40e_probe - Device initialization routine
13625 * @pdev: PCI device information struct
13626 * @ent: entry in i40e_pci_tbl
13628 * i40e_probe initializes a PF identified by a pci_dev structure.
13629 * The OS initialization, configuring of the PF private structure,
13630 * and a hardware reset occur.
13632 * Returns 0 on success, negative on failure
13634 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
13636 struct i40e_aq_get_phy_abilities_resp abilities;
13637 struct i40e_pf *pf;
13638 struct i40e_hw *hw;
13639 static u16 pfs_found;
13646 err = pci_enable_device_mem(pdev);
13650 /* set up for high or low dma */
13651 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
13653 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
13655 dev_err(&pdev->dev,
13656 "DMA configuration failed: 0x%x\n", err);
13661 /* set up pci connections */
13662 err = pci_request_mem_regions(pdev, i40e_driver_name);
13664 dev_info(&pdev->dev,
13665 "pci_request_mem_regions failed %d\n", err);
13669 pci_enable_pcie_error_reporting(pdev);
13670 pci_set_master(pdev);
13672 /* Now that we have a PCI connection, we need to do the
13673 * low level device setup. This is primarily setting up
13674 * the Admin Queue structures and then querying for the
13675 * device's current profile information.
13677 pf = kzalloc(sizeof(*pf), GFP_KERNEL);
13684 set_bit(__I40E_DOWN, pf->state);
13689 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
13690 I40E_MAX_CSR_SPACE);
13692 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
13693 if (!hw->hw_addr) {
13695 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
13696 (unsigned int)pci_resource_start(pdev, 0),
13697 pf->ioremap_len, err);
13700 hw->vendor_id = pdev->vendor;
13701 hw->device_id = pdev->device;
13702 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
13703 hw->subsystem_vendor_id = pdev->subsystem_vendor;
13704 hw->subsystem_device_id = pdev->subsystem_device;
13705 hw->bus.device = PCI_SLOT(pdev->devfn);
13706 hw->bus.func = PCI_FUNC(pdev->devfn);
13707 hw->bus.bus_id = pdev->bus->number;
13708 pf->instance = pfs_found;
13710 /* Select something other than the 802.1ad ethertype for the
13711 * switch to use internally and drop on ingress.
13713 hw->switch_tag = 0xffff;
13714 hw->first_tag = ETH_P_8021AD;
13715 hw->second_tag = ETH_P_8021Q;
13717 INIT_LIST_HEAD(&pf->l3_flex_pit_list);
13718 INIT_LIST_HEAD(&pf->l4_flex_pit_list);
13720 /* set up the locks for the AQ, do this only once in probe
13721 * and destroy them only once in remove
13723 mutex_init(&hw->aq.asq_mutex);
13724 mutex_init(&hw->aq.arq_mutex);
13726 pf->msg_enable = netif_msg_init(debug,
13731 pf->hw.debug_mask = debug;
13733 /* do a special CORER for clearing PXE mode once at init */
13734 if (hw->revision_id == 0 &&
13735 (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
13736 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
13741 i40e_clear_pxe_mode(hw);
13744 /* Reset here to make sure all is clean and to define PF 'n' */
13746 err = i40e_pf_reset(hw);
13748 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
13753 hw->aq.num_arq_entries = I40E_AQ_LEN;
13754 hw->aq.num_asq_entries = I40E_AQ_LEN;
13755 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
13756 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
13757 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
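/* The admin queue ring and buffer sizes are fixed here; adminq_work_limit
 * caps how many admin queue events the service task handles in one pass.
 */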
13759 snprintf(pf->int_name, sizeof(pf->int_name) - 1,
13761 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
13763 err = i40e_init_shared_code(hw);
13765 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
13770 /* set up a default setting for link flow control */
13771 pf->hw.fc.requested_mode = I40E_FC_NONE;
13773 err = i40e_init_adminq(hw);
13775 if (err == I40E_ERR_FIRMWARE_API_VERSION)
13776 dev_info(&pdev->dev,
13777 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
13779 dev_info(&pdev->dev,
13780 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
13784 i40e_get_oem_version(hw);
13786 /* provide nvm, fw, api versions */
13787 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
13788 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
13789 hw->aq.api_maj_ver, hw->aq.api_min_ver,
13790 i40e_nvm_version_str(hw));
13792 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
13793 hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
13794 dev_info(&pdev->dev,
13795 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
13796 else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
13797 dev_info(&pdev->dev,
13798 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
13800 i40e_verify_eeprom(pf);
13802 /* Rev 0 hardware was never productized */
13803 if (hw->revision_id < 1)
13804 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
13806 i40e_clear_pxe_mode(hw);
13807 err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
13809 goto err_adminq_setup;
13811 err = i40e_sw_init(pf);
13813 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
13817 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
13818 hw->func_caps.num_rx_qp, 0, 0);
13820 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
13821 goto err_init_lan_hmc;
13824 err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
13826 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
13828 goto err_configure_lan_hmc;
13831 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
13832 * Ignore error return codes because if it was already disabled via
13833 * hardware settings this will fail
13835 if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
13836 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
13837 i40e_aq_stop_lldp(hw, true, NULL);
13840 /* allow a platform config to override the HW addr */
13841 i40e_get_platform_mac_addr(pdev, pf);
13843 if (!is_valid_ether_addr(hw->mac.addr)) {
13844 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
13848 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
13849 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
13850 i40e_get_port_mac_addr(hw, hw->mac.port_addr);
13851 if (is_valid_ether_addr(hw->mac.port_addr))
13852 pf->hw_features |= I40E_HW_PORT_ID_VALID;
13854 pci_set_drvdata(pdev, pf);
13855 pci_save_state(pdev);
13857 /* Enable FW to write default DCB config on link-up */
13858 i40e_aq_set_dcb_parameters(hw, true, NULL);
13860 #ifdef CONFIG_I40E_DCB
13861 err = i40e_init_pf_dcb(pf);
13863 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
13864 pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
13865 /* Continue without DCB enabled */
13867 #endif /* CONFIG_I40E_DCB */
13869 /* set up periodic task facility */
13870 timer_setup(&pf->service_timer, i40e_service_timer, 0);
13871 pf->service_timer_period = HZ;
13873 INIT_WORK(&pf->service_task, i40e_service_task);
13874 clear_bit(__I40E_SERVICE_SCHED, pf->state);
13876 /* NVM bit on means WoL disabled for the port */
13877 i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
13878 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
13879 pf->wol_en = false;
13882 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
13884 /* set up the main switch operations */
13885 i40e_determine_queue_usage(pf);
13886 err = i40e_init_interrupt_scheme(pf);
13888 goto err_switch_setup;
13890 /* The number of VSIs reported by the FW is the minimum guaranteed
13891 * to us; HW supports far more and we share the remaining pool with
13892 * the other PFs. We allocate space for more than the guarantee with
13893 * the understanding that we might not get them all later.
13895 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
13896 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
13898 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
13900 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
13901 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
13905 goto err_switch_setup;
13908 #ifdef CONFIG_PCI_IOV
13909 /* prep for VF support */
13910 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13911 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
13912 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
13913 if (pci_num_vf(pdev))
13914 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
13917 err = i40e_setup_pf_switch(pf, false);
13919 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
13922 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
13924 /* if FDIR VSI was set up, start it now */
13925 for (i = 0; i < pf->num_alloc_vsi; i++) {
13926 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
13927 i40e_vsi_open(pf->vsi[i]);
13932 /* The driver only wants link up/down and module qualification
13933 * reports from firmware. Note the negative logic.
13935 err = i40e_aq_set_phy_int_mask(&pf->hw,
13936 ~(I40E_AQ_EVENT_LINK_UPDOWN |
13937 I40E_AQ_EVENT_MEDIA_NA |
13938 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
13940 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
13941 i40e_stat_str(&pf->hw, err),
13942 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
13944 /* Reconfigure hardware for allowing smaller MSS in the case
13945 * of TSO, so that we avoid the MDD being fired and causing
13946 * a reset in the case of small MSS+TSO.
13948 val = rd32(hw, I40E_REG_MSS);
13949 if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
13950 val &= ~I40E_REG_MSS_MIN_MASK;
13951 val |= I40E_64BYTE_MSS;
13952 wr32(hw, I40E_REG_MSS, val);
13955 if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
13957 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
13959 dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
13960 i40e_stat_str(&pf->hw, err),
13961 i40e_aq_str(&pf->hw,
13962 pf->hw.aq.asq_last_status));
13964 /* The main driver is (mostly) up and happy. We need to set this state
13965 * before setting up the misc vector or we get a race and the vector
13966 * ends up disabled forever.
13968 clear_bit(__I40E_DOWN, pf->state);
13970 /* In case of MSIX we are going to setup the misc vector right here
13971 * to handle admin queue events etc. In case of legacy and MSI
13972 * the misc functionality and queue processing is combined in
13973 * the same vector and that gets setup at open.
13975 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
13976 err = i40e_setup_misc_vector(pf);
13978 dev_info(&pdev->dev,
13979 "setup of misc vector failed: %d\n", err);
13980 i40e_cloud_filter_exit(pf);
13981 i40e_fdir_teardown(pf);
13986 #ifdef CONFIG_PCI_IOV
13987 /* prep for VF support */
13988 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
13989 (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
13990 !test_bit(__I40E_BAD_EEPROM, pf->state)) {
13991 /* disable link interrupts for VFs */
13992 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
13993 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
13994 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
13997 if (pci_num_vf(pdev)) {
13998 dev_info(&pdev->dev,
13999 "Active VFs found, allocating resources.\n");
14000 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
14002 dev_info(&pdev->dev,
14003 "Error %d allocating resources for existing VFs\n",
14007 #endif /* CONFIG_PCI_IOV */
14009 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14010 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
14011 pf->num_iwarp_msix,
14012 I40E_IWARP_IRQ_PILE_ID);
14013 if (pf->iwarp_base_vector < 0) {
14014 dev_info(&pdev->dev,
14015 "failed to get tracking for %d vectors for IWARP err=%d\n",
14016 pf->num_iwarp_msix, pf->iwarp_base_vector);
14017 pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
14021 i40e_dbg_pf_init(pf);
14023 /* tell the firmware that we're starting */
14024 i40e_send_version(pf);
14026 /* since everything's happy, start the service_task timer */
14027 mod_timer(&pf->service_timer,
14028 round_jiffies(jiffies + pf->service_timer_period));
14030 /* add this PF to client device list and launch a client service task */
14031 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14032 err = i40e_lan_add_device(pf);
14034 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
14038 #define PCI_SPEED_SIZE 8
14039 #define PCI_WIDTH_SIZE 8
14040 /* Devices on the IOSF bus do not have this information
14041 * and will report PCI Gen 1 x 1 by default so don't bother checking them. */
14044 if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
14045 char speed[PCI_SPEED_SIZE] = "Unknown";
14046 char width[PCI_WIDTH_SIZE] = "Unknown";
14048 /* Get the negotiated link width and speed from PCI config
14051 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
14054 i40e_set_pci_config_data(hw, link_status);
14056 switch (hw->bus.speed) {
14057 case i40e_bus_speed_8000:
14058 strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
14059 case i40e_bus_speed_5000:
14060 strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
14061 case i40e_bus_speed_2500:
14062 strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
14066 switch (hw->bus.width) {
14067 case i40e_bus_width_pcie_x8:
14068 strncpy(width, "8", PCI_WIDTH_SIZE); break;
14069 case i40e_bus_width_pcie_x4:
14070 strncpy(width, "4", PCI_WIDTH_SIZE); break;
14071 case i40e_bus_width_pcie_x2:
14072 strncpy(width, "2", PCI_WIDTH_SIZE); break;
14073 case i40e_bus_width_pcie_x1:
14074 strncpy(width, "1", PCI_WIDTH_SIZE); break;
14079 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
14082 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
14083 hw->bus.speed < i40e_bus_speed_8000) {
14084 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
14085 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
14089 /* get the requested speeds from the fw */
14090 err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
14092 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
14093 i40e_stat_str(&pf->hw, err),
14094 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14095 pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
14097 /* get the supported phy types from the fw */
14098 err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
14100 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
14101 i40e_stat_str(&pf->hw, err),
14102 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
14104 /* Add a filter to drop all Flow control frames from any VSI from being
14105 * transmitted. By doing so we stop a malicious VF from sending out
14106 * PAUSE or PFC frames and potentially controlling traffic for other PF/VF VSIs.
14108 * The FW can still send Flow control frames if enabled.
14110 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
14111 pf->main_vsi_seid);
14113 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
14114 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
14115 pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
14116 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
14117 pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
14118 /* print a string summarizing features */
14119 i40e_print_features(pf);
14123 /* Unwind what we've done if something failed in the setup */
14125 set_bit(__I40E_DOWN, pf->state);
14126 i40e_clear_interrupt_scheme(pf);
14129 i40e_reset_interrupt_capability(pf);
14130 del_timer_sync(&pf->service_timer);
14132 err_configure_lan_hmc:
14133 (void)i40e_shutdown_lan_hmc(hw);
14135 kfree(pf->qp_pile);
14139 iounmap(hw->hw_addr);
14143 pci_disable_pcie_error_reporting(pdev);
14144 pci_release_mem_regions(pdev);
14147 pci_disable_device(pdev);
14152 * i40e_remove - Device removal routine
14153 * @pdev: PCI device information struct
14155 * i40e_remove is called by the PCI subsystem to alert the driver
14156 * that it should release a PCI device. This could be caused by a
14157 * Hot-Plug event, or because the driver is going to be removed from memory.
14160 static void i40e_remove(struct pci_dev *pdev)
14162 struct i40e_pf *pf = pci_get_drvdata(pdev);
14163 struct i40e_hw *hw = &pf->hw;
14164 i40e_status ret_code;
14167 i40e_dbg_pf_exit(pf);
14171 /* Disable RSS in hw */
14172 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
14173 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
14175 while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
14176 usleep_range(1000, 2000);
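/* Wait for any reset recovery still in flight to finish before tearing the
 * device down.
 */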
14178 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
14179 set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
14181 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
14183 /* no more scheduling of any task */
14184 set_bit(__I40E_SUSPENDED, pf->state);
14185 set_bit(__I40E_DOWN, pf->state);
14186 if (pf->service_timer.function)
14187 del_timer_sync(&pf->service_timer);
14188 if (pf->service_task.func)
14189 cancel_work_sync(&pf->service_task);
14191 /* Client close must be called explicitly here because the timer
14192 * has been stopped.
14194 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14196 i40e_fdir_teardown(pf);
14198 /* If there is a switch structure or any orphans, remove them.
14199 * This will leave only the PF's VSI remaining.
14201 for (i = 0; i < I40E_MAX_VEB; i++) {
14205 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
14206 pf->veb[i]->uplink_seid == 0)
14207 i40e_switch_branch_release(pf->veb[i]);
14210 /* Now we can shutdown the PF's VSI, just before we kill
14213 if (pf->vsi[pf->lan_vsi])
14214 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
14216 i40e_cloud_filter_exit(pf);
14218 /* remove attached clients */
14219 if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
14220 ret_code = i40e_lan_del_device(pf);
14222 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
14226 /* shutdown and destroy the HMC */
14227 if (hw->hmc.hmc_obj) {
14228 ret_code = i40e_shutdown_lan_hmc(hw);
14230 dev_warn(&pdev->dev,
14231 "Failed to destroy the HMC resources: %d\n",
14235 /* shutdown the adminq */
14236 i40e_shutdown_adminq(hw);
14238 /* destroy the locks only once, here */
14239 mutex_destroy(&hw->aq.arq_mutex);
14240 mutex_destroy(&hw->aq.asq_mutex);
14242 /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
14244 i40e_clear_interrupt_scheme(pf);
14245 for (i = 0; i < pf->num_alloc_vsi; i++) {
14247 i40e_vsi_clear_rings(pf->vsi[i]);
14248 i40e_vsi_clear(pf->vsi[i]);
14254 for (i = 0; i < I40E_MAX_VEB; i++) {
14259 kfree(pf->qp_pile);
14262 iounmap(hw->hw_addr);
14264 pci_release_mem_regions(pdev);
14266 pci_disable_pcie_error_reporting(pdev);
14267 pci_disable_device(pdev);
14271 * i40e_pci_error_detected - warning that something funky happened in PCI land
14272 * @pdev: PCI device information struct
14273 * @error: the type of PCI error
14275 * Called to warn that something happened and the error handling steps
14276 * are in progress. Allows the driver to quiesce things, be ready for remediation. */
14279 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
14280 enum pci_channel_state error)
14282 struct i40e_pf *pf = pci_get_drvdata(pdev);
14284 dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
14287 dev_info(&pdev->dev,
14288 "Cannot recover - error happened during device probe\n");
14289 return PCI_ERS_RESULT_DISCONNECT;
14292 /* shutdown all operations */
14293 if (!test_bit(__I40E_SUSPENDED, pf->state))
14294 i40e_prep_for_reset(pf, false);
14296 /* Request a slot reset */
14297 return PCI_ERS_RESULT_NEED_RESET;
14301 * i40e_pci_error_slot_reset - a PCI slot reset just happened
14302 * @pdev: PCI device information struct
14304 * Called to find if the driver can work with the device now that
14305 * the pci slot has been reset. If a basic connection seems good
14306 * (registers are readable and have sane content) then return a
14307 * happy little PCI_ERS_RESULT_xxx.
14309 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
14311 struct i40e_pf *pf = pci_get_drvdata(pdev);
14312 pci_ers_result_t result;
14316 dev_dbg(&pdev->dev, "%s\n", __func__);
14317 if (pci_enable_device_mem(pdev)) {
14318 dev_info(&pdev->dev,
14319 "Cannot re-enable PCI device after reset.\n");
14320 result = PCI_ERS_RESULT_DISCONNECT;
14322 pci_set_master(pdev);
14323 pci_restore_state(pdev);
14324 pci_save_state(pdev);
14325 pci_wake_from_d3(pdev, false);
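/* Read a register as a quick sanity check that the function's MMIO space is
 * reachable again after the slot reset.
 */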
14327 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
14329 result = PCI_ERS_RESULT_RECOVERED;
14331 result = PCI_ERS_RESULT_DISCONNECT;
14334 err = pci_cleanup_aer_uncorrect_error_status(pdev);
14336 dev_info(&pdev->dev,
14337 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
14339 /* non-fatal, continue */
14346 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
14347 * @pdev: PCI device information struct
14349 static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
14351 struct i40e_pf *pf = pci_get_drvdata(pdev);
14353 i40e_prep_for_reset(pf, false);
14357 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
14358 * @pdev: PCI device information struct
14360 static void i40e_pci_error_reset_done(struct pci_dev *pdev)
14362 struct i40e_pf *pf = pci_get_drvdata(pdev);
14364 i40e_reset_and_rebuild(pf, false, false);
14368 * i40e_pci_error_resume - restart operations after PCI error recovery
14369 * @pdev: PCI device information struct
14371 * Called to allow the driver to bring things back up after PCI error
14372 * and/or reset recovery has finished.
14374 static void i40e_pci_error_resume(struct pci_dev *pdev)
14376 struct i40e_pf *pf = pci_get_drvdata(pdev);
14378 dev_dbg(&pdev->dev, "%s\n", __func__);
14379 if (test_bit(__I40E_SUSPENDED, pf->state))
14382 i40e_handle_reset_warning(pf, false);
14386 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
14387 * using the mac_address_write admin q function
14388 * @pf: pointer to i40e_pf struct
14390 static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
14392 struct i40e_hw *hw = &pf->hw;
14397 /* Get current MAC address in case it's an LAA */
14398 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
14399 ether_addr_copy(mac_addr,
14400 pf->vsi[pf->lan_vsi]->netdev->dev_addr);
14402 dev_err(&pf->pdev->dev,
14403 "Failed to retrieve MAC address; using default\n");
14404 ether_addr_copy(mac_addr, hw->mac.addr);
14407 /* The FW expects the mac address write cmd to first be called with
14408 * one of these flags before calling it again with the multicast enable flags. */
14411 flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
14413 if (hw->func_caps.flex10_enable && hw->partition_id != 1)
14414 flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
14416 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
14418 dev_err(&pf->pdev->dev,
14419 "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up\n");
14423 flags = I40E_AQC_MC_MAG_EN
14424 | I40E_AQC_WOL_PRESERVE_ON_PFR
14425 | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
14426 ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
14428 dev_err(&pf->pdev->dev,
14429 "Failed to enable Multicast Magic Packet wake up\n");
14433 * i40e_shutdown - PCI callback for shutting down
14434 * @pdev: PCI device information struct
14436 static void i40e_shutdown(struct pci_dev *pdev)
14438 struct i40e_pf *pf = pci_get_drvdata(pdev);
14439 struct i40e_hw *hw = &pf->hw;
14441 set_bit(__I40E_SUSPENDED, pf->state);
14442 set_bit(__I40E_DOWN, pf->state);
14444 del_timer_sync(&pf->service_timer);
14445 cancel_work_sync(&pf->service_task);
14446 i40e_cloud_filter_exit(pf);
14447 i40e_fdir_teardown(pf);
14449 /* Client close must be called explicitly here because the timer
14450 * has been stopped.
14452 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14454 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
14455 i40e_enable_mc_magic_wake(pf);
14457 i40e_prep_for_reset(pf, false);
14459 wr32(hw, I40E_PFPM_APM,
14460 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14461 wr32(hw, I40E_PFPM_WUFC,
14462 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
14464 /* Since we're going to destroy queues during the
14465 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this flow. */
14469 i40e_clear_interrupt_scheme(pf);
14472 if (system_state == SYSTEM_POWER_OFF) {
14473 pci_wake_from_d3(pdev, pf->wol_en);
14474 pci_set_power_state(pdev, PCI_D3hot);
14479 * i40e_suspend - PM callback for moving to D3
14480 * @dev: generic device information structure
14482 static int __maybe_unused i40e_suspend(struct device *dev)
14484 struct pci_dev *pdev = to_pci_dev(dev);
14485 struct i40e_pf *pf = pci_get_drvdata(pdev);
14486 struct i40e_hw *hw = &pf->hw;
14488 /* If we're already suspended, then there is nothing to do */
14489 if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
14492 set_bit(__I40E_DOWN, pf->state);
14494 /* Ensure service task will not be running */
14495 del_timer_sync(&pf->service_timer);
14496 cancel_work_sync(&pf->service_task);
14498 /* Client close must be called explicitly here because the timer
14499 * has been stopped.
14501 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
14503 if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
14504 i40e_enable_mc_magic_wake(pf);
14506 /* Since we're going to destroy queues during the
14507 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this flow. */
14512 i40e_prep_for_reset(pf, true);
14514 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
14515 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
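/* The APM/WUFC writes above arm (or disarm) magic-packet wake according to
 * pf->wol_en before the device enters D3.
 */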
14517 /* Clear the interrupt scheme and release our IRQs so that the system
14518 * can safely hibernate even when there are a large number of CPUs.
14519 * Otherwise hibernation might fail when mapping all the vectors back to the CPU. */
14522 i40e_clear_interrupt_scheme(pf);
14530 * i40e_resume - PM callback for waking up from D3
14531 * @dev: generic device information structure
14533 static int __maybe_unused i40e_resume(struct device *dev)
14535 struct pci_dev *pdev = to_pci_dev(dev);
14536 struct i40e_pf *pf = pci_get_drvdata(pdev);
14539 /* If we're not suspended, then there is nothing to do */
14540 if (!test_bit(__I40E_SUSPENDED, pf->state))
14543 /* We need to hold the RTNL lock prior to restoring interrupt schemes,
14544 * since we're going to be restoring queues
14548 /* We cleared the interrupt scheme when we suspended, so we need to
14549 * restore it now to resume device functionality.
14551 err = i40e_restore_interrupt_scheme(pf);
14553 dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
14557 clear_bit(__I40E_DOWN, pf->state);
14558 i40e_reset_and_rebuild(pf, false, true);
14562 /* Clear suspended state last after everything is recovered */
14563 clear_bit(__I40E_SUSPENDED, pf->state);
14565 /* Restart the service task */
14566 mod_timer(&pf->service_timer,
14567 round_jiffies(jiffies + pf->service_timer_period));
14572 static const struct pci_error_handlers i40e_err_handler = {
14573 .error_detected = i40e_pci_error_detected,
14574 .slot_reset = i40e_pci_error_slot_reset,
14575 .reset_prepare = i40e_pci_error_reset_prepare,
14576 .reset_done = i40e_pci_error_reset_done,
14577 .resume = i40e_pci_error_resume,
14580 static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
14582 static struct pci_driver i40e_driver = {
14583 .name = i40e_driver_name,
14584 .id_table = i40e_pci_tbl,
14585 .probe = i40e_probe,
14586 .remove = i40e_remove,
14588 .pm = &i40e_pm_ops,
14590 .shutdown = i40e_shutdown,
14591 .err_handler = &i40e_err_handler,
14592 .sriov_configure = i40e_pci_sriov_configure,
14596 * i40e_init_module - Driver registration routine
14598 * i40e_init_module is the first routine called when the driver is
14599 * loaded. All it does is register with the PCI subsystem.
14601 static int __init i40e_init_module(void)
14603 pr_info("%s: %s - version %s\n", i40e_driver_name,
14604 i40e_driver_string, i40e_driver_version_str);
14605 pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
14607 /* There is no need to throttle the number of active tasks because
14608 * each device limits its own task using a state bit for scheduling
14609 * the service task, and the device tasks do not interfere with each
14610 * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM
14611 * since we need to be able to guarantee forward progress even under
14614 i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
14616 pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
14621 return pci_register_driver(&i40e_driver);
14623 module_init(i40e_init_module);
14626 * i40e_exit_module - Driver exit cleanup routine
14628 * i40e_exit_module is called just before the driver is removed from memory.
14631 static void __exit i40e_exit_module(void)
14633 pci_unregister_driver(&i40e_driver);
14634 destroy_workqueue(i40e_wq);
14637 module_exit(i40e_exit_module);