// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <net/pkt_sched.h>
#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"

#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);

void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame. As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_offload_apply(adapter);

	igc_get_phy_info(hw);
}

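/* A worked example of the watermark arithmetic above (a sketch; the exact
 * constants live in igc_defines.h): with a 34 KB Rx packet buffer
 * (IGC_PBA_34K), pba << 10 is 34816 bytes. Reserving room for one in-flight
 * Tx frame plus one maximum-size Rx frame that may still arrive after the
 * pause frame is sent gives the highest safe fill level; rounding down to
 * the hardware's 16-byte granularity yields fc->high_water, and
 * fc->low_water sits one granule below it.
 */
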
/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}

/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	if (!pci_device_is_present(adapter->pdev))
		return;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		tx_buffer->next_to_watch = NULL;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources.
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources.
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources.
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources.
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	int size, desc_len;

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* set descriptor configuration */
	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}

/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
			(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igc_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}

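/* Sketch of the default indirection table fill above: with IGC_RETA_SIZE
 * entries (128) and, say, rss_queues = 4, entry j maps to queue
 * (j * 4) / 128, i.e. entries 0-31 go to queue 0, 32-63 to queue 1, and so
 * on, spreading the hash buckets evenly across the enabled Rx queues.
 */
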
/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}

/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}

/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *	   matching the filter are enqueued onto 'queue'. Otherwise, queue
 *	   assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));

	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}

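/* Usage sketch (illustrative values only): steering frames whose
 * destination address is 'addr' to Rx queue 2 via RAR entry 1 would be
 *
 *	igc_set_mac_filter_hw(adapter, 1, IGC_MAC_FILTER_TYPE_DST, addr, 2);
 *
 * while passing queue = -1 programs the same filter with queue assignment
 * disabled, as igc_set_default_mac_filter() below does for RAR entry 0.
 */
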
/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}

/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}

/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *	    0 on no addresses written
 *	    X on writing X addresses to MTA
 */
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
				bool *first_flag, bool *insert_empty)
{
	struct igc_adapter *adapter = netdev_priv(ring->netdev);
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	ktime_t now = ktime_get_clocktai();
	ktime_t baset_est, end_of_cycle;
	s32 launchtime;
	s64 n;

	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);

	baset_est = ktime_add_ns(base_time, cycle_time * (n));
	end_of_cycle = ktime_add_ns(baset_est, cycle_time);

	if (ktime_compare(txtime, end_of_cycle) >= 0) {
		if (baset_est != ring->last_ff_cycle) {
			*first_flag = true;
			ring->last_ff_cycle = baset_est;

			if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
				*insert_empty = true;
		}
	}

	/* There is a window at the end of the cycle in which a packet's
	 * launchtime may not be honored. A 5 us window is chosen to account
	 * for the time software needs to update the tail pointer and for
	 * the packet to be DMA'd into the packet buffer.
	 */
	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
			    txtime);

	ring->last_tx_cycle = end_of_cycle;

	launchtime = ktime_sub_ns(txtime, baset_est);
	if (launchtime > 0)
		div_s64_rem(launchtime, cycle_time, &launchtime);
	else
		launchtime = 0;

	return cpu_to_le32(launchtime);
}

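/* The launchtime math above, restated: with base_time B, cycle_time C and
 * current TAI time N, n = (N - B) / C identifies the current Qbv cycle, so
 * baset_est = B + n * C is that cycle's estimated start and end_of_cycle
 * its end. The value written to the descriptor is the offset of txtime
 * within one cycle, launchtime = (txtime - baset_est) mod C; the remainder
 * is taken because the value the hardware consumes wraps within a single
 * cycle rather than being an absolute timestamp.
 */
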
static int igc_init_empty_frame(struct igc_ring *ring,
				struct igc_tx_buffer *buffer,
				struct sk_buff *skb)
{
	unsigned int size;
	dma_addr_t dma;

	size = skb_headlen(skb);

	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dev, dma)) {
		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
		return -ENOMEM;
	}

	buffer->skb = skb;
	buffer->protocol = 0;
	buffer->bytecount = skb->len;
	buffer->gso_segs = 1;
	buffer->time_stamp = jiffies;
	dma_unmap_len_set(buffer, len, skb->len);
	dma_unmap_addr_set(buffer, dma, dma);

	return 0;
}

static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
					struct sk_buff *skb,
					struct igc_tx_buffer *first)
{
	union igc_adv_tx_desc *desc;
	u32 cmd_type, olinfo_status;
	int err;

	if (!igc_desc_unused(ring))
		return -EBUSY;

	err = igc_init_empty_frame(ring, first, skb);
	if (err)
		return err;

	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
		   first->bytecount;
	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;

	desc = IGC_TX_DESC(ring, ring->next_to_use);
	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));

	netdev_tx_sent_queue(txring_txq(ring), skb->len);

	first->next_to_watch = desc;

	ring->next_to_use++;
	if (ring->next_to_use == ring->count)
		ring->next_to_use = 0;

	return 0;
}

#define IGC_EMPTY_FRAME_SIZE	60

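/* A note on the empty-frame machinery (a sketch of the intent, per the
 * insert_empty handling in igc_xmit_frame_ring() below): when Qbv
 * launchtime is enabled and a packet targets a new cycle, the controller
 * needs a transmission to close out the previous cycle before the "first
 * packet of cycle" context flag can take effect, so the driver queues a
 * zeroed IGC_EMPTY_FRAME_SIZE-byte dummy skb ahead of the real packet.
 */
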
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    __le32 launch_time, bool first_flag,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	if (first_flag)
		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->launch_time = launch_time;
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
			__le32 launch_time, bool first_flag)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (skb_csum_is_sctp(skb)) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, 0);
}

static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Memory barrier: make the queue-stop above visible to the
	 * completion path before re-checking for free descriptors.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;

	return __igc_maybe_stop_tx(tx_ring, size);
}

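/* The stop/recheck pattern above pairs with igc_clean_tx_irq(): the queue
 * is stopped first, smp_mb() publishes that state, and only then is the
 * free-descriptor count re-read. The cleanup path issues its own barrier
 * after advancing next_to_clean before testing the stopped bit, so one of
 * the two sides is guaranteed to observe the other and the queue can never
 * stay stopped while room is available.
 */
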
#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ? \
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))

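/* IGC_SET_FLAG() above maps a bit from _input to a differently positioned
 * bit in the result without a branch: because both mask arguments are
 * compile-time constants, the multiply or divide by (_result / _flag) (or
 * its inverse) folds into a constant shift. For example,
 * IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO, IGC_ADVTXD_DCMD_TSE) yields
 * IGC_ADVTXD_DCMD_TSE when the TSO flag is set and 0 otherwise.
 */
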
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	/* set segmentation bits for TSO */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	return cmd_type;
}

static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			  ((IGC_TXD_POPTS_TXSM << 8) /
			  IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			  (((IGC_TXD_POPTS_IXSM << 8)) /
			  IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);

	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return 0;

dma_error:
	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}

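/* On the cmd_type ^ length writes in igc_tx_map() above: cmd_type never has
 * any of the low 16 buffer-length bits set when the descriptor is written,
 * so XOR-ing the remaining byte count into it behaves exactly like OR while
 * letting the compiler reuse the same expression for the "max chunk" and
 * "final" cases; each data descriptor carries at most IGC_MAX_DATA_PER_TXD
 * bytes, which is why large buffers are split across several descriptors.
 */
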
static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   __le32 launch_time, bool first_flag,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
			vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	bool first_flag = false, insert_empty = false;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	__le32 launch_time = 0;
	u32 tx_flags = 0;
	unsigned short f;
	ktime_t txtime;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));

	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (!tx_ring->launchtime_enable)
		goto done;

	txtime = skb->tstamp;
	skb->tstamp = ktime_set(0, 0);
	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);

	if (insert_empty) {
		struct igc_tx_buffer *empty_info;
		struct sk_buff *empty;
		void *data;

		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
		if (!empty)
			goto done;

		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
		memset(data, 0, IGC_EMPTY_FRAME_SIZE);

		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);

		if (igc_init_tx_empty_descriptor(tx_ring,
						 empty,
						 empty_info) < 0)
			dev_kfree_skb_any(empty);
	}

done:
	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);

		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		unsigned long flags;

		spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && !adapter->ptp_tx_skb) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}

		spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first, launch_time, first_flag);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}

static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_L4E |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
		   le32_to_cpu(rx_desc->wb.upper.status_error));
}

/* Mapping HW RSS Type to enum pkt_hash_types */
static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
	[IGC_RSS_TYPE_NO_HASH]		= PKT_HASH_TYPE_L2,
	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_IPV4]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_IPV6_EX]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_IPV6]	= PKT_HASH_TYPE_L3,
	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= PKT_HASH_TYPE_L4,
	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX]	= PKT_HASH_TYPE_L4,
	[10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
	[11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
	[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions */
	[13] = PKT_HASH_TYPE_NONE,
	[14] = PKT_HASH_TYPE_NONE,
	[15] = PKT_HASH_TYPE_NONE,
};

static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH) {
		u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		u32 rss_type = igc_rss_type(rx_desc);

		skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
	}
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in order
 * to populate the hash, checksum, VLAN, protocol, and other fields within the
 * skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
	rx_buffer->page_offset ^= truesize;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
	rx_buffer->page_offset += truesize;
#endif
}

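/* Buffer recycling note: with 4 KB pages the ring uses each page as two
 * 2 KB halves; page_offset ^= truesize flips between the halves so the
 * hardware can DMA into one half while the stack still reads the other.
 * On larger page sizes the offset simply advances through the page until
 * it is exhausted.
 */
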
static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     union igc_adv_rx_desc *rx_desc,
				     unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IGC_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	/* build an skb around the page buffer */
	skb = build_skb(va - IGC_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IGC_SKB_PAD);
	__skb_put(skb, size);

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 union igc_adv_rx_desc *rx_desc,
					 unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
		igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
		va += IGC_TS_HDR_LEN;
		size -= IGC_TS_HDR_LEN;
	}

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}

static inline bool igc_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote pages */
	if (unlikely(igc_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IGC_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

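/* pagecnt_bias tracks references the driver pre-charged on the page:
 * rather than atomically bumping page->_refcount for every Rx frame, the
 * driver recharges in large batches (USHRT_MAX) and pays them back by
 * decrementing the local bias, so page_ref_count(page) - pagecnt_bias > 1
 * means another owner (the stack) still holds the page and it cannot yet
 * be recycled back onto the ring.
 */
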
/**
 * igc_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool igc_is_non_eop(struct igc_ring *rx_ring,
			   union igc_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGC_RX_DESC(rx_ring, ntc));

	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
				union igc_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

static void igc_put_rx_buffer(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *rx_buffer)
{
	if (igc_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		igc_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
}

static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igc_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGC_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igc_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;

	return true;
}

/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring
 * @cleaned_count: number of buffers to clean
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_use;
	struct igc_rx_buffer *bi;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = igc_rx_bufsz(rx_ring);

	do {
		if (!igc_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGC_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}

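/* Index trick used above: i starts at next_to_use - count, i.e. a value
 * that is "count below zero" in unsigned arithmetic, so the hot loop can
 * test the cheap condition !i to detect ring wrap instead of comparing
 * against rx_ring->count on every iteration; i += rx_ring->count at the
 * end converts it back to a real ring index. The same trick appears in
 * igc_clean_tx_irq() below.
 */
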
static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
	unsigned int total_bytes = 0, total_packets = 0;
	struct igc_ring *rx_ring = q_vector->rx.ring;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = igc_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union igc_adv_rx_desc *rx_desc;
		struct igc_rx_buffer *rx_buffer;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
			igc_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igc_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
		else
			skb = igc_construct_skb(rx_ring, rx_buffer,
						rx_desc, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		igc_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igc_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		igc_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igc_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets;
}

2126 * igc_clean_tx_irq - Reclaim resources after transmit completes
2127 * @q_vector: pointer to q_vector containing needed info
2128 * @napi_budget: Used to determine if we are in netpoll
2130 * returns true if ring is completely cleaned
2132 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2134 struct igc_adapter *adapter = q_vector->adapter;
2135 unsigned int total_bytes = 0, total_packets = 0;
2136 unsigned int budget = q_vector->tx.work_limit;
2137 struct igc_ring *tx_ring = q_vector->tx.ring;
2138 unsigned int i = tx_ring->next_to_clean;
2139 struct igc_tx_buffer *tx_buffer;
2140 union igc_adv_tx_desc *tx_desc;
2142 if (test_bit(__IGC_DOWN, &adapter->state))
2145 tx_buffer = &tx_ring->tx_buffer_info[i];
2146 tx_desc = IGC_TX_DESC(tx_ring, i);
2147 i -= tx_ring->count;
2150 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
2152 /* if next_to_watch is not set then there is no work pending */
2156 /* prevent any other reads prior to eop_desc */
2159 /* if DD is not set pending work has not been completed */
2160 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
2163 /* clear next_to_watch to prevent false hangs */
2164 tx_buffer->next_to_watch = NULL;
2166 /* update the statistics for this packet */
2167 total_bytes += tx_buffer->bytecount;
2168 total_packets += tx_buffer->gso_segs;
2171 napi_consume_skb(tx_buffer->skb, napi_budget);
2173 /* unmap skb header data */
2174 dma_unmap_single(tx_ring->dev,
2175 dma_unmap_addr(tx_buffer, dma),
2176 dma_unmap_len(tx_buffer, len),
2179 /* clear tx_buffer data */
2180 dma_unmap_len_set(tx_buffer, len, 0);
2182 /* clear last DMA location and unmap remaining buffers */
2183 while (tx_desc != eop_desc) {
2188 i -= tx_ring->count;
2189 tx_buffer = tx_ring->tx_buffer_info;
2190 tx_desc = IGC_TX_DESC(tx_ring, 0);
2193 /* unmap any remaining paged data */
2194 if (dma_unmap_len(tx_buffer, len)) {
2195 dma_unmap_page(tx_ring->dev,
2196 dma_unmap_addr(tx_buffer, dma),
2197 dma_unmap_len(tx_buffer, len),
2199 dma_unmap_len_set(tx_buffer, len, 0);
2203 /* move us one more past the eop_desc for start of next pkt */
2208 i -= tx_ring->count;
2209 tx_buffer = tx_ring->tx_buffer_info;
2210 tx_desc = IGC_TX_DESC(tx_ring, 0);
2213 /* issue prefetch for next Tx descriptor */
2216 /* update budget accounting */
2218 } while (likely(budget));
2220 netdev_tx_completed_queue(txring_txq(tx_ring),
2221 total_packets, total_bytes);
2223 i += tx_ring->count;
2224 tx_ring->next_to_clean = i;
2225 u64_stats_update_begin(&tx_ring->tx_syncp);
2226 tx_ring->tx_stats.bytes += total_bytes;
2227 tx_ring->tx_stats.packets += total_packets;
2228 u64_stats_update_end(&tx_ring->tx_syncp);
2229 q_vector->tx.total_bytes += total_bytes;
2230 q_vector->tx.total_packets += total_packets;
2232 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
2233 struct igc_hw *hw = &adapter->hw;
2235 /* Detect a transmit hang in hardware, this serializes the
2236 * check with the clearing of time_stamp and movement of i
2238 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
2239 if (tx_buffer->next_to_watch &&
2240 time_after(jiffies, tx_buffer->time_stamp +
2241 (adapter->tx_timeout_factor * HZ)) &&
2242 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
2243 /* detected Tx unit hang */
2244 netdev_err(tx_ring->netdev,
2245 "Detected Tx Unit Hang\n"
2249 " next_to_use <%x>\n"
2250 " next_to_clean <%x>\n"
2251 "buffer_info[next_to_clean]\n"
2252 " time_stamp <%lx>\n"
2253 " next_to_watch <%p>\n"
2255 " desc.status <%x>\n",
2256 tx_ring->queue_index,
2257 rd32(IGC_TDH(tx_ring->reg_idx)),
2258 readl(tx_ring->tail),
2259 tx_ring->next_to_use,
2260 tx_ring->next_to_clean,
2261 tx_buffer->time_stamp,
2262 tx_buffer->next_to_watch,
2263 jiffies,
2264 tx_buffer->next_to_watch->wb.status);
2265 netif_stop_subqueue(tx_ring->netdev,
2266 tx_ring->queue_index);
2268 /* we are about to reset, no point in enabling stuff */
2269 return true;
2270 }
2271 }
2273 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
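/* Sizing note: DESC_NEEDED approximates the worst-case descriptor count
 * for one maximally fragmented skb, so waking only once two such
 * packets' worth of descriptors are free gives the queue hysteresis and
 * avoids a stop/wake ping-pong right at the full mark.
 */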
2274 if (unlikely(total_packets &&
2275 netif_carrier_ok(tx_ring->netdev) &&
2276 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
2277 /* Make sure that anybody stopping the queue after this
2278 * sees the new next_to_clean.
2279 */
2280 smp_mb();
2281 if (__netif_subqueue_stopped(tx_ring->netdev,
2282 tx_ring->queue_index) &&
2283 !(test_bit(__IGC_DOWN, &adapter->state))) {
2284 netif_wake_subqueue(tx_ring->netdev,
2285 tx_ring->queue_index);
2287 u64_stats_update_begin(&tx_ring->tx_syncp);
2288 tx_ring->tx_stats.restart_queue++;
2289 u64_stats_update_end(&tx_ring->tx_syncp);
2290 }
2291 }
2293 return !!budget;
2294 }
2296 static int igc_find_mac_filter(struct igc_adapter *adapter,
2297 enum igc_mac_filter_type type, const u8 *addr)
2298 {
2299 struct igc_hw *hw = &adapter->hw;
2300 int max_entries = hw->mac.rar_entry_count;
2301 u32 ral, rah;
2302 int i;
2304 for (i = 0; i < max_entries; i++) {
2305 ral = rd32(IGC_RAL(i));
2306 rah = rd32(IGC_RAH(i));
2308 if (!(rah & IGC_RAH_AV))
2309 continue;
2310 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
2311 continue;
2312 if ((rah & IGC_RAH_RAH_MASK) !=
2313 le16_to_cpup((__le16 *)(addr + 4)))
2314 continue;
2315 if (ral != le32_to_cpup((__le32 *)(addr)))
2316 continue;
2318 return i;
2319 }
2321 return -1;
2322 }
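/* Layout sketch: each receive-address slot is a RAL/RAH register pair.
 * RAL carries the low 32 bits of the MAC address, the low 16 bits of
 * RAH carry the remaining two bytes, IGC_RAH_AV marks the slot valid,
 * and IGC_RAH_ASEL_SRC_ADDR flips the slot from destination-address to
 * source-address matching - hence the four checks above.
 */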
2324 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
2325 {
2326 struct igc_hw *hw = &adapter->hw;
2327 int max_entries = hw->mac.rar_entry_count;
2328 u32 rah;
2329 int i;
2331 for (i = 0; i < max_entries; i++) {
2332 rah = rd32(IGC_RAH(i));
2334 if (!(rah & IGC_RAH_AV))
2335 return i;
2336 }
2338 return -1;
2339 }
2342 * igc_add_mac_filter() - Add MAC address filter
2343 * @adapter: Pointer to adapter where the filter should be added
2344 * @type: MAC address filter type (source or destination)
2345 * @addr: MAC address
2346 * @queue: If non-negative, queue assignment feature is enabled and frames
2347 * matching the filter are enqueued onto 'queue'. Otherwise, queue
2348 * assignment is disabled.
2350 * Return: 0 in case of success, negative errno code otherwise.
2352 static int igc_add_mac_filter(struct igc_adapter *adapter,
2353 enum igc_mac_filter_type type, const u8 *addr,
2354 int queue)
2355 {
2356 struct net_device *dev = adapter->netdev;
2357 int index;
2359 index = igc_find_mac_filter(adapter, type, addr);
2360 if (index >= 0)
2361 goto update_filter;
2363 index = igc_get_avail_mac_filter_slot(adapter);
2364 if (index < 0)
2365 return -ENOSPC;
2367 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
2368 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2369 addr, queue);
2371 update_filter:
2372 igc_set_mac_filter_hw(adapter, index, type, addr, queue);
2373 return 0;
2374 }
2377 * igc_del_mac_filter() - Delete MAC address filter
2378 * @adapter: Pointer to adapter where the filter should be deleted from
2379 * @type: MAC address filter type (source or destination)
2380 * @addr: MAC address
2382 static void igc_del_mac_filter(struct igc_adapter *adapter,
2383 enum igc_mac_filter_type type, const u8 *addr)
2384 {
2385 struct net_device *dev = adapter->netdev;
2386 int index;
2388 index = igc_find_mac_filter(adapter, type, addr);
2389 if (index < 0)
2390 return;
2392 if (index == 0) {
2393 /* If this is the default filter, we don't actually delete it.
2394 * We just reset to its default value i.e. disable queue
2395 * assignment.
2396 */
2397 netdev_dbg(dev, "Disable default MAC filter queue assignment");
2399 igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
2400 } else {
2401 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
2402 index,
2403 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2404 addr);
2406 igc_clear_mac_filter_hw(adapter, index);
2407 }
2408 }
2411 * igc_add_vlan_prio_filter() - Add VLAN priority filter
2412 * @adapter: Pointer to adapter where the filter should be added
2413 * @prio: VLAN priority value
2414 * @queue: Queue number which matching frames are assigned to
2416 * Return: 0 in case of success, negative errno code otherwise.
2418 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
2419 int queue)
2420 {
2421 struct net_device *dev = adapter->netdev;
2422 struct igc_hw *hw = &adapter->hw;
2423 u32 vlanpqf;
2425 vlanpqf = rd32(IGC_VLANPQF);
2427 if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
2428 netdev_dbg(dev, "VLAN priority filter already in use\n");
2432 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
2433 vlanpqf |= IGC_VLANPQF_VALID(prio);
2435 wr32(IGC_VLANPQF, vlanpqf);
2437 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
2443 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
2444 * @adapter: Pointer to adapter where the filter should be deleted from
2445 * @prio: VLAN priority value
2447 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
2448 {
2449 struct igc_hw *hw = &adapter->hw;
2450 u32 vlanpqf;
2452 vlanpqf = rd32(IGC_VLANPQF);
2454 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
2455 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
2457 wr32(IGC_VLANPQF, vlanpqf);
2459 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
2463 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
2464 {
2465 struct igc_hw *hw = &adapter->hw;
2466 int i;
2468 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2469 u32 etqf = rd32(IGC_ETQF(i));
2471 if (!(etqf & IGC_ETQF_FILTER_ENABLE))
2472 return i;
2473 }
2475 return -1;
2476 }
2479 * igc_add_etype_filter() - Add ethertype filter
2480 * @adapter: Pointer to adapter where the filter should be added
2481 * @etype: Ethertype value
2482 * @queue: If non-negative, queue assignment feature is enabled and frames
2483 * matching the filter are enqueued onto 'queue'. Otherwise, queue
2484 * assignment is disabled.
2486 * Return: 0 in case of success, negative errno code otherwise.
2488 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
2489 int queue)
2490 {
2491 struct igc_hw *hw = &adapter->hw;
2492 int index;
2493 u32 etqf;
2495 index = igc_get_avail_etype_filter_slot(adapter);
2496 if (index < 0)
2497 return -ENOSPC;
2499 etqf = rd32(IGC_ETQF(index));
2501 etqf &= ~IGC_ETQF_ETYPE_MASK;
2502 etqf |= etype;
2504 if (queue >= 0) {
2505 etqf &= ~IGC_ETQF_QUEUE_MASK;
2506 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
2507 etqf |= IGC_ETQF_QUEUE_ENABLE;
2508 }
2510 etqf |= IGC_ETQF_FILTER_ENABLE;
2512 wr32(IGC_ETQF(index), etqf);
2514 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
2515 etype, queue);
2516 return 0;
2517 }
2519 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
2520 {
2521 struct igc_hw *hw = &adapter->hw;
2522 int i;
2524 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2525 u32 etqf = rd32(IGC_ETQF(i));
2527 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
2528 return i;
2529 }
2531 return -1;
2532 }
2535 * igc_del_etype_filter() - Delete ethertype filter
2536 * @adapter: Pointer to adapter where the filter should be deleted from
2537 * @etype: Ethertype value
2539 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
2540 {
2541 struct igc_hw *hw = &adapter->hw;
2542 int index;
2544 index = igc_find_etype_filter(adapter, etype);
2545 if (index < 0)
2546 return;
2548 wr32(IGC_ETQF(index), 0);
2550 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
2554 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
2555 const struct igc_nfc_rule *rule)
2556 {
2557 int err;
2559 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
2560 err = igc_add_etype_filter(adapter, rule->filter.etype,
2561 rule->action);
2562 if (err)
2563 return err;
2564 }
2566 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
2567 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
2568 rule->filter.src_addr, rule->action);
2569 if (err)
2570 return err;
2571 }
2573 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
2574 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
2575 rule->filter.dst_addr, rule->action);
2576 if (err)
2577 return err;
2578 }
2580 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
2581 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
2582 VLAN_PRIO_SHIFT;
2584 err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
2585 if (err)
2586 return err;
2587 }
2589 return 0;
2590 }
2592 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
2593 const struct igc_nfc_rule *rule)
2595 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
2596 igc_del_etype_filter(adapter, rule->filter.etype);
2598 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
2599 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
2600 VLAN_PRIO_SHIFT;
2602 igc_del_vlan_prio_filter(adapter, prio);
2603 }
2605 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
2606 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
2607 rule->filter.src_addr);
2609 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
2610 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
2611 rule->filter.dst_addr);
2612 }
2615 * igc_get_nfc_rule() - Get NFC rule
2616 * @adapter: Pointer to adapter
2617 * @location: Rule location
2619 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2621 * Return: Pointer to NFC rule at @location. If not found, NULL.
2623 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
2624 u32 location)
2625 {
2626 struct igc_nfc_rule *rule;
2628 list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
2629 if (rule->location == location)
2630 return rule;
2631 if (rule->location > location)
2632 break;
2633 }
2635 return NULL;
2636 }
2639 * igc_del_nfc_rule() - Delete NFC rule
2640 * @adapter: Pointer to adapter
2641 * @rule: Pointer to rule to be deleted
2643 * Disable NFC rule in hardware and delete it from adapter.
2645 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2647 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
2649 igc_disable_nfc_rule(adapter, rule);
2651 list_del(&rule->list);
2652 adapter->nfc_rule_count--;
2654 kfree(rule);
2655 }
2657 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
2659 struct igc_nfc_rule *rule, *tmp;
2661 mutex_lock(&adapter->nfc_rule_lock);
2663 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
2664 igc_del_nfc_rule(adapter, rule);
2666 mutex_unlock(&adapter->nfc_rule_lock);
2667 }
2670 * igc_add_nfc_rule() - Add NFC rule
2671 * @adapter: Pointer to adapter
2672 * @rule: Pointer to rule to be added
2674 * Enable NFC rule in hardware and add it to adapter.
2676 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2678 * Return: 0 on success, negative errno on failure.
2680 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
2681 {
2682 struct igc_nfc_rule *pred, *cur;
2683 int err;
2685 err = igc_enable_nfc_rule(adapter, rule);
2686 if (err)
2687 return err;
2689 pred = NULL;
2690 list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
2691 if (cur->location >= rule->location)
2692 break;
2693 pred = cur;
2694 }
2696 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
2697 adapter->nfc_rule_count++;
2698 return 0;
2699 }
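/* Worked example: with rules at locations 1, 5 and 9, adding location 7
 * walks until it reaches 9 (the first location >= 7), leaving pred at
 * the location-5 entry, so list_add() splices the new rule between 5
 * and 9 and the list stays sorted by location.
 */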
2701 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
2703 struct igc_nfc_rule *rule;
2705 mutex_lock(&adapter->nfc_rule_lock);
2707 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
2708 igc_enable_nfc_rule(adapter, rule);
2710 mutex_unlock(&adapter->nfc_rule_lock);
2713 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
2715 struct igc_adapter *adapter = netdev_priv(netdev);
2717 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
2720 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
2722 struct igc_adapter *adapter = netdev_priv(netdev);
2724 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
2726 return 0;
2727 }
2729 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2730 * @netdev: network interface device structure
2732 * The set_rx_mode entry point is called whenever the unicast or multicast
2733 * address lists or the network interface flags are updated. This routine is
2734 * responsible for configuring the hardware for proper unicast, multicast,
2735 * promiscuous mode, and all-multi behavior.
2737 static void igc_set_rx_mode(struct net_device *netdev)
2739 struct igc_adapter *adapter = netdev_priv(netdev);
2740 struct igc_hw *hw = &adapter->hw;
2741 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
2742 int count;
2744 /* Check for Promiscuous and All Multicast modes */
2745 if (netdev->flags & IFF_PROMISC) {
2746 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
2747 } else {
2748 if (netdev->flags & IFF_ALLMULTI) {
2749 rctl |= IGC_RCTL_MPE;
2750 } else {
2751 /* Write addresses to the MTA, if the attempt fails
2752 * then we should just turn on promiscuous mode so
2753 * that we can at least receive multicast traffic
2754 */
2755 count = igc_write_mc_addr_list(netdev);
2756 if (count < 0)
2757 rctl |= IGC_RCTL_MPE;
2758 }
2759 }
2761 /* Write addresses to available RAR registers, if there is not
2762 * sufficient space to store all the addresses then enable
2763 * unicast promiscuous mode
2765 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
2766 rctl |= IGC_RCTL_UPE;
2768 /* update state of unicast and multicast */
2769 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
2770 wr32(IGC_RCTL, rctl);
2772 #if (PAGE_SIZE < 8192)
2773 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
2774 rlpml = IGC_MAX_FRAME_BUILD_SKB;
2775 #endif
2776 wr32(IGC_RLPML, rlpml);
2777 }
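/* Summary of the RCTL logic above: IFF_PROMISC sets both the unicast
 * (UPE) and multicast (MPE) promiscuous bits; IFF_ALLMULTI sets MPE
 * only; otherwise the multicast list is hashed into the MTA with MPE as
 * the fallback when that write fails. Unicast overflow works the same
 * way: if __dev_uc_sync() cannot fit every address into the RAR slots,
 * UPE is enabled instead.
 */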
2780 * igc_configure - configure the hardware for RX and TX
2781 * @adapter: private board structure
2783 static void igc_configure(struct igc_adapter *adapter)
2785 struct net_device *netdev = adapter->netdev;
2788 igc_get_hw_control(adapter);
2789 igc_set_rx_mode(netdev);
2791 igc_setup_tctl(adapter);
2792 igc_setup_mrqc(adapter);
2793 igc_setup_rctl(adapter);
2795 igc_set_default_mac_filter(adapter);
2796 igc_restore_nfc_rules(adapter);
2798 igc_configure_tx(adapter);
2799 igc_configure_rx(adapter);
2801 igc_rx_fifo_flush_base(&adapter->hw);
2803 /* call igc_desc_unused which always leaves
2804 * at least 1 descriptor unused to make sure
2805 * next_to_use != next_to_clean
2807 for (i = 0; i < adapter->num_rx_queues; i++) {
2808 struct igc_ring *ring = adapter->rx_ring[i];
2810 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
2811 }
2812 }
2815 * igc_write_ivar - configure ivar for given MSI-X vector
2816 * @hw: pointer to the HW structure
2817 * @msix_vector: vector number we are allocating to a given ring
2818 * @index: row index of IVAR register to write within IVAR table
2819 * @offset: column offset of in IVAR, should be multiple of 8
2821 * The IVAR table consists of 2 columns,
2822 * each containing a cause allocation for an Rx and Tx ring, and a
2823 * variable number of rows depending on the number of queues supported.
2825 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
2826 int index, int offset)
2828 u32 ivar = array_rd32(IGC_IVAR0, index);
2830 /* clear any bits that are currently set */
2831 ivar &= ~((u32)0xFF << offset);
2833 /* write vector and valid bit */
2834 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
2836 array_wr32(IGC_IVAR0, index, ivar);
2837 }
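/* Worked example: for Rx queue 3, igc_assign_vector() below calls
 * igc_write_ivar(hw, vec, 3 >> 1, (3 & 0x1) << 4), i.e. row 1 of the
 * IVAR table at bit offset 16; the matching Tx queue uses the same row
 * at offset 16 + 8, so one 32-bit IVAR row holds the vector bytes for a
 * queue pair.
 */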
2839 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
2841 struct igc_adapter *adapter = q_vector->adapter;
2842 struct igc_hw *hw = &adapter->hw;
2843 int rx_queue = IGC_N0_QUEUE;
2844 int tx_queue = IGC_N0_QUEUE;
2846 if (q_vector->rx.ring)
2847 rx_queue = q_vector->rx.ring->reg_idx;
2848 if (q_vector->tx.ring)
2849 tx_queue = q_vector->tx.ring->reg_idx;
2851 switch (hw->mac.type) {
2852 case igc_i225:
2853 if (rx_queue > IGC_N0_QUEUE)
2854 igc_write_ivar(hw, msix_vector,
2855 rx_queue >> 1,
2856 (rx_queue & 0x1) << 4);
2857 if (tx_queue > IGC_N0_QUEUE)
2858 igc_write_ivar(hw, msix_vector,
2859 tx_queue >> 1,
2860 ((tx_queue & 0x1) << 4) + 8);
2861 q_vector->eims_value = BIT(msix_vector);
2862 break;
2863 default:
2864 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
2865 break;
2866 }
2868 /* add q_vector eims value to global eims_enable_mask */
2869 adapter->eims_enable_mask |= q_vector->eims_value;
2871 /* configure q_vector to set itr on first interrupt */
2872 q_vector->set_itr = 1;
2876 * igc_configure_msix - Configure MSI-X hardware
2877 * @adapter: Pointer to adapter structure
2879 * igc_configure_msix sets up the hardware to properly
2880 * generate MSI-X interrupts.
2882 static void igc_configure_msix(struct igc_adapter *adapter)
2884 struct igc_hw *hw = &adapter->hw;
2885 int i, vector = 0;
2886 u32 tmp;
2888 adapter->eims_enable_mask = 0;
2890 /* set vector for other causes, i.e. link changes */
2891 switch (hw->mac.type) {
2892 case igc_i225:
2893 /* Turn on MSI-X capability first, or our settings
2894 * won't stick. And it will take days to debug.
2895 */
2896 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
2897 IGC_GPIE_PBA | IGC_GPIE_EIAME |
2898 IGC_GPIE_NSICR);
2900 /* enable msix_other interrupt */
2901 adapter->eims_other = BIT(vector);
2902 tmp = (vector++ | IGC_IVAR_VALID) << 8;
2904 wr32(IGC_IVAR_MISC, tmp);
2905 break;
2906 default:
2907 /* do nothing, since nothing else supports MSI-X */
2908 break;
2909 } /* switch (hw->mac.type) */
2911 adapter->eims_enable_mask |= adapter->eims_other;
2913 for (i = 0; i < adapter->num_q_vectors; i++)
2914 igc_assign_vector(adapter->q_vector[i], vector++);
2916 wrfl();
2917 }
2920 * igc_irq_enable - Enable default interrupt generation settings
2921 * @adapter: board private structure
2923 static void igc_irq_enable(struct igc_adapter *adapter)
2925 struct igc_hw *hw = &adapter->hw;
2927 if (adapter->msix_entries) {
2928 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
2929 u32 regval = rd32(IGC_EIAC);
2931 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
2932 regval = rd32(IGC_EIAM);
2933 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
2934 wr32(IGC_EIMS, adapter->eims_enable_mask);
2935 wr32(IGC_IMS, ims);
2936 } else {
2937 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
2938 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
2939 }
2940 }
2943 * igc_irq_disable - Mask off interrupt generation on the NIC
2944 * @adapter: board private structure
2946 static void igc_irq_disable(struct igc_adapter *adapter)
2948 struct igc_hw *hw = &adapter->hw;
2950 if (adapter->msix_entries) {
2951 u32 regval = rd32(IGC_EIAM);
2953 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
2954 wr32(IGC_EIMC, adapter->eims_enable_mask);
2955 regval = rd32(IGC_EIAC);
2956 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
2957 }
2959 wr32(IGC_IAM, 0);
2960 wr32(IGC_IMC, ~0);
2961 wrfl();
2963 if (adapter->msix_entries) {
2964 int vector = 0, i;
2966 synchronize_irq(adapter->msix_entries[vector++].vector);
2968 for (i = 0; i < adapter->num_q_vectors; i++)
2969 synchronize_irq(adapter->msix_entries[vector++].vector);
2970 } else {
2971 synchronize_irq(adapter->pdev->irq);
2972 }
2973 }
2975 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
2976 const u32 max_rss_queues)
2978 /* Determine if we need to pair queues. */
2979 /* If rss_queues > half of max_rss_queues, pair the queues in
2980 * order to conserve interrupts due to limited supply.
2982 if (adapter->rss_queues > (max_rss_queues / 2))
2983 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
2984 else
2985 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
2986 }
2988 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
2989 {
2990 return IGC_MAX_RX_QUEUES;
2991 }
2993 static void igc_init_queue_configuration(struct igc_adapter *adapter)
2994 {
2995 u32 max_rss_queues;
2997 max_rss_queues = igc_get_max_rss_queues(adapter);
2998 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3000 igc_set_flag_queue_pairs(adapter, max_rss_queues);
3001 }
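/* Worked example: on an 8-CPU box with the i225's four queue pairs,
 * rss_queues = min(4, 8) = 4; since 4 > 4 / 2 the QUEUE_PAIRS flag is
 * set and each vector services one Rx and one Tx ring instead of
 * spending two vectors per queue index.
 */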
3004 * igc_reset_q_vector - Reset config for interrupt vector
3005 * @adapter: board private structure to initialize
3006 * @v_idx: Index of vector to be reset
3008 * If NAPI is enabled it will delete any references to the
3009 * NAPI struct. This is preparation for igc_free_q_vector.
3011 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
3013 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
3015 /* if we're coming from igc_set_interrupt_capability, the vectors are
3016 * not yet allocated
3017 */
3018 if (!q_vector)
3019 return;
3021 if (q_vector->tx.ring)
3022 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
3024 if (q_vector->rx.ring)
3025 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
3027 netif_napi_del(&q_vector->napi);
3028 }
3031 * igc_free_q_vector - Free memory allocated for specific interrupt vector
3032 * @adapter: board private structure to initialize
3033 * @v_idx: Index of vector to be freed
3035 * This function frees the memory allocated to the q_vector.
3037 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
3039 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
3041 adapter->q_vector[v_idx] = NULL;
3043 /* igc_get_stats64() might access the rings on this vector,
3044 * we must wait a grace period before freeing it.
3045 */
3046 if (q_vector)
3047 kfree_rcu(q_vector, rcu);
3048 }
3051 * igc_free_q_vectors - Free memory allocated for interrupt vectors
3052 * @adapter: board private structure to initialize
3054 * This function frees the memory allocated to the q_vectors. In addition if
3055 * NAPI is enabled it will delete any references to the NAPI struct prior
3056 * to freeing the q_vector.
3058 static void igc_free_q_vectors(struct igc_adapter *adapter)
3060 int v_idx = adapter->num_q_vectors;
3062 adapter->num_tx_queues = 0;
3063 adapter->num_rx_queues = 0;
3064 adapter->num_q_vectors = 0;
3066 while (v_idx--) {
3067 igc_reset_q_vector(adapter, v_idx);
3068 igc_free_q_vector(adapter, v_idx);
3069 }
3070 }
3073 * igc_update_itr - update the dynamic ITR value based on statistics
3074 * @q_vector: pointer to q_vector
3075 * @ring_container: ring info to update the itr for
3077 * Stores a new ITR value based on packets and byte
3078 * counts during the last interrupt. The advantage of per interrupt
3079 * computation is faster updates and more accurate ITR for the current
3080 * traffic pattern. Constants in this function were computed
3081 * based on theoretical maximum wire speed and thresholds were set based
3082 * on testing data as well as attempting to minimize response time
3083 * while increasing bulk throughput.
3084 * NOTE: These calculations are only valid when operating in a single-
3085 * queue environment.
3087 static void igc_update_itr(struct igc_q_vector *q_vector,
3088 struct igc_ring_container *ring_container)
3090 unsigned int packets = ring_container->total_packets;
3091 unsigned int bytes = ring_container->total_bytes;
3092 u8 itrval = ring_container->itr;
3094 /* no packets, exit with status unchanged */
3095 if (packets == 0)
3096 return;
3098 switch (itrval) {
3099 case lowest_latency:
3100 /* handle TSO and jumbo frames */
3101 if (bytes / packets > 8000)
3102 itrval = bulk_latency;
3103 else if ((packets < 5) && (bytes > 512))
3104 itrval = low_latency;
3105 break;
3106 case low_latency: /* 50 usec aka 20000 ints/s */
3107 if (bytes > 10000) {
3108 /* this if handles the TSO accounting */
3109 if (bytes / packets > 8000)
3110 itrval = bulk_latency;
3111 else if ((packets < 10) || ((bytes / packets) > 1200))
3112 itrval = bulk_latency;
3113 else if ((packets > 35))
3114 itrval = lowest_latency;
3115 } else if (bytes / packets > 2000) {
3116 itrval = bulk_latency;
3117 } else if (packets <= 2 && bytes < 512) {
3118 itrval = lowest_latency;
3119 }
3120 break;
3121 case bulk_latency: /* 250 usec aka 4000 ints/s */
3122 if (bytes > 25000) {
3123 if (packets > 35)
3124 itrval = low_latency;
3125 } else if (bytes < 1500) {
3126 itrval = low_latency;
3127 }
3128 break;
3129 }
3131 /* clear work counters since we have the values we need */
3132 ring_container->total_bytes = 0;
3133 ring_container->total_packets = 0;
3135 /* write updated itr to ring container */
3136 ring_container->itr = itrval;
3137 }
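/* Worked example: a service window of 20 packets / 30000 bytes while in
 * low_latency averages 1500 bytes per packet; bytes > 10000 and
 * 1500 > 1200, so the container is demoted to bulk_latency (fewer,
 * larger interrupts). A window of 2 packets / 400 bytes would instead
 * promote it back to lowest_latency.
 */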
3139 static void igc_set_itr(struct igc_q_vector *q_vector)
3141 struct igc_adapter *adapter = q_vector->adapter;
3142 u32 new_itr = q_vector->itr_val;
3143 u8 current_itr = 0;
3145 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3146 switch (adapter->link_speed) {
3147 case SPEED_10:
3148 case SPEED_100:
3149 current_itr = 0;
3150 new_itr = IGC_4K_ITR;
3151 goto set_itr_now;
3152 default:
3153 break;
3154 }
3156 igc_update_itr(q_vector, &q_vector->tx);
3157 igc_update_itr(q_vector, &q_vector->rx);
3159 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
3161 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3162 if (current_itr == lowest_latency &&
3163 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3164 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3165 current_itr = low_latency;
3167 switch (current_itr) {
3168 /* counts and packets in update_itr are dependent on these numbers */
3169 case lowest_latency:
3170 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
3171 break;
3172 case low_latency:
3173 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
3174 break;
3175 case bulk_latency:
3176 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
3177 break;
3178 default:
3179 break;
3180 }
3182 set_itr_now:
3183 if (new_itr != q_vector->itr_val) {
3184 /* this attempts to bias the interrupt rate towards Bulk
3185 * by adding intermediate steps when interrupt rate is
3186 * increasing
3187 */
3188 new_itr = new_itr > q_vector->itr_val ?
3189 max((new_itr * q_vector->itr_val) /
3190 (new_itr + (q_vector->itr_val >> 2)),
3191 new_itr) : new_itr;
3192 /* Don't write the value here; it resets the adapter's
3193 * internal timer, and causes us to delay far longer than
3194 * we should between interrupts. Instead, we write the ITR
3195 * value at the beginning of the next interrupt so the timing
3196 * ends up being correct.
3198 q_vector->itr_val = new_itr;
3199 q_vector->set_itr = 1;
3200 }
3201 }
3203 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
3205 int v_idx = adapter->num_q_vectors;
3207 if (adapter->msix_entries) {
3208 pci_disable_msix(adapter->pdev);
3209 kfree(adapter->msix_entries);
3210 adapter->msix_entries = NULL;
3211 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
3212 pci_disable_msi(adapter->pdev);
3213 }
3215 while (v_idx--)
3216 igc_reset_q_vector(adapter, v_idx);
3217 }
3220 * igc_set_interrupt_capability - set MSI or MSI-X if supported
3221 * @adapter: Pointer to adapter structure
3222 * @msix: boolean value for MSI-X capability
3224 * Attempt to configure interrupts using the best available
3225 * capabilities of the hardware and kernel.
3227 static void igc_set_interrupt_capability(struct igc_adapter *adapter,
3228 bool msix)
3229 {
3230 int numvecs, i;
3231 int err;
3233 if (!msix)
3234 goto msi_only;
3235 adapter->flags |= IGC_FLAG_HAS_MSIX;
3237 /* Number of supported queues. */
3238 adapter->num_rx_queues = adapter->rss_queues;
3240 adapter->num_tx_queues = adapter->rss_queues;
3242 /* start with one vector for every Rx queue */
3243 numvecs = adapter->num_rx_queues;
3245 /* if Tx handler is separate add 1 for every Tx queue */
3246 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
3247 numvecs += adapter->num_tx_queues;
3249 /* store the number of vectors reserved for queues */
3250 adapter->num_q_vectors = numvecs;
3252 /* add 1 vector for link status interrupts */
3253 numvecs++;
3255 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
3256 GFP_KERNEL);
3258 if (!adapter->msix_entries)
3259 return;
3261 /* populate entry values */
3262 for (i = 0; i < numvecs; i++)
3263 adapter->msix_entries[i].entry = i;
3265 err = pci_enable_msix_range(adapter->pdev,
3266 adapter->msix_entries,
3267 numvecs,
3268 numvecs);
3269 if (err > 0)
3270 return;
3272 kfree(adapter->msix_entries);
3273 adapter->msix_entries = NULL;
3275 igc_reset_interrupt_capability(adapter);
3277 msi_only:
3278 adapter->flags &= ~IGC_FLAG_HAS_MSIX;
3280 adapter->rss_queues = 1;
3281 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3282 adapter->num_rx_queues = 1;
3283 adapter->num_tx_queues = 1;
3284 adapter->num_q_vectors = 1;
3285 if (!pci_enable_msi(adapter->pdev))
3286 adapter->flags |= IGC_FLAG_HAS_MSI;
3287 }
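/* Worked example of the vector budget: with rss_queues = 4 and paired
 * queues, numvecs is 4 (one per Rx/Tx pair) plus 1 for the link-status
 * "other" vector, so pci_enable_msix_range() is asked for exactly 5
 * entries; with pairing disabled it would be 4 + 4 + 1 = 9.
 */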
3290 * igc_update_ring_itr - update the dynamic ITR value based on packet size
3291 * @q_vector: pointer to q_vector
3293 * Stores a new ITR value based strictly on packet size. This
3294 * algorithm is less sophisticated than that used in igc_update_itr,
3295 * due to the difficulty of synchronizing statistics across multiple
3296 * receive rings. The divisors and thresholds used by this function
3297 * were determined based on theoretical maximum wire speed and testing
3298 * data, in order to minimize response time while increasing bulk
3299 * throughput.
3300 * NOTE: This function is called only when operating in a multiqueue
3301 * receive environment.
3303 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
3305 struct igc_adapter *adapter = q_vector->adapter;
3306 int new_val = q_vector->itr_val;
3307 int avg_wire_size = 0;
3308 unsigned int packets;
3310 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3311 * ints/sec - ITR timer value of 120 ticks.
3313 switch (adapter->link_speed) {
3316 new_val = IGC_4K_ITR;
3317 goto set_itr_val;
3318 default:
3319 break;
3320 }
3322 packets = q_vector->rx.total_packets;
3323 if (packets)
3324 avg_wire_size = q_vector->rx.total_bytes / packets;
3326 packets = q_vector->tx.total_packets;
3327 if (packets)
3328 avg_wire_size = max_t(u32, avg_wire_size,
3329 q_vector->tx.total_bytes / packets);
3331 /* if avg_wire_size isn't set no work was done */
3332 if (!avg_wire_size)
3333 goto clear_counts;
3335 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3336 avg_wire_size += 24;
3338 /* Don't starve jumbo frames */
3339 avg_wire_size = min(avg_wire_size, 3000);
3341 /* Give a little boost to mid-size frames */
3342 if (avg_wire_size > 300 && avg_wire_size < 1200)
3343 new_val = avg_wire_size / 3;
3344 else
3345 new_val = avg_wire_size / 2;
3347 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3348 if (new_val < IGC_20K_ITR &&
3349 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3350 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3351 new_val = IGC_20K_ITR;
3353 set_itr_val:
3354 if (new_val != q_vector->itr_val) {
3355 q_vector->itr_val = new_val;
3356 q_vector->set_itr = 1;
3357 }
3358 clear_counts:
3359 q_vector->rx.total_bytes = 0;
3360 q_vector->rx.total_packets = 0;
3361 q_vector->tx.total_bytes = 0;
3362 q_vector->tx.total_packets = 0;
3363 }
3365 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
3367 struct igc_adapter *adapter = q_vector->adapter;
3368 struct igc_hw *hw = &adapter->hw;
3370 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
3371 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
3372 if (adapter->num_q_vectors == 1)
3373 igc_set_itr(q_vector);
3374 else
3375 igc_update_ring_itr(q_vector);
3376 }
3378 if (!test_bit(__IGC_DOWN, &adapter->state)) {
3379 if (adapter->msix_entries)
3380 wr32(IGC_EIMS, q_vector->eims_value);
3381 else
3382 igc_irq_enable(adapter);
3383 }
3384 }
3386 static void igc_add_ring(struct igc_ring *ring,
3387 struct igc_ring_container *head)
3388 {
3389 head->ring = ring;
3390 head->count++;
3391 }
3394 * igc_cache_ring_register - Descriptor ring to register mapping
3395 * @adapter: board private structure to initialize
3397 * Once we know the feature-set enabled for the device, we'll cache
3398 * the register offset the descriptor ring is assigned to.
3400 static void igc_cache_ring_register(struct igc_adapter *adapter)
3401 {
3402 int i = 0, j = 0;
3404 switch (adapter->hw.mac.type) {
3405 case igc_i225:
3406 default:
3407 for (; i < adapter->num_rx_queues; i++)
3408 adapter->rx_ring[i]->reg_idx = i;
3409 for (; j < adapter->num_tx_queues; j++)
3410 adapter->tx_ring[j]->reg_idx = j;
3411 break;
3412 }
3413 }
3416 * igc_poll - NAPI Rx polling callback
3417 * @napi: napi polling structure
3418 * @budget: count of how many packets we should handle
3420 static int igc_poll(struct napi_struct *napi, int budget)
3421 {
3422 struct igc_q_vector *q_vector = container_of(napi,
3423 struct igc_q_vector,
3424 napi);
3425 bool clean_complete = true;
3426 int work_done = 0;
3428 if (q_vector->tx.ring)
3429 clean_complete = igc_clean_tx_irq(q_vector, budget);
3431 if (q_vector->rx.ring) {
3432 int cleaned = igc_clean_rx_irq(q_vector, budget);
3434 work_done += cleaned;
3435 if (cleaned >= budget)
3436 clean_complete = false;
3439 /* If all work not completed, return budget and keep polling */
3440 if (!clean_complete)
3441 return budget;
3443 /* Exit the polling mode, but don't re-enable interrupts if stack might
3444 * poll us due to busy-polling
3446 if (likely(napi_complete_done(napi, work_done)))
3447 igc_ring_irq_enable(q_vector);
3449 return min(work_done, budget - 1);
3450 }
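/* NAPI contract note: returning the full budget tells the core to keep
 * polling, so a completed poll must report strictly less than budget;
 * min(work_done, budget - 1) guarantees that even when Rx consumed
 * everything, and napi_complete_done() can decline the IRQ re-enable
 * while busy polling is active.
 */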
3453 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
3454 * @adapter: board private structure to initialize
3455 * @v_count: q_vectors allocated on adapter, used for ring interleaving
3456 * @v_idx: index of vector in adapter struct
3457 * @txr_count: total number of Tx rings to allocate
3458 * @txr_idx: index of first Tx ring to allocate
3459 * @rxr_count: total number of Rx rings to allocate
3460 * @rxr_idx: index of first Rx ring to allocate
3462 * We allocate one q_vector. If allocation fails we return -ENOMEM.
3464 static int igc_alloc_q_vector(struct igc_adapter *adapter,
3465 unsigned int v_count, unsigned int v_idx,
3466 unsigned int txr_count, unsigned int txr_idx,
3467 unsigned int rxr_count, unsigned int rxr_idx)
3469 struct igc_q_vector *q_vector;
3470 struct igc_ring *ring;
3471 int ring_count;
3473 /* igc only supports 1 Tx and/or 1 Rx queue per vector */
3474 if (txr_count > 1 || rxr_count > 1)
3475 return -ENOMEM;
3477 ring_count = txr_count + rxr_count;
3479 /* allocate q_vector and rings */
3480 q_vector = adapter->q_vector[v_idx];
3481 if (!q_vector)
3482 q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
3483 GFP_KERNEL);
3484 else
3485 memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
3486 if (!q_vector)
3487 return -ENOMEM;
3489 /* initialize NAPI */
3490 netif_napi_add(adapter->netdev, &q_vector->napi,
3491 igc_poll, 64);
3493 /* tie q_vector and adapter together */
3494 adapter->q_vector[v_idx] = q_vector;
3495 q_vector->adapter = adapter;
3497 /* initialize work limits */
3498 q_vector->tx.work_limit = adapter->tx_work_limit;
3500 /* initialize ITR configuration */
3501 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
3502 q_vector->itr_val = IGC_START_ITR;
3504 /* initialize pointer to rings */
3505 ring = q_vector->ring;
3507 /* initialize ITR */
3508 if (rxr_count) {
3509 /* rx or rx/tx vector */
3510 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
3511 q_vector->itr_val = adapter->rx_itr_setting;
3512 } else {
3513 /* tx only vector */
3514 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
3515 q_vector->itr_val = adapter->tx_itr_setting;
3516 }
3518 if (txr_count) {
3519 /* assign generic ring traits */
3520 ring->dev = &adapter->pdev->dev;
3521 ring->netdev = adapter->netdev;
3523 /* configure backlink on ring */
3524 ring->q_vector = q_vector;
3526 /* update q_vector Tx values */
3527 igc_add_ring(ring, &q_vector->tx);
3529 /* apply Tx specific ring traits */
3530 ring->count = adapter->tx_ring_count;
3531 ring->queue_index = txr_idx;
3533 /* assign ring to adapter */
3534 adapter->tx_ring[txr_idx] = ring;
3536 /* push pointer to next ring */
3537 ring++;
3538 }
3540 if (rxr_count) {
3541 /* assign generic ring traits */
3542 ring->dev = &adapter->pdev->dev;
3543 ring->netdev = adapter->netdev;
3545 /* configure backlink on ring */
3546 ring->q_vector = q_vector;
3548 /* update q_vector Rx values */
3549 igc_add_ring(ring, &q_vector->rx);
3551 /* apply Rx specific ring traits */
3552 ring->count = adapter->rx_ring_count;
3553 ring->queue_index = rxr_idx;
3555 /* assign ring to adapter */
3556 adapter->rx_ring[rxr_idx] = ring;
3557 }
3559 return 0;
3560 }
3563 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
3564 * @adapter: board private structure to initialize
3566 * We allocate one q_vector per queue interrupt. If allocation fails we
3569 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
3571 int rxr_remaining = adapter->num_rx_queues;
3572 int txr_remaining = adapter->num_tx_queues;
3573 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
3574 int q_vectors = adapter->num_q_vectors;
3575 int err;
3577 if (q_vectors >= (rxr_remaining + txr_remaining)) {
3578 for (; rxr_remaining; v_idx++) {
3579 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3580 0, 0, 1, rxr_idx);
3582 if (err)
3583 goto err_out;
3585 /* update counts and index */
3586 rxr_remaining--;
3587 rxr_idx++;
3588 }
3589 }
3591 for (; v_idx < q_vectors; v_idx++) {
3592 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
3593 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
3595 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3596 tqpv, txr_idx, rqpv, rxr_idx);
3598 if (err)
3599 goto err_out;
3601 /* update counts and index */
3602 rxr_remaining -= rqpv;
3603 txr_remaining -= tqpv;
3604 rxr_idx++;
3605 txr_idx++;
3606 }
3608 return 0;
3610 err_out:
3611 adapter->num_tx_queues = 0;
3612 adapter->num_rx_queues = 0;
3613 adapter->num_q_vectors = 0;
3615 while (v_idx--)
3616 igc_free_q_vector(adapter, v_idx);
3618 return -ENOMEM;
3619 }
3622 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
3623 * @adapter: Pointer to adapter structure
3624 * @msix: boolean for MSI-X capability
3626 * This function initializes the interrupts and allocates all of the queues.
3628 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
3630 struct net_device *dev = adapter->netdev;
3631 int err = 0;
3633 igc_set_interrupt_capability(adapter, msix);
3635 err = igc_alloc_q_vectors(adapter);
3637 netdev_err(dev, "Unable to allocate memory for vectors\n");
3638 goto err_alloc_q_vectors;
3641 igc_cache_ring_register(adapter);
3643 return 0;
3645 err_alloc_q_vectors:
3646 igc_reset_interrupt_capability(adapter);
3647 return err;
3648 }
3651 * igc_sw_init - Initialize general software structures (struct igc_adapter)
3652 * @adapter: board private structure to initialize
3654 * igc_sw_init initializes the Adapter private data structure.
3655 * Fields are initialized based on PCI device information and
3656 * OS network device settings (MTU size).
3658 static int igc_sw_init(struct igc_adapter *adapter)
3660 struct net_device *netdev = adapter->netdev;
3661 struct pci_dev *pdev = adapter->pdev;
3662 struct igc_hw *hw = &adapter->hw;
3664 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3666 /* set default ring sizes */
3667 adapter->tx_ring_count = IGC_DEFAULT_TXD;
3668 adapter->rx_ring_count = IGC_DEFAULT_RXD;
3670 /* set default ITR values */
3671 adapter->rx_itr_setting = IGC_DEFAULT_ITR;
3672 adapter->tx_itr_setting = IGC_DEFAULT_ITR;
3674 /* set default work limits */
3675 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
3677 /* adjust max frame to be at least the size of a standard frame */
3678 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3679 VLAN_HLEN;
3680 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3682 mutex_init(&adapter->nfc_rule_lock);
3683 INIT_LIST_HEAD(&adapter->nfc_rule_list);
3684 adapter->nfc_rule_count = 0;
3686 spin_lock_init(&adapter->stats64_lock);
3687 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
3688 adapter->flags |= IGC_FLAG_HAS_MSIX;
3690 igc_init_queue_configuration(adapter);
3692 /* This call may decrease the number of queues */
3693 if (igc_init_interrupt_scheme(adapter, true)) {
3694 netdev_err(netdev, "Unable to allocate memory for queues\n");
3698 /* Explicitly disable IRQ since the NIC can be in any state. */
3699 igc_irq_disable(adapter);
3701 set_bit(__IGC_DOWN, &adapter->state);
3703 return 0;
3704 }
3707 * igc_up - Open the interface and prepare it to handle traffic
3708 * @adapter: board private structure
3710 void igc_up(struct igc_adapter *adapter)
3711 {
3712 struct igc_hw *hw = &adapter->hw;
3713 int i = 0;
3715 /* hardware has been reset, we need to reload some things */
3716 igc_configure(adapter);
3718 clear_bit(__IGC_DOWN, &adapter->state);
3720 for (i = 0; i < adapter->num_q_vectors; i++)
3721 napi_enable(&adapter->q_vector[i]->napi);
3723 if (adapter->msix_entries)
3724 igc_configure_msix(adapter);
3726 igc_assign_vector(adapter->q_vector[0], 0);
3728 /* Clear any pending interrupts. */
3729 rd32(IGC_ICR);
3730 igc_irq_enable(adapter);
3732 netif_tx_start_all_queues(adapter->netdev);
3734 /* start the watchdog. */
3735 hw->mac.get_link_status = 1;
3736 schedule_work(&adapter->watchdog_task);
3737 }
3740 * igc_update_stats - Update the board statistics counters
3741 * @adapter: board private structure
3743 void igc_update_stats(struct igc_adapter *adapter)
3745 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
3746 struct pci_dev *pdev = adapter->pdev;
3747 struct igc_hw *hw = &adapter->hw;
3748 u64 _bytes, _packets;
3749 u64 bytes, packets;
3750 unsigned int start;
3751 u32 mpc;
3752 int i;
3754 /* Prevent stats update while adapter is being reset, or if the pci
3755 * connection is down.
3756 */
3757 if (adapter->link_speed == 0)
3758 return;
3759 if (pci_channel_offline(pdev))
3760 return;
3762 packets = 0;
3763 bytes = 0;
3765 rcu_read_lock();
3766 for (i = 0; i < adapter->num_rx_queues; i++) {
3767 struct igc_ring *ring = adapter->rx_ring[i];
3768 u32 rqdpc = rd32(IGC_RQDPC(i));
3770 if (hw->mac.type >= igc_i225)
3771 wr32(IGC_RQDPC(i), 0);
3773 if (rqdpc) {
3774 ring->rx_stats.drops += rqdpc;
3775 net_stats->rx_fifo_errors += rqdpc;
3776 }
3778 do {
3779 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
3780 _bytes = ring->rx_stats.bytes;
3781 _packets = ring->rx_stats.packets;
3782 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
3783 bytes += _bytes;
3784 packets += _packets;
3785 }
3787 net_stats->rx_bytes = bytes;
3788 net_stats->rx_packets = packets;
3790 packets = 0;
3791 bytes = 0;
3792 for (i = 0; i < adapter->num_tx_queues; i++) {
3793 struct igc_ring *ring = adapter->tx_ring[i];
3795 do {
3796 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
3797 _bytes = ring->tx_stats.bytes;
3798 _packets = ring->tx_stats.packets;
3799 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
3800 bytes += _bytes;
3801 packets += _packets;
3802 }
3803 net_stats->tx_bytes = bytes;
3804 net_stats->tx_packets = packets;
3805 rcu_read_unlock();
3807 /* read stats registers */
3808 adapter->stats.crcerrs += rd32(IGC_CRCERRS);
3809 adapter->stats.gprc += rd32(IGC_GPRC);
3810 adapter->stats.gorc += rd32(IGC_GORCL);
3811 rd32(IGC_GORCH); /* clear GORCL */
3812 adapter->stats.bprc += rd32(IGC_BPRC);
3813 adapter->stats.mprc += rd32(IGC_MPRC);
3814 adapter->stats.roc += rd32(IGC_ROC);
3816 adapter->stats.prc64 += rd32(IGC_PRC64);
3817 adapter->stats.prc127 += rd32(IGC_PRC127);
3818 adapter->stats.prc255 += rd32(IGC_PRC255);
3819 adapter->stats.prc511 += rd32(IGC_PRC511);
3820 adapter->stats.prc1023 += rd32(IGC_PRC1023);
3821 adapter->stats.prc1522 += rd32(IGC_PRC1522);
3822 adapter->stats.tlpic += rd32(IGC_TLPIC);
3823 adapter->stats.rlpic += rd32(IGC_RLPIC);
3825 mpc = rd32(IGC_MPC);
3826 adapter->stats.mpc += mpc;
3827 net_stats->rx_fifo_errors += mpc;
3828 adapter->stats.scc += rd32(IGC_SCC);
3829 adapter->stats.ecol += rd32(IGC_ECOL);
3830 adapter->stats.mcc += rd32(IGC_MCC);
3831 adapter->stats.latecol += rd32(IGC_LATECOL);
3832 adapter->stats.dc += rd32(IGC_DC);
3833 adapter->stats.rlec += rd32(IGC_RLEC);
3834 adapter->stats.xonrxc += rd32(IGC_XONRXC);
3835 adapter->stats.xontxc += rd32(IGC_XONTXC);
3836 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
3837 adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
3838 adapter->stats.fcruc += rd32(IGC_FCRUC);
3839 adapter->stats.gptc += rd32(IGC_GPTC);
3840 adapter->stats.gotc += rd32(IGC_GOTCL);
3841 rd32(IGC_GOTCH); /* clear GOTCL */
3842 adapter->stats.rnbc += rd32(IGC_RNBC);
3843 adapter->stats.ruc += rd32(IGC_RUC);
3844 adapter->stats.rfc += rd32(IGC_RFC);
3845 adapter->stats.rjc += rd32(IGC_RJC);
3846 adapter->stats.tor += rd32(IGC_TORH);
3847 adapter->stats.tot += rd32(IGC_TOTH);
3848 adapter->stats.tpr += rd32(IGC_TPR);
3850 adapter->stats.ptc64 += rd32(IGC_PTC64);
3851 adapter->stats.ptc127 += rd32(IGC_PTC127);
3852 adapter->stats.ptc255 += rd32(IGC_PTC255);
3853 adapter->stats.ptc511 += rd32(IGC_PTC511);
3854 adapter->stats.ptc1023 += rd32(IGC_PTC1023);
3855 adapter->stats.ptc1522 += rd32(IGC_PTC1522);
3857 adapter->stats.mptc += rd32(IGC_MPTC);
3858 adapter->stats.bptc += rd32(IGC_BPTC);
3860 adapter->stats.tpt += rd32(IGC_TPT);
3861 adapter->stats.colc += rd32(IGC_COLC);
3862 adapter->stats.colc += rd32(IGC_RERC);
3864 adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
3866 adapter->stats.tsctc += rd32(IGC_TSCTC);
3868 adapter->stats.iac += rd32(IGC_IAC);
3870 /* Fill out the OS statistics structure */
3871 net_stats->multicast = adapter->stats.mprc;
3872 net_stats->collisions = adapter->stats.colc;
3876 /* RLEC on some newer hardware can be incorrect so build
3877 * our own version based on RUC and ROC
3879 net_stats->rx_errors = adapter->stats.rxerrc +
3880 adapter->stats.crcerrs + adapter->stats.algnerrc +
3881 adapter->stats.ruc + adapter->stats.roc +
3882 adapter->stats.cexterr;
3883 net_stats->rx_length_errors = adapter->stats.ruc +
3884 adapter->stats.roc;
3885 net_stats->rx_crc_errors = adapter->stats.crcerrs;
3886 net_stats->rx_frame_errors = adapter->stats.algnerrc;
3887 net_stats->rx_missed_errors = adapter->stats.mpc;
3890 net_stats->tx_errors = adapter->stats.ecol +
3891 adapter->stats.latecol;
3892 net_stats->tx_aborted_errors = adapter->stats.ecol;
3893 net_stats->tx_window_errors = adapter->stats.latecol;
3894 net_stats->tx_carrier_errors = adapter->stats.tncrs;
3896 /* Tx Dropped needs to be maintained elsewhere */
3898 /* Management Stats */
3899 adapter->stats.mgptc += rd32(IGC_MGTPTC);
3900 adapter->stats.mgprc += rd32(IGC_MGTPRC);
3901 adapter->stats.mgpdc += rd32(IGC_MGTPDC);
3902 }
3905 * igc_down - Close the interface
3906 * @adapter: board private structure
3908 void igc_down(struct igc_adapter *adapter)
3910 struct net_device *netdev = adapter->netdev;
3911 struct igc_hw *hw = &adapter->hw;
3912 u32 tctl, rctl;
3913 int i = 0;
3915 set_bit(__IGC_DOWN, &adapter->state);
3917 igc_ptp_suspend(adapter);
3919 if (pci_device_is_present(adapter->pdev)) {
3920 /* disable receives in the hardware */
3921 rctl = rd32(IGC_RCTL);
3922 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
3923 /* flush and sleep below */
3924 }
3925 /* set trans_start so we don't get spurious watchdogs during reset */
3926 netif_trans_update(netdev);
3928 netif_carrier_off(netdev);
3929 netif_tx_stop_all_queues(netdev);
3931 if (pci_device_is_present(adapter->pdev)) {
3932 /* disable transmits in the hardware */
3933 tctl = rd32(IGC_TCTL);
3934 tctl &= ~IGC_TCTL_EN;
3935 wr32(IGC_TCTL, tctl);
3936 /* flush both disables and wait for them to finish */
3937 wrfl();
3938 usleep_range(10000, 20000);
3939 }
3940 igc_irq_disable(adapter);
3943 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
3945 for (i = 0; i < adapter->num_q_vectors; i++) {
3946 if (adapter->q_vector[i]) {
3947 napi_synchronize(&adapter->q_vector[i]->napi);
3948 napi_disable(&adapter->q_vector[i]->napi);
3949 }
3950 }
3952 del_timer_sync(&adapter->watchdog_timer);
3953 del_timer_sync(&adapter->phy_info_timer);
3955 /* record the stats before reset*/
3956 spin_lock(&adapter->stats64_lock);
3957 igc_update_stats(adapter);
3958 spin_unlock(&adapter->stats64_lock);
3960 adapter->link_speed = 0;
3961 adapter->link_duplex = 0;
3963 if (!pci_channel_offline(adapter->pdev))
3964 igc_reset(adapter);
3966 /* clear VLAN promisc flag so VFTA will be updated if necessary */
3967 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
3969 igc_clean_all_tx_rings(adapter);
3970 igc_clean_all_rx_rings(adapter);
3971 }
3973 void igc_reinit_locked(struct igc_adapter *adapter)
3975 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
3976 usleep_range(1000, 2000);
3977 igc_down(adapter);
3978 igc_up(adapter);
3979 clear_bit(__IGC_RESETTING, &adapter->state);
3980 }
3982 static void igc_reset_task(struct work_struct *work)
3984 struct igc_adapter *adapter;
3986 adapter = container_of(work, struct igc_adapter, reset_task);
3988 rtnl_lock();
3989 /* If we're already down or resetting, just bail */
3990 if (test_bit(__IGC_DOWN, &adapter->state) ||
3991 test_bit(__IGC_RESETTING, &adapter->state)) {
3992 rtnl_unlock();
3993 return;
3994 }
3996 igc_rings_dump(adapter);
3997 igc_regs_dump(adapter);
3998 netdev_err(adapter->netdev, "Reset adapter\n");
3999 igc_reinit_locked(adapter);
4000 rtnl_unlock();
4001 }
4004 * igc_change_mtu - Change the Maximum Transfer Unit
4005 * @netdev: network interface device structure
4006 * @new_mtu: new value for maximum frame size
4008 * Returns 0 on success, negative on failure
4010 static int igc_change_mtu(struct net_device *netdev, int new_mtu)
4012 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4013 struct igc_adapter *adapter = netdev_priv(netdev);
4015 /* adjust max frame to be at least the size of a standard frame */
4016 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4017 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
4019 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
4020 usleep_range(1000, 2000);
4022 /* igc_down has a dependency on max_frame_size */
4023 adapter->max_frame_size = max_frame;
4025 if (netif_running(netdev))
4026 igc_down(adapter);
4028 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4029 netdev->mtu = new_mtu;
4031 if (netif_running(netdev))
4032 igc_up(adapter);
4033 else
4034 igc_reset(adapter);
4036 clear_bit(__IGC_RESETTING, &adapter->state);
4038 return 0;
4039 }
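/* Worked example of the max_frame arithmetic above: new_mtu = 9000
 * reserves ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) of headroom,
 * giving max_frame = 9022, while an undersized MTU is rounded up so
 * max_frame never drops below a standard 1518-byte frame.
 */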
4042 * igc_get_stats64 - Get System Network Statistics
4043 * @netdev: network interface device structure
4044 * @stats: rtnl_link_stats64 pointer
4046 * Returns the address of the device statistics structure.
4047 * The statistics are updated here and also from the timer callback.
4049 static void igc_get_stats64(struct net_device *netdev,
4050 struct rtnl_link_stats64 *stats)
4052 struct igc_adapter *adapter = netdev_priv(netdev);
4054 spin_lock(&adapter->stats64_lock);
4055 if (!test_bit(__IGC_RESETTING, &adapter->state))
4056 igc_update_stats(adapter);
4057 memcpy(stats, &adapter->stats64, sizeof(*stats));
4058 spin_unlock(&adapter->stats64_lock);
4059 }
4061 static netdev_features_t igc_fix_features(struct net_device *netdev,
4062 netdev_features_t features)
4064 /* Since there is no support for separate Rx/Tx vlan accel
4065 * enable/disable make sure Tx flag is always in same state as Rx.
4067 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4068 features |= NETIF_F_HW_VLAN_CTAG_TX;
4069 else
4070 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4072 return features;
4073 }
4075 static int igc_set_features(struct net_device *netdev,
4076 netdev_features_t features)
4078 netdev_features_t changed = netdev->features ^ features;
4079 struct igc_adapter *adapter = netdev_priv(netdev);
4081 /* Add VLAN support */
4082 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
4083 return 0;
4085 if (!(features & NETIF_F_NTUPLE))
4086 igc_flush_nfc_rules(adapter);
4088 netdev->features = features;
4090 if (netif_running(netdev))
4091 igc_reinit_locked(adapter);
4092 else
4093 igc_reset(adapter);
4095 return 1;
4096 }
4098 static netdev_features_t
4099 igc_features_check(struct sk_buff *skb, struct net_device *dev,
4100 netdev_features_t features)
4102 unsigned int network_hdr_len, mac_hdr_len;
4104 /* Make certain the headers can be described by a context descriptor */
4105 mac_hdr_len = skb_network_header(skb) - skb->data;
4106 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
4107 return features & ~(NETIF_F_HW_CSUM |
4108 NETIF_F_SCTP_CRC |
4109 NETIF_F_HW_VLAN_CTAG_TX |
4110 NETIF_F_TSO |
4111 NETIF_F_TSO6);
4113 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4114 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
4115 return features & ~(NETIF_F_HW_CSUM |
4116 NETIF_F_SCTP_CRC |
4117 NETIF_F_TSO |
4118 NETIF_F_TSO6);
4120 /* We can only support IPv4 TSO in tunnels if we can mangle the
4121 * inner IP ID field, so strip TSO if MANGLEID is not supported.
4123 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4124 features &= ~NETIF_F_TSO;
4126 return features;
4127 }
4129 static void igc_tsync_interrupt(struct igc_adapter *adapter)
4131 struct igc_hw *hw = &adapter->hw;
4132 u32 tsicr = rd32(IGC_TSICR);
4133 u32 ack = 0;
4135 if (tsicr & IGC_TSICR_TXTS) {
4136 /* retrieve hardware timestamp */
4137 schedule_work(&adapter->ptp_tx_work);
4138 ack |= IGC_TSICR_TXTS;
4141 /* acknowledge the interrupts */
4142 wr32(IGC_TSICR, ack);
4143 }
4146 * igc_msix_other - msix other interrupt handler
4147 * @irq: interrupt number
4148 * @data: pointer to a q_vector
4150 static irqreturn_t igc_msix_other(int irq, void *data)
4152 struct igc_adapter *adapter = data;
4153 struct igc_hw *hw = &adapter->hw;
4154 u32 icr = rd32(IGC_ICR);
4156 /* reading ICR causes bit 31 of EICR to be cleared */
4157 if (icr & IGC_ICR_DRSTA)
4158 schedule_work(&adapter->reset_task);
4160 if (icr & IGC_ICR_DOUTSYNC) {
4161 /* HW is reporting DMA is out of sync */
4162 adapter->stats.doosync++;
4165 if (icr & IGC_ICR_LSC) {
4166 hw->mac.get_link_status = 1;
4167 /* guard against interrupt when we're going down */
4168 if (!test_bit(__IGC_DOWN, &adapter->state))
4169 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4172 if (icr & IGC_ICR_TS)
4173 igc_tsync_interrupt(adapter);
4175 wr32(IGC_EIMS, adapter->eims_other);
4177 return IRQ_HANDLED;
4178 }
4180 static void igc_write_itr(struct igc_q_vector *q_vector)
4182 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
4184 if (!q_vector->set_itr)
4185 return;
4187 if (!itr_val)
4188 itr_val = IGC_ITR_VAL_MASK;
4190 itr_val |= IGC_EITR_CNT_IGNR;
4192 writel(itr_val, q_vector->itr_register);
4193 q_vector->set_itr = 0;
4194 }
4196 static irqreturn_t igc_msix_ring(int irq, void *data)
4198 struct igc_q_vector *q_vector = data;
4200 /* Write the ITR value calculated from the previous interrupt. */
4201 igc_write_itr(q_vector);
4203 napi_schedule(&q_vector->napi);
4205 return IRQ_HANDLED;
4206 }
4209 * igc_request_msix - Initialize MSI-X interrupts
4210 * @adapter: Pointer to adapter structure
4212 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
4215 static int igc_request_msix(struct igc_adapter *adapter)
4217 unsigned int num_q_vectors = adapter->num_q_vectors;
4218 int i = 0, err = 0, vector = 0, free_vector = 0;
4219 struct net_device *netdev = adapter->netdev;
4221 err = request_irq(adapter->msix_entries[vector].vector,
4222 &igc_msix_other, 0, netdev->name, adapter);
4223 if (err)
4224 goto err_out;
4226 if (num_q_vectors > MAX_Q_VECTORS) {
4227 num_q_vectors = MAX_Q_VECTORS;
4228 dev_warn(&adapter->pdev->dev,
4229 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
4230 adapter->num_q_vectors, MAX_Q_VECTORS);
4231 }
4232 for (i = 0; i < num_q_vectors; i++) {
4233 struct igc_q_vector *q_vector = adapter->q_vector[i];
4235 vector++;
4237 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
4239 if (q_vector->rx.ring && q_vector->tx.ring)
4240 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
4241 q_vector->rx.ring->queue_index);
4242 else if (q_vector->tx.ring)
4243 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
4244 q_vector->tx.ring->queue_index);
4245 else if (q_vector->rx.ring)
4246 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
4247 q_vector->rx.ring->queue_index);
4249 sprintf(q_vector->name, "%s-unused", netdev->name);
4251 err = request_irq(adapter->msix_entries[vector].vector,
4252 igc_msix_ring, 0, q_vector->name,
4253 q_vector);
4254 if (err)
4255 goto err_free;
4256 }
4258 igc_configure_msix(adapter);
4259 return 0;
4261 err_free:
4262 /* free already assigned IRQs */
4263 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
4265 vector--;
4266 for (i = 0; i < vector; i++) {
4267 free_irq(adapter->msix_entries[free_vector++].vector,
4268 adapter->q_vector[i]);
4269 }
4270 err_out:
4271 return err;
4272 }
4275 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
4276 * @adapter: Pointer to adapter structure
4278 * This function resets the device so that it has 0 rx queues, tx queues, and
4279 * MSI-X interrupts allocated.
4281 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
4283 igc_free_q_vectors(adapter);
4284 igc_reset_interrupt_capability(adapter);
4285 }
4287 /* Need to wait a few seconds after link up to get diagnostic information from
4288 * the phy
4289 */
4290 static void igc_update_phy_info(struct timer_list *t)
4292 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
4294 igc_get_phy_info(&adapter->hw);
4295 }
4298 * igc_has_link - check shared code for link and determine up/down
4299 * @adapter: pointer to driver private info
4301 bool igc_has_link(struct igc_adapter *adapter)
4303 struct igc_hw *hw = &adapter->hw;
4304 bool link_active = false;
4306 /* get_link_status is set on LSC (link status) interrupt or
4307 * rx sequence error interrupt. get_link_status will stay
4308 * false until the igc_check_for_link establishes link
4309 * for copper adapters ONLY
4310 */
4311 if (!hw->mac.get_link_status)
4312 return true;
4313 hw->mac.ops.check_for_link(hw);
4314 link_active = !hw->mac.get_link_status;
4316 if (hw->mac.type == igc_i225) {
4317 if (!netif_carrier_ok(adapter->netdev)) {
4318 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4319 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
4320 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
4321 adapter->link_check_timeout = jiffies;
4322 }
4323 }
4325 return link_active;
4326 }
4329 * igc_watchdog - Timer Call-back
4330 * @t: timer for the watchdog
4332 static void igc_watchdog(struct timer_list *t)
4334 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
4335 /* Do the rest outside of interrupt context */
4336 schedule_work(&adapter->watchdog_task);
4337 }
4339 static void igc_watchdog_task(struct work_struct *work)
4340 {
4341 struct igc_adapter *adapter = container_of(work,
4342 struct igc_adapter,
4343 watchdog_task);
4344 struct net_device *netdev = adapter->netdev;
4345 struct igc_hw *hw = &adapter->hw;
4346 struct igc_phy_info *phy = &hw->phy;
4347 u16 phy_data, retry_count = 20;
4348 u32 link;
4349 int i;
4351 link = igc_has_link(adapter);
4353 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
4354 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
4355 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4356 else
4357 link = false;
4358 }
4360 if (link) {
4361 /* Cancel scheduled suspend requests. */
4362 pm_runtime_resume(netdev->dev.parent);
4364 if (!netif_carrier_ok(netdev)) {
4365 u32 ctrl;
4367 hw->mac.ops.get_speed_and_duplex(hw,
4368 &adapter->link_speed,
4369 &adapter->link_duplex);
4371 ctrl = rd32(IGC_CTRL);
4372 /* Link status message must follow this format */
4373 netdev_info(netdev,
4374 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4375 adapter->link_speed,
4376 adapter->link_duplex == FULL_DUPLEX ?
4377 "Full" : "Half",
4378 (ctrl & IGC_CTRL_TFCE) &&
4379 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
4380 (ctrl & IGC_CTRL_RFCE) ? "RX" :
4381 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
4383 /* disable EEE if enabled */
4384 if ((adapter->flags & IGC_FLAG_EEE) &&
4385 adapter->link_duplex == HALF_DUPLEX) {
4387 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
4388 adapter->hw.dev_spec._base.eee_enable = false;
4389 adapter->flags &= ~IGC_FLAG_EEE;
4390 }
4392 /* check if SmartSpeed worked */
4393 igc_check_downshift(hw);
4394 if (phy->speed_downgraded)
4395 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
4397 /* adjust timeout factor according to speed/duplex */
4398 adapter->tx_timeout_factor = 1;
4399 switch (adapter->link_speed) {
4400 case SPEED_10:
4401 adapter->tx_timeout_factor = 14;
4402 break;
4403 case SPEED_100:
4404 /* maybe add some timeout factor ? */
4405 break;
4406 }
4408 if (adapter->link_speed != SPEED_1000)
4409 goto no_wait;
4411 /* wait for Remote receiver status OK */
4412 retry_read_status:
4413 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
4414 &phy_data)) {
4415 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
4416 retry_count) {
4417 msleep(100);
4418 retry_count--;
4419 goto retry_read_status;
4420 } else if (!retry_count) {
4421 netdev_err(netdev, "exceed max 2 second\n");
4424 netdev_err(netdev, "read 1000Base-T Status Reg\n");
4427 netif_carrier_on(netdev);
4429 /* link state has changed, schedule phy info update */
4430 if (!test_bit(__IGC_DOWN, &adapter->state))
4431 mod_timer(&adapter->phy_info_timer,
4432 round_jiffies(jiffies + 2 * HZ));
4433 }
4434 } else {
4435 if (netif_carrier_ok(netdev)) {
4436 adapter->link_speed = 0;
4437 adapter->link_duplex = 0;
4439 /* Link status message must follow this format */
4440 netdev_info(netdev, "NIC Link is Down\n");
4441 netif_carrier_off(netdev);
4443 /* link state has changed, schedule phy info update */
4444 if (!test_bit(__IGC_DOWN, &adapter->state))
4445 mod_timer(&adapter->phy_info_timer,
4446 round_jiffies(jiffies + 2 * HZ));
4448 /* link is down, time to check for alternate media */
4449 if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
4450 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
4451 schedule_work(&adapter->reset_task);
4452 /* return immediately */
4453 return;
4454 }
4455 }
4456 pm_schedule_suspend(netdev->dev.parent,
4457 MSEC_PER_SEC * 5);
4459 /* also check for alternate media here */
4460 } else if (!netif_carrier_ok(netdev) &&
4461 (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
4462 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
4463 schedule_work(&adapter->reset_task);
4464 /* return immediately */
4470 spin_lock(&adapter->stats64_lock);
4471 igc_update_stats(adapter);
4472 spin_unlock(&adapter->stats64_lock);
4474 for (i = 0; i < adapter->num_tx_queues; i++) {
4475 struct igc_ring *tx_ring = adapter->tx_ring[i];
4477 if (!netif_carrier_ok(netdev)) {
4478 /* We've lost link, so the controller stops DMA,
4479 * but we've got queued Tx work that's never going
4480 * to get done, so reset controller to flush Tx.
4481 * (Do the reset outside of interrupt context).
4483 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
4484 adapter->tx_timeout_count++;
4485 schedule_work(&adapter->reset_task);
4486 /* return immediately since reset is imminent */
4491 /* Force detection of hung controller every watchdog period */
4492 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
4495 /* Cause software interrupt to ensure Rx ring is cleaned */
4496 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
4499 for (i = 0; i < adapter->num_q_vectors; i++)
4500 eics |= adapter->q_vector[i]->eims_value;
4501 wr32(IGC_EICS, eics);
4503 wr32(IGC_ICS, IGC_ICS_RXDMT0);
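/* Two forms of the same software-interrupt trick: with MSI-X, each
 * vector's EIMS value is written to EICS so every queue vector fires;
 * in MSI/legacy mode a single RXDMT0 cause is injected through ICS.
 */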
4506 igc_ptp_tx_hang(adapter);
4508 /* Reset the timer */
4509 if (!test_bit(__IGC_DOWN, &adapter->state)) {
4510 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
4511 mod_timer(&adapter->watchdog_timer,
4512 round_jiffies(jiffies + HZ));
4514 mod_timer(&adapter->watchdog_timer,
4515 round_jiffies(jiffies + 2 * HZ));
4520 * igc_intr_msi - MSI Interrupt Handler
4521 * @irq: interrupt number
4522 * @data: pointer to a network interface device structure
4524 static irqreturn_t igc_intr_msi(int irq, void *data)
4526 struct igc_adapter *adapter = data;
4527 struct igc_q_vector *q_vector = adapter->q_vector[0];
4528 struct igc_hw *hw = &adapter->hw;
4529 /* read ICR disables interrupts using IAM */
4530 u32 icr = rd32(IGC_ICR);
4532 igc_write_itr(q_vector);
4534 if (icr & IGC_ICR_DRSTA)
4535 schedule_work(&adapter->reset_task);
4537 if (icr & IGC_ICR_DOUTSYNC) {
4538 /* HW is reporting DMA is out of sync */
4539 adapter->stats.doosync++;
4542 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
4543 hw->mac.get_link_status = 1;
4544 if (!test_bit(__IGC_DOWN, &adapter->state))
4545 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4548 if (icr & IGC_ICR_TS)
4549 igc_tsync_interrupt(adapter);
4551 napi_schedule(&q_vector->napi);
4557 * igc_intr - Legacy Interrupt Handler
4558 * @irq: interrupt number
4559 * @data: pointer to a network interface device structure
4561 static irqreturn_t igc_intr(int irq, void *data)
4563 struct igc_adapter *adapter = data;
4564 struct igc_q_vector *q_vector = adapter->q_vector[0];
4565 struct igc_hw *hw = &adapter->hw;
4566 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4567 * need for the IMC write
4569 u32 icr = rd32(IGC_ICR);
4571 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4572 * not set, then the adapter didn't send an interrupt
4574 if (!(icr & IGC_ICR_INT_ASSERTED))
4577 igc_write_itr(q_vector);
4579 if (icr & IGC_ICR_DRSTA)
4580 schedule_work(&adapter->reset_task);
4582 if (icr & IGC_ICR_DOUTSYNC) {
4583 /* HW is reporting DMA is out of sync */
4584 adapter->stats.doosync++;
4587 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
4588 hw->mac.get_link_status = 1;
4589 /* guard against interrupt when we're going down */
4590 if (!test_bit(__IGC_DOWN, &adapter->state))
4591 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4594 if (icr & IGC_ICR_TS)
4595 igc_tsync_interrupt(adapter);
4597 napi_schedule(&q_vector->napi);
4602 static void igc_free_irq(struct igc_adapter *adapter)
4604 if (adapter->msix_entries) {
4607 free_irq(adapter->msix_entries[vector++].vector, adapter);
4609 for (i = 0; i < adapter->num_q_vectors; i++)
4610 free_irq(adapter->msix_entries[vector++].vector,
4611 adapter->q_vector[i]);
4613 free_irq(adapter->pdev->irq, adapter);
4618 * igc_request_irq - initialize interrupts
4619 * @adapter: Pointer to adapter structure
4621 * Attempts to configure interrupts using the best available
4622 * capabilities of the hardware and kernel.
4624 static int igc_request_irq(struct igc_adapter *adapter)
4626 struct net_device *netdev = adapter->netdev;
4627 struct pci_dev *pdev = adapter->pdev;
4630 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
4631 err = igc_request_msix(adapter);
4634 /* fall back to MSI */
4635 igc_free_all_tx_resources(adapter);
4636 igc_free_all_rx_resources(adapter);
4638 igc_clear_interrupt_scheme(adapter);
4639 err = igc_init_interrupt_scheme(adapter, false);
4642 igc_setup_all_tx_resources(adapter);
4643 igc_setup_all_rx_resources(adapter);
4644 igc_configure(adapter);
4647 igc_assign_vector(adapter->q_vector[0], 0);
4649 if (adapter->flags & IGC_FLAG_HAS_MSI) {
4650 err = request_irq(pdev->irq, &igc_intr_msi, 0,
4651 netdev->name, adapter);
4655 /* fall back to legacy interrupts */
4656 igc_reset_interrupt_capability(adapter);
4657 adapter->flags &= ~IGC_FLAG_HAS_MSI;
4660 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
4661 netdev->name, adapter);
4664 netdev_err(netdev, "Error %d getting interrupt\n", err);
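/* Net effect of the ladder above: try MSI-X first; on failure tear the
 * rings down, fall back to a single MSI vector, and finally to a shared
 * legacy INTx line. Only the MSI-X path gets per-queue vectors; the
 * other two funnel all events through q_vector[0].
 */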
4671 * __igc_open - Called when a network interface is made active
4672 * @netdev: network interface device structure
4673 * @resuming: boolean indicating if the device is resuming
4675 * Returns 0 on success, negative value on failure
4677 * The open entry point is called when a network interface is made
4678 * active by the system (IFF_UP). At this point all resources needed
4679 * for transmit and receive operations are allocated, the interrupt
4680 * handler is registered with the OS, the watchdog timer is started,
4681 * and the stack is notified that the interface is ready.
4683 static int __igc_open(struct net_device *netdev, bool resuming)
4685 struct igc_adapter *adapter = netdev_priv(netdev);
4686 struct pci_dev *pdev = adapter->pdev;
4687 struct igc_hw *hw = &adapter->hw;
4691 /* disallow open during test */
4693 if (test_bit(__IGC_TESTING, &adapter->state)) {
4699 pm_runtime_get_sync(&pdev->dev);
4701 netif_carrier_off(netdev);
4703 /* allocate transmit descriptors */
4704 err = igc_setup_all_tx_resources(adapter);
4708 /* allocate receive descriptors */
4709 err = igc_setup_all_rx_resources(adapter);
4713 igc_power_up_link(adapter);
4715 igc_configure(adapter);
4717 err = igc_request_irq(adapter);
4721 /* Notify the stack of the actual queue counts. */
4722 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
4724 goto err_set_queues;
4726 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
4728 goto err_set_queues;
4730 clear_bit(__IGC_DOWN, &adapter->state);
4732 for (i = 0; i < adapter->num_q_vectors; i++)
4733 napi_enable(&adapter->q_vector[i]->napi);
4735 /* Clear any pending interrupts. */
4737 igc_irq_enable(adapter);
4740 pm_runtime_put(&pdev->dev);
4742 netif_tx_start_all_queues(netdev);
4744 /* start the watchdog. */
4745 hw->mac.get_link_status = 1;
4746 schedule_work(&adapter->watchdog_task);
4751 igc_free_irq(adapter);
4753 igc_release_hw_control(adapter);
4754 igc_power_down_phy_copper_base(&adapter->hw);
4755 igc_free_all_rx_resources(adapter);
4757 igc_free_all_tx_resources(adapter);
4761 pm_runtime_put(&pdev->dev);
4766 int igc_open(struct net_device *netdev)
4768 return __igc_open(netdev, false);
4772 * __igc_close - Disables a network interface
4773 * @netdev: network interface device structure
4774 * @suspending: boolean indicating the device is suspending
4776 * Returns 0; this is not allowed to fail.
4778 * The close entry point is called when an interface is de-activated
4779 * by the OS. The hardware is still under the driver's control, but
4780 * needs to be disabled. A global MAC reset is issued to stop the
4781 * hardware, and all transmit and receive resources are freed.
4783 static int __igc_close(struct net_device *netdev, bool suspending)
4785 struct igc_adapter *adapter = netdev_priv(netdev);
4786 struct pci_dev *pdev = adapter->pdev;
4788 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
4791 pm_runtime_get_sync(&pdev->dev);
4795 igc_release_hw_control(adapter);
4797 igc_free_irq(adapter);
4799 igc_free_all_tx_resources(adapter);
4800 igc_free_all_rx_resources(adapter);
4803 pm_runtime_put_sync(&pdev->dev);
4808 int igc_close(struct net_device *netdev)
4810 if (netif_device_present(netdev) || netdev->dismantle)
4811 return __igc_close(netdev, false);
4816 * igc_ioctl - Access the hwtstamp interface
4817 * @netdev: network interface device structure
4818 * @ifr: interface request data
4819 * @cmd: ioctl command
4821 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4825 return igc_ptp_get_ts_config(netdev, ifr);
4827 return igc_ptp_set_ts_config(netdev, ifr);
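/* For reference, a minimal user-space sketch of driving this interface
 * (the socket fd and "eth0" are hypothetical; error handling omitted):
 *
 *	struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON,
 *				       .rx_filter = HWTSTAMP_FILTER_ALL };
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * which ends up in igc_ptp_set_ts_config() above.
 */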
4833 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
4836 struct igc_ring *ring;
4839 if (queue < 0 || queue >= adapter->num_tx_queues)
4842 ring = adapter->tx_ring[queue];
4843 ring->launchtime_enable = enable;
4845 if (adapter->base_time)
4848 adapter->cycle_time = NSEC_PER_SEC;
4850 for (i = 0; i < adapter->num_tx_queues; i++) {
4851 ring = adapter->tx_ring[i];
4852 ring->start_time = 0;
4853 ring->end_time = NSEC_PER_SEC;
4859 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
4861 struct timespec64 b;
4863 b = ktime_to_timespec64(base_time);
4865 return timespec64_compare(now, &b) > 0;
4868 static bool validate_schedule(struct igc_adapter *adapter,
4869 const struct tc_taprio_qopt_offload *qopt)
4871 int queue_uses[IGC_MAX_TX_QUEUES] = { };
4872 struct timespec64 now;
4875 if (qopt->cycle_time_extension)
4878 igc_ptp_read(adapter, &now);
4880 /* If we program the controller's BASET registers with a time
4881 * in the future, it will hold all the packets until that
4882 * time, causing a lot of TX Hangs, so to avoid that, we
4883 * reject schedules that would start in the future.
4885 if (!is_base_time_past(qopt->base_time, &now))
4888 for (n = 0; n < qopt->num_entries; n++) {
4889 const struct tc_taprio_sched_entry *e, *prev;
4892 prev = n ? &qopt->entries[n - 1] : NULL;
4893 e = &qopt->entries[n];
4895 /* i225 only supports "global" frame preemption settings */
4898 if (e->command != TC_TAPRIO_CMD_SET_GATES)
4901 for (i = 0; i < adapter->num_tx_queues; i++)
4902 if (e->gate_mask & BIT(i)) {
4905 /* There are limitations: A single queue cannot
4906 * be opened and closed multiple times per cycle
4907 * unless the gate stays open. Check for it.
4909 if (queue_uses[i] > 1 &&
4910 !(prev->gate_mask & BIT(i)))
4918 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
4919 struct tc_etf_qopt_offload *qopt)
4921 struct igc_hw *hw = &adapter->hw;
4924 if (hw->mac.type != igc_i225)
4927 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
4931 return igc_tsn_offload_apply(adapter);
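/* A typical way to reach this handler from user space is the etf qdisc
 * with the offload flag (illustrative command; the parent handle depends
 * on the configured mqprio/taprio root):
 *
 *	tc qdisc replace dev eth0 parent 100:1 etf \
 *		clockid CLOCK_TAI delta 500000 offload
 */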
4934 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
4935 struct tc_taprio_qopt_offload *qopt)
4937 bool queue_configured[IGC_MAX_TX_QUEUES] = { };
4938 u32 start_time = 0, end_time = 0;
4942 if (!qopt->enable) {
4943 adapter->base_time = 0;
4947 if (qopt->base_time < 0)
4950 if (adapter->base_time)
4953 if (!validate_schedule(adapter, qopt))
4956 adapter->cycle_time = qopt->cycle_time;
4957 adapter->base_time = qopt->base_time;
4959 for (n = 0; n < qopt->num_entries; n++) {
4960 struct tc_taprio_sched_entry *e = &qopt->entries[n];
4962 end_time += e->interval;
4964 /* If any of the conditions below are true, we need to manually
4965 * control the end time of the cycle.
4966 * 1. Qbv users can specify a cycle time that is not equal
4967 * to the total GCL intervals. Hence, recalculation is
4968 * necessary here to exclude the time interval that
4969 * exceeds the cycle time.
4970 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
4971 * once the end of the list is reached, it will switch
4972 * to the END_OF_CYCLE state and leave the gates in the
4973 * same state until the next cycle is started.
4975 if (end_time > adapter->cycle_time ||
4976 n + 1 == qopt->num_entries)
4977 end_time = adapter->cycle_time;
4979 for (i = 0; i < adapter->num_tx_queues; i++) {
4980 struct igc_ring *ring = adapter->tx_ring[i];
4982 if (!(e->gate_mask & BIT(i)))
4985 /* Check whether a queue stays open for more than one
4986 * entry. If so, keep the start and advance the end time.
4989 if (!queue_configured[i])
4990 ring->start_time = start_time;
4991 ring->end_time = end_time;
4993 queue_configured[i] = true;
4996 start_time += e->interval;
4999 /* Check whether a queue got configured by any entry.
5000 * If not, set both its start and end time to the cycle end time.
5002 for (i = 0; i < adapter->num_tx_queues; i++) {
5003 if (!queue_configured[i]) {
5004 struct igc_ring *ring = adapter->tx_ring[i];
5006 ring->start_time = end_time;
5007 ring->end_time = end_time;
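/* Worked example: with cycle_time = 1000000 ns and two entries,
 * 600000 ns @ gate_mask 0x1 followed by 600000 ns @ gate_mask 0x2,
 * queue 0 gets the window [0, 600000) and queue 1 gets
 * [600000, 1000000): the second interval is clamped to cycle_time by
 * the checks above (it overruns the cycle and is also the last entry).
 */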
5014 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
5015 struct tc_taprio_qopt_offload *qopt)
5017 struct igc_hw *hw = &adapter->hw;
5020 if (hw->mac.type != igc_i225)
5023 err = igc_save_qbv_schedule(adapter, qopt);
5027 return igc_tsn_offload_apply(adapter);
5030 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
5033 struct igc_adapter *adapter = netdev_priv(dev);
5036 case TC_SETUP_QDISC_TAPRIO:
5037 return igc_tsn_enable_qbv_scheduling(adapter, type_data);
5039 case TC_SETUP_QDISC_ETF:
5040 return igc_tsn_enable_launchtime(adapter, type_data);
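/* Illustrative taprio command for the TC_SETUP_QDISC_TAPRIO case above
 * (flags 0x2 requests full offload; a base-time of 0 is already in the
 * past, which validate_schedule() insists on):
 *
 *	tc qdisc replace dev eth0 parent root taprio \
 *		num_tc 2 map 1 0 queues 1@0 1@1 base-time 0 \
 *		sched-entry S 01 300000 sched-entry S 02 700000 \
 *		flags 0x2
 */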
5047 static const struct net_device_ops igc_netdev_ops = {
5048 .ndo_open = igc_open,
5049 .ndo_stop = igc_close,
5050 .ndo_start_xmit = igc_xmit_frame,
5051 .ndo_set_rx_mode = igc_set_rx_mode,
5052 .ndo_set_mac_address = igc_set_mac,
5053 .ndo_change_mtu = igc_change_mtu,
5054 .ndo_get_stats64 = igc_get_stats64,
5055 .ndo_fix_features = igc_fix_features,
5056 .ndo_set_features = igc_set_features,
5057 .ndo_features_check = igc_features_check,
5058 .ndo_do_ioctl = igc_ioctl,
5059 .ndo_setup_tc = igc_setup_tc,
5062 /* PCIe configuration access */
5063 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
5065 struct igc_adapter *adapter = hw->back;
5067 pci_read_config_word(adapter->pdev, reg, value);
5070 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
5072 struct igc_adapter *adapter = hw->back;
5074 pci_write_config_word(adapter->pdev, reg, *value);
5077 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
5079 struct igc_adapter *adapter = hw->back;
5081 if (!pci_is_pcie(adapter->pdev))
5082 return -IGC_ERR_CONFIG;
5084 pcie_capability_read_word(adapter->pdev, reg, value);
5089 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
5091 struct igc_adapter *adapter = hw->back;
5093 if (!pci_is_pcie(adapter->pdev))
5094 return -IGC_ERR_CONFIG;
5096 pcie_capability_write_word(adapter->pdev, reg, *value);
5101 u32 igc_rd32(struct igc_hw *hw, u32 reg)
5103 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
5104 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
5107 if (IGC_REMOVED(hw_addr))
5110 value = readl(&hw_addr[reg]);
5112 /* reads should not return all F's */
5113 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
5114 struct net_device *netdev = igc->netdev;
5117 netif_device_detach(netdev);
5118 netdev_err(netdev, "PCIe link lost, device now detached\n");
5119 WARN(pci_device_is_present(igc->pdev),
5120 "igc: Failed to read reg 0x%x!\n", reg);
5126 int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
5128 struct igc_mac_info *mac = &adapter->hw.mac;
5132 /* Make sure dplx is at most 1 bit and lsb of speed is not set
5133 * for the switch() below to work
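/* Worked example of the encoding: all supported SPEED_* values
 * (10/100/1000/2500) are even and DUPLEX_HALF/DUPLEX_FULL are 0/1, so
 * e.g. SPEED_100 + DUPLEX_FULL = 101 cannot collide with any other
 * speed/duplex combination.
 */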
5135 if ((spd & 1) || (dplx & ~1))
5138 switch (spd + dplx) {
5139 case SPEED_10 + DUPLEX_HALF:
5140 mac->forced_speed_duplex = ADVERTISE_10_HALF;
5142 case SPEED_10 + DUPLEX_FULL:
5143 mac->forced_speed_duplex = ADVERTISE_10_FULL;
5145 case SPEED_100 + DUPLEX_HALF:
5146 mac->forced_speed_duplex = ADVERTISE_100_HALF;
5148 case SPEED_100 + DUPLEX_FULL:
5149 mac->forced_speed_duplex = ADVERTISE_100_FULL;
5151 case SPEED_1000 + DUPLEX_FULL:
5153 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5155 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5157 case SPEED_2500 + DUPLEX_FULL:
5159 adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
5161 case SPEED_2500 + DUPLEX_HALF: /* not supported */
5166 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5167 adapter->hw.phy.mdix = AUTO_ALL_MODES;
5172 netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
5177 * igc_probe - Device Initialization Routine
5178 * @pdev: PCI device information struct
5179 * @ent: entry in igc_pci_tbl
5181 * Returns 0 on success, negative on failure
5183 * igc_probe initializes an adapter identified by a pci_dev structure.
5184 * The OS initialization, configuring the adapter private structure,
5185 * and a hardware reset occur.
5187 static int igc_probe(struct pci_dev *pdev,
5188 const struct pci_device_id *ent)
5190 struct igc_adapter *adapter;
5191 struct net_device *netdev;
5193 const struct igc_info *ei = igc_info_tbl[ent->driver_data];
5194 int err, pci_using_dac;
5196 err = pci_enable_device_mem(pdev);
5201 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5205 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5208 "No usable DMA configuration, aborting\n");
5213 err = pci_request_mem_regions(pdev, igc_driver_name);
5217 pci_enable_pcie_error_reporting(pdev);
5219 err = pci_enable_ptm(pdev, NULL);
5221 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
5223 pci_set_master(pdev);
5226 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
5230 goto err_alloc_etherdev;
5232 SET_NETDEV_DEV(netdev, &pdev->dev);
5234 pci_set_drvdata(pdev, netdev);
5235 adapter = netdev_priv(netdev);
5236 adapter->netdev = netdev;
5237 adapter->pdev = pdev;
5240 adapter->port_num = hw->bus.func;
5241 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
5243 err = pci_save_state(pdev);
5248 adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
5249 pci_resource_len(pdev, 0));
5250 if (!adapter->io_addr)
5253 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
5254 hw->hw_addr = adapter->io_addr;
5256 netdev->netdev_ops = &igc_netdev_ops;
5257 igc_ethtool_set_ops(netdev);
5258 netdev->watchdog_timeo = 5 * HZ;
5260 netdev->mem_start = pci_resource_start(pdev, 0);
5261 netdev->mem_end = pci_resource_end(pdev, 0);
5263 /* PCI config space info */
5264 hw->vendor_id = pdev->vendor;
5265 hw->device_id = pdev->device;
5266 hw->revision_id = pdev->revision;
5267 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5268 hw->subsystem_device_id = pdev->subsystem_device;
5270 /* Copy the default MAC and PHY function pointers */
5271 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
5272 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
5274 /* Initialize skew-specific constants */
5275 err = ei->get_invariants(hw);
5279 /* Add supported features to the features list */
5280 netdev->features |= NETIF_F_SG;
5281 netdev->features |= NETIF_F_TSO;
5282 netdev->features |= NETIF_F_TSO6;
5283 netdev->features |= NETIF_F_TSO_ECN;
5284 netdev->features |= NETIF_F_RXHASH;
5285 netdev->features |= NETIF_F_RXCSUM;
5286 netdev->features |= NETIF_F_HW_CSUM;
5287 netdev->features |= NETIF_F_SCTP_CRC;
5288 netdev->features |= NETIF_F_HW_TC;
5290 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
5291 NETIF_F_GSO_GRE_CSUM | \
5292 NETIF_F_GSO_IPXIP4 | \
5293 NETIF_F_GSO_IPXIP6 | \
5294 NETIF_F_GSO_UDP_TUNNEL | \
5295 NETIF_F_GSO_UDP_TUNNEL_CSUM)
5297 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
5298 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
5300 /* setup the private structure */
5301 err = igc_sw_init(adapter);
5305 /* copy netdev features into list of user selectable features */
5306 netdev->hw_features |= NETIF_F_NTUPLE;
5307 netdev->hw_features |= netdev->features;
5310 netdev->features |= NETIF_F_HIGHDMA;
5312 /* MTU range: 68 - 9216 */
5313 netdev->min_mtu = ETH_MIN_MTU;
5314 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
5316 /* before reading the NVM, reset the controller to put the device in a
5317 * known good starting state
5319 hw->mac.ops.reset_hw(hw);
5321 if (igc_get_flash_presence_i225(hw)) {
5322 if (hw->nvm.ops.validate(hw) < 0) {
5323 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
5329 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
5330 /* copy the MAC address out of the NVM */
5331 if (hw->mac.ops.read_mac_addr(hw))
5332 dev_err(&pdev->dev, "NVM Read Error\n");
5335 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
5337 if (!is_valid_ether_addr(netdev->dev_addr)) {
5338 dev_err(&pdev->dev, "Invalid MAC Address\n");
5343 /* configure RXPBSIZE and TXPBSIZE */
5344 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
5345 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
5347 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
5348 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
5350 INIT_WORK(&adapter->reset_task, igc_reset_task);
5351 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
5353 /* Initialize link properties that are user-changeable */
5354 adapter->fc_autoneg = true;
5355 hw->mac.autoneg = true;
5356 hw->phy.autoneg_advertised = 0xaf;
5358 hw->fc.requested_mode = igc_fc_default;
5359 hw->fc.current_mode = igc_fc_default;
5361 /* By default, support wake on port A */
5362 adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
5364 /* initialize the wol settings based on the eeprom settings */
5365 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
5366 adapter->wol |= IGC_WUFC_MAG;
5368 device_set_wakeup_enable(&adapter->pdev->dev,
5369 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
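/* With the default above only magic-packet wake is armed; it can be
 * changed at runtime through the driver's ethtool WoL ops, e.g.
 * "ethtool -s eth0 wol g" (magic packet) or "ethtool -s eth0 wol d"
 * (disable), with "eth0" standing in for the actual interface name.
 */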
5371 igc_ptp_init(adapter);
5373 /* reset the hardware with the new settings */
5376 /* let the f/w know that the h/w is now under the control of the driver */
5379 igc_get_hw_control(adapter);
5381 strncpy(netdev->name, "eth%d", IFNAMSIZ);
5382 err = register_netdev(netdev);
5386 /* carrier off reporting is important to ethtool even BEFORE open */
5387 netif_carrier_off(netdev);
5389 /* Check if Media Autosense is enabled */
5392 /* print pcie link status and MAC address */
5393 pcie_print_link_status(pdev);
5394 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
5396 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
5397 /* Disable EEE for internal PHY devices */
5398 hw->dev_spec._base.eee_enable = false;
5399 adapter->flags &= ~IGC_FLAG_EEE;
5400 igc_set_eee_i225(hw, false, false, false);
5402 pm_runtime_put_noidle(&pdev->dev);
5407 igc_release_hw_control(adapter);
5409 if (!igc_check_reset_block(hw))
5412 igc_clear_interrupt_scheme(adapter);
5413 iounmap(adapter->io_addr);
5415 free_netdev(netdev);
5417 pci_disable_pcie_error_reporting(pdev);
5418 pci_release_mem_regions(pdev);
5421 pci_disable_device(pdev);
5426 * igc_remove - Device Removal Routine
5427 * @pdev: PCI device information struct
5429 * igc_remove is called by the PCI subsystem to alert the driver
5430 * that it should release a PCI device. This could be caused by a
5431 * Hot-Plug event, or because the driver is going to be removed from memory.
5434 static void igc_remove(struct pci_dev *pdev)
5436 struct net_device *netdev = pci_get_drvdata(pdev);
5437 struct igc_adapter *adapter = netdev_priv(netdev);
5439 pm_runtime_get_noresume(&pdev->dev);
5441 igc_flush_nfc_rules(adapter);
5443 igc_ptp_stop(adapter);
5445 set_bit(__IGC_DOWN, &adapter->state);
5447 del_timer_sync(&adapter->watchdog_timer);
5448 del_timer_sync(&adapter->phy_info_timer);
5450 cancel_work_sync(&adapter->reset_task);
5451 cancel_work_sync(&adapter->watchdog_task);
5453 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5454 * would have already happened in close and is redundant.
5456 igc_release_hw_control(adapter);
5457 unregister_netdev(netdev);
5459 igc_clear_interrupt_scheme(adapter);
5460 pci_iounmap(pdev, adapter->io_addr);
5461 pci_release_mem_regions(pdev);
5463 free_netdev(netdev);
5465 pci_disable_pcie_error_reporting(pdev);
5467 pci_disable_device(pdev);
5470 static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
5473 struct net_device *netdev = pci_get_drvdata(pdev);
5474 struct igc_adapter *adapter = netdev_priv(netdev);
5475 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
5476 struct igc_hw *hw = &adapter->hw;
5477 u32 ctrl, rctl, status;
5481 netif_device_detach(netdev);
5483 if (netif_running(netdev))
5484 __igc_close(netdev, true);
5486 igc_ptp_suspend(adapter);
5488 igc_clear_interrupt_scheme(adapter);
5491 status = rd32(IGC_STATUS);
5492 if (status & IGC_STATUS_LU)
5493 wufc &= ~IGC_WUFC_LNKC;
5496 igc_setup_rctl(adapter);
5497 igc_set_rx_mode(netdev);
5499 /* turn on all-multi mode if wake on multicast is enabled */
5500 if (wufc & IGC_WUFC_MC) {
5501 rctl = rd32(IGC_RCTL);
5502 rctl |= IGC_RCTL_MPE;
5503 wr32(IGC_RCTL, rctl);
5506 ctrl = rd32(IGC_CTRL);
5507 ctrl |= IGC_CTRL_ADVD3WUC;
5508 wr32(IGC_CTRL, ctrl);
5510 /* Allow time for pending master requests to run */
5511 igc_disable_pcie_master(hw);
5513 wr32(IGC_WUC, IGC_WUC_PME_EN);
5514 wr32(IGC_WUFC, wufc);
5520 wake = wufc || adapter->en_mng_pt;
5522 igc_power_down_phy_copper_base(&adapter->hw);
5524 igc_power_up_link(adapter);
5527 *enable_wake = wake;
5529 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5530 * would have already happened in close and is redundant.
5532 igc_release_hw_control(adapter);
5534 pci_disable_device(pdev);
5540 static int __maybe_unused igc_runtime_suspend(struct device *dev)
5542 return __igc_shutdown(to_pci_dev(dev), NULL, 1);
5545 static void igc_deliver_wake_packet(struct net_device *netdev)
5547 struct igc_adapter *adapter = netdev_priv(netdev);
5548 struct igc_hw *hw = &adapter->hw;
5549 struct sk_buff *skb;
5552 wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
5554 /* WUPM stores only the first 128 bytes of the wake packet.
5555 * Read the packet only if we have the whole thing.
5557 if (wupl == 0 || wupl > IGC_WUPM_BYTES)
5560 skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
5566 /* Ensure reads are 32-bit aligned */
5567 wupl = roundup(wupl, 4);
5569 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
5571 skb->protocol = eth_type_trans(skb, netdev);
5575 static int __maybe_unused igc_resume(struct device *dev)
5577 struct pci_dev *pdev = to_pci_dev(dev);
5578 struct net_device *netdev = pci_get_drvdata(pdev);
5579 struct igc_adapter *adapter = netdev_priv(netdev);
5580 struct igc_hw *hw = &adapter->hw;
5583 pci_set_power_state(pdev, PCI_D0);
5584 pci_restore_state(pdev);
5585 pci_save_state(pdev);
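/* The save-after-restore is deliberate: pci_restore_state() consumes
 * the saved state, so saving again keeps a valid snapshot around for
 * the next suspend cycle.
 */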
5587 if (!pci_device_is_present(pdev))
5589 err = pci_enable_device_mem(pdev);
5591 netdev_err(netdev, "Cannot enable PCI device from suspend\n");
5594 pci_set_master(pdev);
5596 pci_enable_wake(pdev, PCI_D3hot, 0);
5597 pci_enable_wake(pdev, PCI_D3cold, 0);
5599 if (igc_init_interrupt_scheme(adapter, true)) {
5600 netdev_err(netdev, "Unable to allocate memory for queues\n");
5606 /* let the f/w know that the h/w is now under the control of the driver */
5609 igc_get_hw_control(adapter);
5611 val = rd32(IGC_WUS);
5612 if (val & WAKE_PKT_WUS)
5613 igc_deliver_wake_packet(netdev);
5618 if (!err && netif_running(netdev))
5619 err = __igc_open(netdev, true);
5622 netif_device_attach(netdev);
5628 static int __maybe_unused igc_runtime_resume(struct device *dev)
5630 return igc_resume(dev);
5633 static int __maybe_unused igc_suspend(struct device *dev)
5635 return __igc_shutdown(to_pci_dev(dev), NULL, 0);
5638 static int __maybe_unused igc_runtime_idle(struct device *dev)
5640 struct net_device *netdev = dev_get_drvdata(dev);
5641 struct igc_adapter *adapter = netdev_priv(netdev);
5643 if (!igc_has_link(adapter))
5644 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
5648 #endif /* CONFIG_PM */
5650 static void igc_shutdown(struct pci_dev *pdev)
5654 __igc_shutdown(pdev, &wake, 0);
5656 if (system_state == SYSTEM_POWER_OFF) {
5657 pci_wake_from_d3(pdev, wake);
5658 pci_set_power_state(pdev, PCI_D3hot);
5663 * igc_io_error_detected - called when PCI error is detected
5664 * @pdev: Pointer to PCI device
5665 * @state: The current PCI connection state
5667 * This function is called after a PCI bus error affecting
5668 * this device has been detected.
5670 static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
5671 pci_channel_state_t state)
5673 struct net_device *netdev = pci_get_drvdata(pdev);
5674 struct igc_adapter *adapter = netdev_priv(netdev);
5676 netif_device_detach(netdev);
5678 if (state == pci_channel_io_perm_failure)
5679 return PCI_ERS_RESULT_DISCONNECT;
5681 if (netif_running(netdev))
5683 pci_disable_device(pdev);
5685 /* Request a slot reset. */
5686 return PCI_ERS_RESULT_NEED_RESET;
5690 * igc_io_slot_reset - called after the PCI bus has been reset.
5691 * @pdev: Pointer to PCI device
5693 * Restart the card from scratch, as if from a cold boot. Implementation
5694 * resembles the first half of the igc_resume routine.
5696 static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
5698 struct net_device *netdev = pci_get_drvdata(pdev);
5699 struct igc_adapter *adapter = netdev_priv(netdev);
5700 struct igc_hw *hw = &adapter->hw;
5701 pci_ers_result_t result;
5703 if (pci_enable_device_mem(pdev)) {
5704 netdev_err(netdev, "Could not re-enable PCI device after reset\n");
5705 result = PCI_ERS_RESULT_DISCONNECT;
5707 pci_set_master(pdev);
5708 pci_restore_state(pdev);
5709 pci_save_state(pdev);
5711 pci_enable_wake(pdev, PCI_D3hot, 0);
5712 pci_enable_wake(pdev, PCI_D3cold, 0);
5714 /* In case of PCI error, the adapter loses its HW address,
5715 * so we should re-assign it here.
5717 hw->hw_addr = adapter->io_addr;
5721 result = PCI_ERS_RESULT_RECOVERED;
5728 * igc_io_resume - called when traffic can start to flow again.
5729 * @pdev: Pointer to PCI device
5731 * This callback is called when the error recovery driver tells us that
5732 * it's OK to resume normal operation. Implementation resembles the
5733 * second half of the igc_resume routine.
5735 static void igc_io_resume(struct pci_dev *pdev)
5737 struct net_device *netdev = pci_get_drvdata(pdev);
5738 struct igc_adapter *adapter = netdev_priv(netdev);
5741 if (netif_running(netdev)) {
5742 if (igc_open(netdev)) {
5743 netdev_err(netdev, "igc_open failed after reset\n");
5748 netif_device_attach(netdev);
5750 /* let the f/w know that the h/w is now under the control of the driver */
5753 igc_get_hw_control(adapter);
5757 static const struct pci_error_handlers igc_err_handler = {
5758 .error_detected = igc_io_error_detected,
5759 .slot_reset = igc_io_slot_reset,
5760 .resume = igc_io_resume,
5764 static const struct dev_pm_ops igc_pm_ops = {
5765 SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
5766 SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
5771 static struct pci_driver igc_driver = {
5772 .name = igc_driver_name,
5773 .id_table = igc_pci_tbl,
5775 .remove = igc_remove,
5777 .driver.pm = &igc_pm_ops,
5779 .shutdown = igc_shutdown,
5780 .err_handler = &igc_err_handler,
5784 * igc_reinit_queues - reinitialize the device's queues after a config change
5785 * @adapter: pointer to adapter structure
5787 int igc_reinit_queues(struct igc_adapter *adapter)
5789 struct net_device *netdev = adapter->netdev;
5792 if (netif_running(netdev))
5795 igc_reset_interrupt_capability(adapter);
5797 if (igc_init_interrupt_scheme(adapter, true)) {
5798 netdev_err(netdev, "Unable to allocate memory for queues\n");
5802 if (netif_running(netdev))
5803 err = igc_open(netdev);
5809 * igc_get_hw_dev - return the net_device backing a hardware structure
5810 * @hw: pointer to hardware structure
5812 * Used by the hardware layer to print debugging information.
5814 struct net_device *igc_get_hw_dev(struct igc_hw *hw)
5816 struct igc_adapter *adapter = hw->back;
5818 return adapter->netdev;
5822 * igc_init_module - Driver Registration Routine
5824 * igc_init_module is the first routine called when the driver is
5825 * loaded. All it does is register with the PCI subsystem.
5827 static int __init igc_init_module(void)
5831 pr_info("%s\n", igc_driver_string);
5832 pr_info("%s\n", igc_copyright);
5834 ret = pci_register_driver(&igc_driver);
5838 module_init(igc_init_module);
5841 * igc_exit_module - Driver Exit Cleanup Routine
5843 * igc_exit_module is called just before the driver is removed from memory.
5846 static void __exit igc_exit_module(void)
5848 pci_unregister_driver(&igc_driver);
5851 module_exit(igc_exit_module);