1 /*******************************************************************************
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2015 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
21 Contact Information:
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *******************************************************************************/
27 /******************************************************************************
28 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
29 ******************************************************************************/
31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 #include <linux/types.h>
34 #include <linux/bitops.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/netdevice.h>
38 #include <linux/vmalloc.h>
39 #include <linux/string.h>
42 #include <linux/tcp.h>
43 #include <linux/sctp.h>
44 #include <linux/ipv6.h>
45 #include <linux/slab.h>
46 #include <net/checksum.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/ethtool.h>
50 #include <linux/if_vlan.h>
51 #include <linux/prefetch.h>
55 const char ixgbevf_driver_name[] = "ixgbevf";
56 static const char ixgbevf_driver_string[] =
57 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
59 #define DRV_VERSION "2.12.1-k"
60 const char ixgbevf_driver_version[] = DRV_VERSION;
61 static char ixgbevf_copyright[] =
62 "Copyright (c) 2009 - 2012 Intel Corporation.";
64 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
65 [board_82599_vf] = &ixgbevf_82599_vf_info,
66 [board_X540_vf] = &ixgbevf_X540_vf_info,
67 [board_X550_vf] = &ixgbevf_X550_vf_info,
68 [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
69 };
71 /* ixgbevf_pci_tbl - PCI Device ID Table
73 * Wildcard entries (PCI_ANY_ID) should come last
74 * Last entry must be all 0s
76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77 * Class, Class Mask, private data (not used) }
79 static const struct pci_device_id ixgbevf_pci_tbl[] = {
80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
82 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
83 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
84 /* required last entry */
85 {0, }
86 };
87 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
89 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
90 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
91 MODULE_LICENSE("GPL");
92 MODULE_VERSION(DRV_VERSION);
94 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
95 static int debug = -1;
96 module_param(debug, int, 0);
97 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
99 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
101 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
102 !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
103 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
104 schedule_work(&adapter->service_task);
107 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
109 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
111 /* flush memory to make sure state is correct before next watchdog */
112 smp_mb__before_atomic();
113 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
117 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
118 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
119 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
121 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
123 struct ixgbevf_adapter *adapter = hw->back;
128 dev_err(&adapter->pdev->dev, "Adapter removed\n");
129 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
130 ixgbevf_service_event_schedule(adapter);
133 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
137 /* The following check not only optimizes a bit by not
138 * performing a read on the status register when the
139 * register just read was a status register read that
140 * returned IXGBE_FAILED_READ_REG. It also blocks any
141 * potential recursion.
143 if (reg == IXGBE_VFSTATUS) {
144 ixgbevf_remove_adapter(hw);
147 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
148 if (value == IXGBE_FAILED_READ_REG)
149 ixgbevf_remove_adapter(hw);
152 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
154 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
157 if (IXGBE_REMOVED(reg_addr))
158 return IXGBE_FAILED_READ_REG;
159 value = readl(reg_addr + reg);
160 if (unlikely(value == IXGBE_FAILED_READ_REG))
161 ixgbevf_check_remove(hw, reg);
166 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
167 * @adapter: pointer to adapter struct
168 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
169 * @queue: queue to map the corresponding interrupt to
170 * @msix_vector: the vector to map to the corresponding queue
172 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
173 u8 queue, u8 msix_vector)
176 struct ixgbe_hw *hw = &adapter->hw;
178 if (direction == -1) {
180 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
181 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
184 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
186 /* Tx or Rx causes */
187 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
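/* Each VTIVAR register carries the allocations for a pair of queues:
 * (queue & 1) selects the upper or lower 16 bits and direction selects
 * the Rx (low byte) or Tx (high byte) cause within that half.
 */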
188 index = ((16 * (queue & 1)) + (8 * direction));
189 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
190 ivar &= ~(0xFF << index);
191 ivar |= (msix_vector << index);
192 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
196 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
197 struct ixgbevf_tx_buffer *tx_buffer)
199 if (tx_buffer->skb) {
200 dev_kfree_skb_any(tx_buffer->skb);
201 if (dma_unmap_len(tx_buffer, len))
202 dma_unmap_single(tx_ring->dev,
203 dma_unmap_addr(tx_buffer, dma),
204 dma_unmap_len(tx_buffer, len),
206 } else if (dma_unmap_len(tx_buffer, len)) {
207 dma_unmap_page(tx_ring->dev,
208 dma_unmap_addr(tx_buffer, dma),
209 dma_unmap_len(tx_buffer, len),
212 tx_buffer->next_to_watch = NULL;
213 tx_buffer->skb = NULL;
214 dma_unmap_len_set(tx_buffer, len, 0);
215 /* tx_buffer must be completely set up in the transmit path */
218 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
220 return ring->stats.packets;
223 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
225 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
226 struct ixgbe_hw *hw = &adapter->hw;
228 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
229 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
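/* pending work is the number of descriptors between the head the hardware
 * has consumed and the tail software last wrote, wrapping around the end
 * of the ring when the tail has already wrapped past the head
 */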
231 if (head != tail)
232 return (head < tail) ?
233 tail - head : (tail + ring->count - head);
235 return 0;
238 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
240 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
241 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
242 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
244 clear_check_for_tx_hang(tx_ring);
246 /* Check for a hung queue, but be thorough. This verifies
247 * that a transmit has been completed since the previous
248 * check AND there is at least one packet pending. The
249 * ARMED bit is set to indicate a potential hang.
251 if ((tx_done_old == tx_done) && tx_pending) {
252 /* make sure it is true for two checks in a row */
253 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
256 /* reset the countdown */
257 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
259 /* update completed stats and continue */
260 tx_ring->tx_stats.tx_done_old = tx_done;
265 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
267 /* Do the reset outside of interrupt context */
268 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
269 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
270 ixgbevf_service_event_schedule(adapter);
275 * ixgbevf_tx_timeout - Respond to a Tx Hang
276 * @netdev: network interface device structure
278 static void ixgbevf_tx_timeout(struct net_device *netdev)
280 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
282 ixgbevf_tx_timeout_reset(adapter);
286 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
287 * @q_vector: structure containing interrupt and ring information
288 * @tx_ring: tx ring to clean
290 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
291 struct ixgbevf_ring *tx_ring)
293 struct ixgbevf_adapter *adapter = q_vector->adapter;
294 struct ixgbevf_tx_buffer *tx_buffer;
295 union ixgbe_adv_tx_desc *tx_desc;
296 unsigned int total_bytes = 0, total_packets = 0;
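/* cap the amount of Tx cleanup done in a single call to half the ring size */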
297 unsigned int budget = tx_ring->count / 2;
298 unsigned int i = tx_ring->next_to_clean;
300 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
303 tx_buffer = &tx_ring->tx_buffer_info[i];
304 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
308 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
310 /* if next_to_watch is not set then there is no work pending */
314 /* prevent any other reads prior to eop_desc */
317 /* if DD is not set pending work has not been completed */
318 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
321 /* clear next_to_watch to prevent false hangs */
322 tx_buffer->next_to_watch = NULL;
324 /* update the statistics for this packet */
325 total_bytes += tx_buffer->bytecount;
326 total_packets += tx_buffer->gso_segs;
329 dev_kfree_skb_any(tx_buffer->skb);
331 /* unmap skb header data */
332 dma_unmap_single(tx_ring->dev,
333 dma_unmap_addr(tx_buffer, dma),
334 dma_unmap_len(tx_buffer, len),
337 /* clear tx_buffer data */
338 tx_buffer->skb = NULL;
339 dma_unmap_len_set(tx_buffer, len, 0);
341 /* unmap remaining buffers */
342 while (tx_desc != eop_desc) {
348 tx_buffer = tx_ring->tx_buffer_info;
349 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
352 /* unmap any remaining paged data */
353 if (dma_unmap_len(tx_buffer, len)) {
354 dma_unmap_page(tx_ring->dev,
355 dma_unmap_addr(tx_buffer, dma),
356 dma_unmap_len(tx_buffer, len),
358 dma_unmap_len_set(tx_buffer, len, 0);
362 /* move us one more past the eop_desc for start of next pkt */
368 tx_buffer = tx_ring->tx_buffer_info;
369 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
372 /* issue prefetch for next Tx descriptor */
375 /* update budget accounting */
377 } while (likely(budget));
380 tx_ring->next_to_clean = i;
381 u64_stats_update_begin(&tx_ring->syncp);
382 tx_ring->stats.bytes += total_bytes;
383 tx_ring->stats.packets += total_packets;
384 u64_stats_update_end(&tx_ring->syncp);
385 q_vector->tx.total_bytes += total_bytes;
386 q_vector->tx.total_packets += total_packets;
388 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
389 struct ixgbe_hw *hw = &adapter->hw;
390 union ixgbe_adv_tx_desc *eop_desc;
392 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
394 pr_err("Detected Tx Unit Hang\n"
395 " Tx Queue <%d>\n"
396 " TDH, TDT <%x>, <%x>\n"
397 " next_to_use <%x>\n"
398 " next_to_clean <%x>\n"
399 "tx_buffer_info[next_to_clean]\n"
400 " next_to_watch <%p>\n"
401 " eop_desc->wb.status <%x>\n"
402 " time_stamp <%lx>\n"
403 " jiffies <%lx>\n",
404 tx_ring->queue_index,
405 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
406 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
407 tx_ring->next_to_use, i,
408 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
409 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
411 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
413 /* schedule immediate reset if we believe we hung */
414 ixgbevf_tx_timeout_reset(adapter);
419 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
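/* only restart the queue once at least TX_WAKE_THRESHOLD descriptors are
 * free again, i.e. roughly enough room for two more worst-case frames
 */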
420 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
421 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
422 /* Make sure that anybody stopping the queue after this
423 * sees the new next_to_clean.
427 if (__netif_subqueue_stopped(tx_ring->netdev,
428 tx_ring->queue_index) &&
429 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
430 netif_wake_subqueue(tx_ring->netdev,
431 tx_ring->queue_index);
432 ++tx_ring->tx_stats.restart_queue;
440 * ixgbevf_rx_skb - Helper function to determine proper Rx method
441 * @q_vector: structure containing interrupt and ring information
442 * @skb: packet to send up
444 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
447 #ifdef CONFIG_NET_RX_BUSY_POLL
448 skb_mark_napi_id(skb, &q_vector->napi);
450 if (ixgbevf_qv_busy_polling(q_vector)) {
451 netif_receive_skb(skb);
452 /* exit early if we busy polled */
455 #endif /* CONFIG_NET_RX_BUSY_POLL */
457 napi_gro_receive(&q_vector->napi, skb);
460 #define IXGBE_RSS_L4_TYPES_MASK \
461 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
462 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
463 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
464 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
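/* RSS types in this mask hash over the L4 header, so the reported hash can
 * be marked PKT_HASH_TYPE_L4; anything else is treated as an L3-only hash
 */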
466 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
467 union ixgbe_adv_rx_desc *rx_desc,
472 if (!(ring->netdev->features & NETIF_F_RXHASH))
475 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
476 IXGBE_RXDADV_RSSTYPE_MASK;
481 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
482 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
483 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
487 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
488 * @ring: structure containing ring specific data
489 * @rx_desc: current Rx descriptor being processed
490 * @skb: skb currently being received and modified
492 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
493 union ixgbe_adv_rx_desc *rx_desc,
496 skb_checksum_none_assert(skb);
498 /* Rx csum disabled */
499 if (!(ring->netdev->features & NETIF_F_RXCSUM))
502 /* if IP and error */
503 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
504 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
505 ring->rx_stats.csum_err++;
509 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
512 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
513 ring->rx_stats.csum_err++;
517 /* It must be a TCP or UDP packet with a valid checksum */
518 skb->ip_summed = CHECKSUM_UNNECESSARY;
522 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
523 * @rx_ring: rx descriptor ring packet is being transacted on
524 * @rx_desc: pointer to the EOP Rx descriptor
525 * @skb: pointer to current skb being populated
527 * This function checks the ring, descriptor, and packet information in
528 * order to populate the checksum, VLAN, protocol, and other fields within the skb.
531 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
532 union ixgbe_adv_rx_desc *rx_desc,
535 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
536 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
538 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
539 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
540 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
542 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
543 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
546 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
550 * ixgbevf_is_non_eop - process handling of non-EOP buffers
551 * @rx_ring: Rx ring being processed
552 * @rx_desc: Rx descriptor for current buffer
553 * @skb: current socket buffer containing buffer in progress
555 * This function updates next to clean. If the buffer is an EOP buffer
556 * this function exits returning false, otherwise it will place the
557 * sk_buff in the next buffer to be chained and return true indicating
558 * that this is in fact a non-EOP buffer.
560 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
561 union ixgbe_adv_rx_desc *rx_desc)
563 u32 ntc = rx_ring->next_to_clean + 1;
565 /* fetch, update, and store next to clean */
566 ntc = (ntc < rx_ring->count) ? ntc : 0;
567 rx_ring->next_to_clean = ntc;
569 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
571 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
577 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
578 struct ixgbevf_rx_buffer *bi)
580 struct page *page = bi->page;
581 dma_addr_t dma = bi->dma;
583 /* since we are recycling buffers we should seldom need to alloc */
587 /* alloc new page for storage */
588 page = dev_alloc_page();
589 if (unlikely(!page)) {
590 rx_ring->rx_stats.alloc_rx_page_failed++;
594 /* map page for use */
595 dma = dma_map_page(rx_ring->dev, page, 0,
596 PAGE_SIZE, DMA_FROM_DEVICE);
598 /* if mapping failed free memory back to system since
599 * there isn't much point in holding memory we can't use
601 if (dma_mapping_error(rx_ring->dev, dma)) {
604 rx_ring->rx_stats.alloc_rx_buff_failed++;
616 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
617 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
618 * @cleaned_count: number of buffers to replace
620 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
623 union ixgbe_adv_rx_desc *rx_desc;
624 struct ixgbevf_rx_buffer *bi;
625 unsigned int i = rx_ring->next_to_use;
627 /* nothing to do or no valid netdev defined */
628 if (!cleaned_count || !rx_ring->netdev)
631 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
632 bi = &rx_ring->rx_buffer_info[i];
636 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
639 /* Refresh the desc even if pkt_addr didn't change
640 * because each write-back erases this info.
642 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
648 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
649 bi = rx_ring->rx_buffer_info;
653 /* clear the hdr_addr for the next_to_use descriptor */
654 rx_desc->read.hdr_addr = 0;
657 } while (cleaned_count);
661 if (rx_ring->next_to_use != i) {
662 /* record the next descriptor to use */
663 rx_ring->next_to_use = i;
665 /* update next to alloc since we have filled the ring */
666 rx_ring->next_to_alloc = i;
668 /* Force memory writes to complete before letting h/w
669 * know there are new descriptors to fetch. (Only
670 * applicable for weak-ordered memory model archs, such as IA-64).
674 ixgbevf_write_tail(rx_ring, i);
679 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
680 * @rx_ring: rx descriptor ring packet is being transacted on
681 * @rx_desc: pointer to the EOP Rx descriptor
682 * @skb: pointer to current skb being fixed
684 * Check for corrupted packet headers caused by senders on the local L2
685 * embedded NIC switch not setting up their Tx Descriptors right. These
686 * should be very rare.
688 * Also address the case where we are pulling data in on pages only
689 * and as such no data is present in the skb header.
691 * In addition if skb is not at least 60 bytes we need to pad it so that
692 * it is large enough to qualify as a valid Ethernet frame.
694 * Returns true if an error was encountered and skb was freed.
696 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
697 union ixgbe_adv_rx_desc *rx_desc,
700 /* verify that the packet does not have any known errors */
701 if (unlikely(ixgbevf_test_staterr(rx_desc,
702 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
703 struct net_device *netdev = rx_ring->netdev;
705 if (!(netdev->features & NETIF_F_RXALL)) {
706 dev_kfree_skb_any(skb);
711 /* if eth_skb_pad returns an error the skb was freed */
712 if (eth_skb_pad(skb))
719 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
720 * @rx_ring: rx descriptor ring to store buffers on
721 * @old_buff: donor buffer to have page reused
723 * Synchronizes page for reuse by the adapter
725 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
726 struct ixgbevf_rx_buffer *old_buff)
728 struct ixgbevf_rx_buffer *new_buff;
729 u16 nta = rx_ring->next_to_alloc;
731 new_buff = &rx_ring->rx_buffer_info[nta];
733 /* update, and store next to alloc */
735 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
737 /* transfer page from old buffer to new buffer */
738 new_buff->page = old_buff->page;
739 new_buff->dma = old_buff->dma;
740 new_buff->page_offset = old_buff->page_offset;
742 /* sync the buffer for use by the device */
743 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
744 new_buff->page_offset,
749 static inline bool ixgbevf_page_is_reserved(struct page *page)
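/* pages from a remote NUMA node or the pfmemalloc emergency reserves are
 * treated as reserved so they are never recycled back into the ring
 */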
751 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
755 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
756 * @rx_ring: rx descriptor ring to transact packets on
757 * @rx_buffer: buffer containing page to add
758 * @rx_desc: descriptor containing length of buffer written by hardware
759 * @skb: sk_buff to place the data into
761 * This function will add the data contained in rx_buffer->page to the skb.
762 * This is done either through a direct copy if the data in the buffer is
763 * less than the skb header size, otherwise it will just attach the page as
766 * The function will then update the page offset if necessary and return
767 * true if the buffer can be reused by the adapter.
769 static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
770 struct ixgbevf_rx_buffer *rx_buffer,
771 union ixgbe_adv_rx_desc *rx_desc,
774 struct page *page = rx_buffer->page;
775 unsigned char *va = page_address(page) + rx_buffer->page_offset;
776 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
777 #if (PAGE_SIZE < 8192)
778 unsigned int truesize = IXGBEVF_RX_BUFSZ;
780 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
782 unsigned int pull_len;
784 if (unlikely(skb_is_nonlinear(skb)))
787 if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
788 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
790 /* page is not reserved, we can reuse buffer as is */
791 if (likely(!ixgbevf_page_is_reserved(page)))
794 /* this page cannot be reused so discard it */
799 /* we need the header to contain the greater of either ETH_HLEN or
800 * 60 bytes if the skb->len is less than 60 for skb_pad.
802 pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
804 /* align pull length to size of long to optimize memcpy performance */
805 memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
807 /* update all of the pointers */
812 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
813 (unsigned long)va & ~PAGE_MASK, size, truesize);
815 /* avoid re-using remote pages */
816 if (unlikely(ixgbevf_page_is_reserved(page)))
819 #if (PAGE_SIZE < 8192)
820 /* if we are only owner of page we can reuse it */
821 if (unlikely(page_count(page) != 1))
824 /* flip page offset to other buffer */
825 rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
828 /* move offset up to the next cache line */
829 rx_buffer->page_offset += truesize;
831 if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
835 /* Even if we own the page, we are not allowed to use atomic_set()
836 * This would break get_page_unless_zero() users.
838 atomic_inc(&page->_count);
843 static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
844 union ixgbe_adv_rx_desc *rx_desc,
847 struct ixgbevf_rx_buffer *rx_buffer;
850 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
851 page = rx_buffer->page;
855 void *page_addr = page_address(page) +
856 rx_buffer->page_offset;
858 /* prefetch first cache line of first page */
860 #if L1_CACHE_BYTES < 128
861 prefetch(page_addr + L1_CACHE_BYTES);
864 /* allocate a skb to store the frags */
865 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
866 IXGBEVF_RX_HDR_SIZE);
867 if (unlikely(!skb)) {
868 rx_ring->rx_stats.alloc_rx_buff_failed++;
872 /* we will be copying header into skb->data in
873 * pskb_may_pull so it is in our interest to prefetch
874 * it now to avoid a possible cache miss
876 prefetchw(skb->data);
879 /* we are reusing so sync this buffer for CPU use */
880 dma_sync_single_range_for_cpu(rx_ring->dev,
882 rx_buffer->page_offset,
886 /* pull page into skb */
887 if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
888 /* hand second half of page back to the ring */
889 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
891 /* we are not reusing the buffer so unmap it */
892 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
893 PAGE_SIZE, DMA_FROM_DEVICE);
896 /* clear contents of buffer_info */
898 rx_buffer->page = NULL;
903 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
906 struct ixgbe_hw *hw = &adapter->hw;
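/* writing a 1 to a bit in VTEIMS unmasks (re-enables) that MSI-X vector */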
908 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
911 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
912 struct ixgbevf_ring *rx_ring,
915 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
916 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
917 struct sk_buff *skb = rx_ring->skb;
919 while (likely(total_rx_packets < budget)) {
920 union ixgbe_adv_rx_desc *rx_desc;
922 /* return some buffers to hardware, one at a time is too slow */
923 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
924 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
928 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
930 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
933 /* This memory barrier is needed to keep us from reading
934 * any other fields out of the rx_desc until we know the
935 * RXD_STAT_DD bit is set
939 /* retrieve a buffer from the ring */
940 skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
942 /* exit if we failed to retrieve a buffer */
948 /* fetch next buffer in frame if non-eop */
949 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
952 /* verify the packet layout is correct */
953 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
958 /* probably a little skewed due to removing CRC */
959 total_rx_bytes += skb->len;
961 /* Workaround hardware that can't do proper VEPA multicast source pruning. */
964 if ((skb->pkt_type == PACKET_BROADCAST ||
965 skb->pkt_type == PACKET_MULTICAST) &&
966 ether_addr_equal(rx_ring->netdev->dev_addr,
967 eth_hdr(skb)->h_source)) {
968 dev_kfree_skb_irq(skb);
972 /* populate checksum, VLAN, and protocol */
973 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
975 ixgbevf_rx_skb(q_vector, skb);
977 /* reset skb pointer */
980 /* update budget accounting */
984 /* place incomplete frames back on ring for completion */
987 u64_stats_update_begin(&rx_ring->syncp);
988 rx_ring->stats.packets += total_rx_packets;
989 rx_ring->stats.bytes += total_rx_bytes;
990 u64_stats_update_end(&rx_ring->syncp);
991 q_vector->rx.total_packets += total_rx_packets;
992 q_vector->rx.total_bytes += total_rx_bytes;
994 return total_rx_packets;
998 * ixgbevf_poll - NAPI polling callback
999 * @napi: napi struct with our devices info in it
1000 * @budget: amount of work driver is allowed to do this pass, in packets
1002 * This function cleans all of the Rx and Tx rings associated with a q_vector.
1005 static int ixgbevf_poll(struct napi_struct *napi, int budget)
1007 struct ixgbevf_q_vector *q_vector =
1008 container_of(napi, struct ixgbevf_q_vector, napi);
1009 struct ixgbevf_adapter *adapter = q_vector->adapter;
1010 struct ixgbevf_ring *ring;
1011 int per_ring_budget, work_done = 0;
1012 bool clean_complete = true;
1014 ixgbevf_for_each_ring(ring, q_vector->tx)
1015 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
1019 #ifdef CONFIG_NET_RX_BUSY_POLL
1020 if (!ixgbevf_qv_lock_napi(q_vector))
1024 /* attempt to distribute budget to each queue fairly, but don't allow
1025 * the budget to go below 1 because we'll exit polling
1027 if (q_vector->rx.count > 1)
1028 per_ring_budget = max(budget/q_vector->rx.count, 1);
1030 per_ring_budget = budget;
1032 ixgbevf_for_each_ring(ring, q_vector->rx) {
1033 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1035 work_done += cleaned;
1036 clean_complete &= (cleaned < per_ring_budget);
1039 #ifdef CONFIG_NET_RX_BUSY_POLL
1040 ixgbevf_qv_unlock_napi(q_vector);
1043 /* If all work not completed, return budget and keep polling */
1044 if (!clean_complete)
1046 /* all work done, exit the polling mode */
1047 napi_complete_done(napi, work_done);
1048 if (adapter->rx_itr_setting & 1)
1049 ixgbevf_set_itr(q_vector);
1050 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1051 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1052 ixgbevf_irq_enable_queues(adapter,
1053 1 << q_vector->v_idx);
1059 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1060 * @q_vector: structure containing interrupt and ring information
1062 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1064 struct ixgbevf_adapter *adapter = q_vector->adapter;
1065 struct ixgbe_hw *hw = &adapter->hw;
1066 int v_idx = q_vector->v_idx;
1067 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1069 /* set the WDIS bit to not clear the timer bits and cause an
1070 * immediate assertion of the interrupt
1072 itr_reg |= IXGBE_EITR_CNT_WDIS;
1074 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1077 #ifdef CONFIG_NET_RX_BUSY_POLL
1078 /* must be called with local_bh_disable()d */
1079 static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
1081 struct ixgbevf_q_vector *q_vector =
1082 container_of(napi, struct ixgbevf_q_vector, napi);
1083 struct ixgbevf_adapter *adapter = q_vector->adapter;
1084 struct ixgbevf_ring *ring;
1087 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
1088 return LL_FLUSH_FAILED;
1090 if (!ixgbevf_qv_lock_poll(q_vector))
1091 return LL_FLUSH_BUSY;
1093 ixgbevf_for_each_ring(ring, q_vector->rx) {
1094 found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
1095 #ifdef BP_EXTENDED_STATS
1097 ring->stats.cleaned += found;
1099 ring->stats.misses++;
1105 ixgbevf_qv_unlock_poll(q_vector);
1109 #endif /* CONFIG_NET_RX_BUSY_POLL */
1112 * ixgbevf_configure_msix - Configure MSI-X hardware
1113 * @adapter: board private structure
1115 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X interrupts.
1118 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1120 struct ixgbevf_q_vector *q_vector;
1121 int q_vectors, v_idx;
1123 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1124 adapter->eims_enable_mask = 0;
1126 /* Populate the IVAR table and set the ITR values to the
1127 * corresponding register.
1129 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1130 struct ixgbevf_ring *ring;
1132 q_vector = adapter->q_vector[v_idx];
1134 ixgbevf_for_each_ring(ring, q_vector->rx)
1135 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1137 ixgbevf_for_each_ring(ring, q_vector->tx)
1138 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1140 if (q_vector->tx.ring && !q_vector->rx.ring) {
1141 /* Tx only vector */
1142 if (adapter->tx_itr_setting == 1)
1143 q_vector->itr = IXGBE_10K_ITR;
1145 q_vector->itr = adapter->tx_itr_setting;
1147 /* Rx or Rx/Tx vector */
1148 if (adapter->rx_itr_setting == 1)
1149 q_vector->itr = IXGBE_20K_ITR;
1151 q_vector->itr = adapter->rx_itr_setting;
1154 /* add q_vector eims value to global eims_enable_mask */
1155 adapter->eims_enable_mask |= 1 << v_idx;
1157 ixgbevf_write_eitr(q_vector);
1160 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1161 /* setup eims_other and add value to global eims_enable_mask */
1162 adapter->eims_other = 1 << v_idx;
1163 adapter->eims_enable_mask |= adapter->eims_other;
1166 enum latency_range {
1170 latency_invalid = 255
1174 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1175 * @q_vector: structure containing interrupt and ring information
1176 * @ring_container: structure containing ring performance data
1178 * Stores a new ITR value based on packets and byte
1179 * counts during the last interrupt. The advantage of per interrupt
1180 * computation is faster updates and more accurate ITR for the current
1181 * traffic pattern. Constants in this function were computed
1182 * based on theoretical maximum wire speed and thresholds were set based
1183 * on testing data as well as attempting to minimize response time
1184 * while increasing bulk throughput.
1186 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1187 struct ixgbevf_ring_container *ring_container)
1189 int bytes = ring_container->total_bytes;
1190 int packets = ring_container->total_packets;
1193 u8 itr_setting = ring_container->itr;
1198 /* simple throttle rate management
1199 * 0-20MB/s lowest (100000 ints/s)
1200 * 20-100MB/s low (20000 ints/s)
1201 * 100-1249MB/s bulk (8000 ints/s)
1203 /* what was last interrupt timeslice? */
1204 timepassed_us = q_vector->itr >> 2;
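/* q_vector->itr is kept in register format, four times the interval in
 * usecs, so shifting right by two gives the last timeslice in microseconds
 */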
1205 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1207 switch (itr_setting) {
1208 case lowest_latency:
1209 if (bytes_perint > 10)
1210 itr_setting = low_latency;
1213 if (bytes_perint > 20)
1214 itr_setting = bulk_latency;
1215 else if (bytes_perint <= 10)
1216 itr_setting = lowest_latency;
1219 if (bytes_perint <= 20)
1220 itr_setting = low_latency;
1224 /* clear work counters since we have the values we need */
1225 ring_container->total_bytes = 0;
1226 ring_container->total_packets = 0;
1228 /* write updated itr to ring container */
1229 ring_container->itr = itr_setting;
1232 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1234 u32 new_itr = q_vector->itr;
1237 ixgbevf_update_itr(q_vector, &q_vector->tx);
1238 ixgbevf_update_itr(q_vector, &q_vector->rx);
1240 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1242 switch (current_itr) {
1243 /* counts and packets in update_itr are dependent on these numbers */
1244 case lowest_latency:
1245 new_itr = IXGBE_100K_ITR;
1248 new_itr = IXGBE_20K_ITR;
1252 new_itr = IXGBE_8K_ITR;
1256 if (new_itr != q_vector->itr) {
1257 /* do an exponential smoothing */
1258 new_itr = (10 * new_itr * q_vector->itr) /
1259 ((9 * new_itr) + q_vector->itr);
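/* the weighting of (10 * new * old) / (9 * new + old) lets the ITR drift
 * toward the newly computed value instead of jumping straight to it
 */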
1261 /* save the algorithm value here */
1262 q_vector->itr = new_itr;
1264 ixgbevf_write_eitr(q_vector);
1268 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1270 struct ixgbevf_adapter *adapter = data;
1271 struct ixgbe_hw *hw = &adapter->hw;
1273 hw->mac.get_link_status = 1;
1275 ixgbevf_service_event_schedule(adapter);
1277 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1283 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1285 * @data: pointer to our q_vector struct for this interrupt vector
1287 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1289 struct ixgbevf_q_vector *q_vector = data;
1291 /* EIAM disabled interrupts (on this vector) for us */
1292 if (q_vector->rx.ring || q_vector->tx.ring)
1293 napi_schedule(&q_vector->napi);
1298 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1301 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1303 a->rx_ring[r_idx]->next = q_vector->rx.ring;
1304 q_vector->rx.ring = a->rx_ring[r_idx];
1305 q_vector->rx.count++;
1308 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1311 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1313 a->tx_ring[t_idx]->next = q_vector->tx.ring;
1314 q_vector->tx.ring = a->tx_ring[t_idx];
1315 q_vector->tx.count++;
1319 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1320 * @adapter: board private structure to initialize
1322 * This function maps descriptor rings to the queue-specific vectors
1323 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1324 * one vector per ring/queue, but on a constrained vector budget, we
1325 * group the rings as "efficiently" as possible. You would add new
1326 * mapping configurations in here.
1328 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1332 int rxr_idx = 0, txr_idx = 0;
1333 int rxr_remaining = adapter->num_rx_queues;
1334 int txr_remaining = adapter->num_tx_queues;
1339 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1341 /* The ideal configuration...
1342 * We have enough vectors to map one per queue.
1344 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1345 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1346 map_vector_to_rxq(adapter, v_start, rxr_idx);
1348 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1349 map_vector_to_txq(adapter, v_start, txr_idx);
1353 /* If we don't have enough vectors for a 1-to-1
1354 * mapping, we'll have to group them so there are
1355 * multiple queues per vector.
1357 /* Re-adjusting *qpv takes care of the remainder. */
1358 for (i = v_start; i < q_vectors; i++) {
1359 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1360 for (j = 0; j < rqpv; j++) {
1361 map_vector_to_rxq(adapter, i, rxr_idx);
1366 for (i = v_start; i < q_vectors; i++) {
1367 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1368 for (j = 0; j < tqpv; j++) {
1369 map_vector_to_txq(adapter, i, txr_idx);
1380 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1381 * @adapter: board private structure
1383 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1384 * interrupts from the kernel.
1386 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1388 struct net_device *netdev = adapter->netdev;
1389 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1393 for (vector = 0; vector < q_vectors; vector++) {
1394 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1395 struct msix_entry *entry = &adapter->msix_entries[vector];
1397 if (q_vector->tx.ring && q_vector->rx.ring) {
1398 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1399 "%s-%s-%d", netdev->name, "TxRx", ri++);
1401 } else if (q_vector->rx.ring) {
1402 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1403 "%s-%s-%d", netdev->name, "rx", ri++);
1404 } else if (q_vector->tx.ring) {
1405 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
1406 "%s-%s-%d", netdev->name, "tx", ti++);
1408 /* skip this unused q_vector */
1411 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1412 q_vector->name, q_vector);
1414 hw_dbg(&adapter->hw,
1415 "request_irq failed for MSIX interrupt Error: %d\n",
1417 goto free_queue_irqs;
1421 err = request_irq(adapter->msix_entries[vector].vector,
1422 &ixgbevf_msix_other, 0, netdev->name, adapter);
1424 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1426 goto free_queue_irqs;
1434 free_irq(adapter->msix_entries[vector].vector,
1435 adapter->q_vector[vector]);
1437 /* This failure is non-recoverable - it indicates the system is
1438 * out of MSIX vector resources and the VF driver cannot run
1439 * without them. Set the number of msix vectors to zero
1440 * indicating that not enough can be allocated. The error
1441 * will be returned to the user indicating device open failed.
1442 * Any further attempts to force the driver to open will also
1443 * fail. The only way to recover is to unload the driver and
1444 * reload it again. If the system has recovered some MSIX
1445 * vectors then it may succeed.
1447 adapter->num_msix_vectors = 0;
1451 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1453 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1455 for (i = 0; i < q_vectors; i++) {
1456 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1458 q_vector->rx.ring = NULL;
1459 q_vector->tx.ring = NULL;
1460 q_vector->rx.count = 0;
1461 q_vector->tx.count = 0;
1466 * ixgbevf_request_irq - initialize interrupts
1467 * @adapter: board private structure
1469 * Attempts to configure interrupts using the best available
1470 * capabilities of the hardware and kernel.
1472 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1476 err = ixgbevf_request_msix_irqs(adapter);
1479 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1484 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1488 q_vectors = adapter->num_msix_vectors;
1491 free_irq(adapter->msix_entries[i].vector, adapter);
1494 for (; i >= 0; i--) {
1495 /* free only the irqs that were actually requested */
1496 if (!adapter->q_vector[i]->rx.ring &&
1497 !adapter->q_vector[i]->tx.ring)
1500 free_irq(adapter->msix_entries[i].vector,
1501 adapter->q_vector[i]);
1504 ixgbevf_reset_q_vectors(adapter);
1508 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1509 * @adapter: board private structure
1511 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1513 struct ixgbe_hw *hw = &adapter->hw;
1516 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1517 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1518 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1520 IXGBE_WRITE_FLUSH(hw);
1522 for (i = 0; i < adapter->num_msix_vectors; i++)
1523 synchronize_irq(adapter->msix_entries[i].vector);
1527 * ixgbevf_irq_enable - Enable default interrupt generation settings
1528 * @adapter: board private structure
1530 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1532 struct ixgbe_hw *hw = &adapter->hw;
1534 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1535 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1536 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1540 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1541 * @adapter: board private structure
1542 * @ring: structure containing ring specific data
1544 * Configure the Tx descriptor ring after a reset.
1546 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1547 struct ixgbevf_ring *ring)
1549 struct ixgbe_hw *hw = &adapter->hw;
1550 u64 tdba = ring->dma;
1552 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1553 u8 reg_idx = ring->reg_idx;
1555 /* disable queue to avoid issues while updating state */
1556 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1557 IXGBE_WRITE_FLUSH(hw);
1559 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1560 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1561 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1562 ring->count * sizeof(union ixgbe_adv_tx_desc));
1564 /* disable head writeback */
1565 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1566 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1568 /* enable relaxed ordering */
1569 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1570 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1571 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1573 /* reset head and tail pointers */
1574 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1575 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1576 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1578 /* reset ntu and ntc to place SW in sync with hardware */
1579 ring->next_to_clean = 0;
1580 ring->next_to_use = 0;
1582 /* In order to avoid issues WTHRESH + PTHRESH should always be equal
1583 * to or less than the number of on-chip descriptors, which is currently 40.
1586 txdctl |= (8 << 16); /* WTHRESH = 8 */
1588 /* Setting PTHRESH to 32 improves performance */
1589 txdctl |= (1 << 8) | /* HTHRESH = 1 */
1590 32; /* PTHRESH = 32 */
1592 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1594 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1596 /* poll to verify queue is enabled */
1598 usleep_range(1000, 2000);
1599 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1600 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1602 pr_err("Could not enable Tx Queue %d\n", reg_idx);
1606 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1607 * @adapter: board private structure
1609 * Configure the Tx unit of the MAC after a reset.
1611 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1615 /* Setup the HW Tx Head and Tail descriptor pointers */
1616 for (i = 0; i < adapter->num_tx_queues; i++)
1617 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1620 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1622 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1624 struct ixgbe_hw *hw = &adapter->hw;
1627 srrctl = IXGBE_SRRCTL_DROP_EN;
1629 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1630 srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1631 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
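/* use advanced one-buffer descriptors with IXGBEVF_RX_BUFSZ receive buffers;
 * DROP_EN makes the hardware drop frames once the queue runs out of
 * descriptors rather than backing up
 */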
1633 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1636 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1638 struct ixgbe_hw *hw = &adapter->hw;
1640 /* PSRTYPE must be initialized in 82599 */
1641 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1642 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1643 IXGBE_PSRTYPE_L2HDR;
1645 if (adapter->num_rx_queues > 1)
1646 psrtype |= 1 << 29;
1648 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1651 #define IXGBEVF_MAX_RX_DESC_POLL 10
1652 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1653 struct ixgbevf_ring *ring)
1655 struct ixgbe_hw *hw = &adapter->hw;
1656 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1658 u8 reg_idx = ring->reg_idx;
1660 if (IXGBE_REMOVED(hw->hw_addr))
1662 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1663 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1665 /* write value back with RXDCTL.ENABLE bit cleared */
1666 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1668 /* the hardware may take up to 100us to really disable the Rx queue */
1671 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1672 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1675 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1679 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1680 struct ixgbevf_ring *ring)
1682 struct ixgbe_hw *hw = &adapter->hw;
1683 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1685 u8 reg_idx = ring->reg_idx;
1687 if (IXGBE_REMOVED(hw->hw_addr))
1690 usleep_range(1000, 2000);
1691 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1692 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1695 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1699 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1701 struct ixgbe_hw *hw = &adapter->hw;
1702 u32 vfmrqc = 0, vfreta = 0;
1703 u16 rss_i = adapter->num_rx_queues;
1706 /* Fill out hash function seeds */
1707 netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
1708 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1709 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
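/* program the redirection table: each 32-bit VFRETA register packs four
 * 8-bit entries, with (i & 0x3) selecting the byte and (i >> 2) the register
 */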
1711 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1715 adapter->rss_indir_tbl[i] = j;
1717 vfreta |= j << (i & 0x3) * 8;
1719 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1724 /* Perform hash on these packet types */
1725 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1726 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1727 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1728 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1730 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1732 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1735 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1736 struct ixgbevf_ring *ring)
1738 struct ixgbe_hw *hw = &adapter->hw;
1739 u64 rdba = ring->dma;
1741 u8 reg_idx = ring->reg_idx;
1743 /* disable queue to avoid issues while updating state */
1744 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1745 ixgbevf_disable_rx_queue(adapter, ring);
1747 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1748 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1749 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1750 ring->count * sizeof(union ixgbe_adv_rx_desc));
1752 /* enable relaxed ordering */
1753 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1754 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1756 /* reset head and tail pointers */
1757 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1758 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1759 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1761 /* reset ntu and ntc to place SW in sync with hardware */
1762 ring->next_to_clean = 0;
1763 ring->next_to_use = 0;
1764 ring->next_to_alloc = 0;
1766 ixgbevf_configure_srrctl(adapter, reg_idx);
1768 /* allow any size packet since we can handle overflow */
1769 rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
1771 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1772 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1774 ixgbevf_rx_desc_queue_enable(adapter, ring);
1775 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1779 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1780 * @adapter: board private structure
1782 * Configure the Rx unit of the MAC after a reset.
1784 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1787 struct ixgbe_hw *hw = &adapter->hw;
1788 struct net_device *netdev = adapter->netdev;
1790 ixgbevf_setup_psrtype(adapter);
1791 if (hw->mac.type >= ixgbe_mac_X550_vf)
1792 ixgbevf_setup_vfmrqc(adapter);
1794 /* notify the PF of our intent to use this size of frame */
1795 ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
1797 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1798 * the Base and Length of the Rx Descriptor Ring
1800 for (i = 0; i < adapter->num_rx_queues; i++)
1801 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
1804 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1805 __be16 proto, u16 vid)
1807 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1808 struct ixgbe_hw *hw = &adapter->hw;
1811 spin_lock_bh(&adapter->mbx_lock);
1813 /* add VID to filter table */
1814 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1816 spin_unlock_bh(&adapter->mbx_lock);
1818 /* translate error return types so error makes sense */
1819 if (err == IXGBE_ERR_MBX)
1822 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1825 set_bit(vid, adapter->active_vlans);
1830 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1831 __be16 proto, u16 vid)
1833 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1834 struct ixgbe_hw *hw = &adapter->hw;
1835 int err = -EOPNOTSUPP;
1837 spin_lock_bh(&adapter->mbx_lock);
1839 /* remove VID from filter table */
1840 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1842 spin_unlock_bh(&adapter->mbx_lock);
1844 clear_bit(vid, adapter->active_vlans);
1849 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1853 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1854 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1855 htons(ETH_P_8021Q), vid);
1858 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1860 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1861 struct ixgbe_hw *hw = &adapter->hw;
1864 if (!netdev_uc_empty(netdev)) {
1865 struct netdev_hw_addr *ha;
1867 netdev_for_each_uc_addr(ha, netdev) {
1868 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1872 /* If the list is empty then send message to PF driver to
1873 * clear all MAC VLANs on this VF.
1875 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1882 * ixgbevf_set_rx_mode - Multicast and unicast set
1883 * @netdev: network interface device structure
1885 * The set_rx_method entry point is called whenever the multicast address
1886 * list, unicast address list or the network interface flags are updated.
1887 * This routine is responsible for configuring the hardware for proper
1888 * multicast mode and configuring requested unicast filters.
1890 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1892 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1893 struct ixgbe_hw *hw = &adapter->hw;
1894 unsigned int flags = netdev->flags;
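/* translate the netdev flags into the xcast filtering mode requested from
 * the PF: all-multicast, plain multicast/broadcast, or none
 */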
1897 xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
1898 (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
1899 IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
1901 spin_lock_bh(&adapter->mbx_lock);
1903 hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
1905 /* reprogram multicast list */
1906 hw->mac.ops.update_mc_addr_list(hw, netdev);
1908 ixgbevf_write_uc_addr_list(netdev);
1910 spin_unlock_bh(&adapter->mbx_lock);
1913 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1916 struct ixgbevf_q_vector *q_vector;
1917 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1919 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1920 q_vector = adapter->q_vector[q_idx];
1921 #ifdef CONFIG_NET_RX_BUSY_POLL
1922 ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
1924 napi_enable(&q_vector->napi);
1928 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1931 struct ixgbevf_q_vector *q_vector;
1932 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1934 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1935 q_vector = adapter->q_vector[q_idx];
1936 napi_disable(&q_vector->napi);
1937 #ifdef CONFIG_NET_RX_BUSY_POLL
1938 while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
1939 pr_info("QV %d locked\n", q_idx);
1940 usleep_range(1000, 20000);
1942 #endif /* CONFIG_NET_RX_BUSY_POLL */
1946 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1948 struct ixgbe_hw *hw = &adapter->hw;
1949 unsigned int def_q = 0;
1950 unsigned int num_tcs = 0;
1951 unsigned int num_rx_queues = adapter->num_rx_queues;
1952 unsigned int num_tx_queues = adapter->num_tx_queues;
1955 spin_lock_bh(&adapter->mbx_lock);
1957 /* fetch queue configuration from the PF */
1958 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1960 spin_unlock_bh(&adapter->mbx_lock);
1966 /* we need only one Tx queue */
1969 /* update default Tx ring register index */
1970 adapter->tx_ring[0]->reg_idx = def_q;
1972 /* we need as many queues as traffic classes */
1973 num_rx_queues = num_tcs;
1976 /* if we have a bad config abort request queue reset */
1977 if ((adapter->num_rx_queues != num_rx_queues) ||
1978 (adapter->num_tx_queues != num_tx_queues)) {
1979 /* force mailbox timeout to prevent further messages */
1980 hw->mbx.timeout = 0;
1982 /* wait for watchdog to come around and bail us out */
1983 adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
1989 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1991 ixgbevf_configure_dcb(adapter);
1993 ixgbevf_set_rx_mode(adapter->netdev);
1995 ixgbevf_restore_vlan(adapter);
1997 ixgbevf_configure_tx(adapter);
1998 ixgbevf_configure_rx(adapter);
2001 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2003 /* Only save pre-reset stats if there are some */
2004 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2005 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2006 adapter->stats.base_vfgprc;
2007 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2008 adapter->stats.base_vfgptc;
2009 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2010 adapter->stats.base_vfgorc;
2011 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2012 adapter->stats.base_vfgotc;
2013 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2014 adapter->stats.base_vfmprc;
2018 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2020 struct ixgbe_hw *hw = &adapter->hw;
2022 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2023 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2024 adapter->stats.last_vfgorc |=
2025 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2026 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2027 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2028 adapter->stats.last_vfgotc |=
2029 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2030 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2032 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2033 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2034 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2035 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2036 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2039 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2041 struct ixgbe_hw *hw = &adapter->hw;
2042 int api[] = { ixgbe_mbox_api_12,
2045 ixgbe_mbox_api_unknown };
2046 int err = 0, idx = 0;
2048 spin_lock_bh(&adapter->mbx_lock);
2050 while (api[idx] != ixgbe_mbox_api_unknown) {
2051 err = ixgbevf_negotiate_api_version(hw, api[idx]);
2057 spin_unlock_bh(&adapter->mbx_lock);
2060 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2062 struct net_device *netdev = adapter->netdev;
2063 struct ixgbe_hw *hw = &adapter->hw;
2065 ixgbevf_configure_msix(adapter);
2067 spin_lock_bh(&adapter->mbx_lock);
2069 if (is_valid_ether_addr(hw->mac.addr))
2070 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2071 else
2072 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2074 spin_unlock_bh(&adapter->mbx_lock);
2076 smp_mb__before_atomic();
2077 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2078 ixgbevf_napi_enable_all(adapter);
2080 /* clear any pending interrupts, may auto mask */
2081 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2082 ixgbevf_irq_enable(adapter);
2084 /* enable transmits */
2085 netif_tx_start_all_queues(netdev);
2087 ixgbevf_save_reset_stats(adapter);
2088 ixgbevf_init_last_counter_stats(adapter);
2090 hw->mac.get_link_status = 1;
2091 mod_timer(&adapter->service_timer, jiffies);
2094 void ixgbevf_up(struct ixgbevf_adapter *adapter)
2096 ixgbevf_configure(adapter);
2098 ixgbevf_up_complete(adapter);
2102 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2103 * @rx_ring: ring to free buffers from
2105 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2107 struct device *dev = rx_ring->dev;
2111 /* Free Rx ring sk_buff */
2113 dev_kfree_skb(rx_ring->skb);
2114 rx_ring->skb = NULL;
2117 /* ring already cleared, nothing to do */
2118 if (!rx_ring->rx_buffer_info)
2121 /* Free all the Rx ring pages */
2122 for (i = 0; i < rx_ring->count; i++) {
2123 struct ixgbevf_rx_buffer *rx_buffer;
2125 rx_buffer = &rx_ring->rx_buffer_info[i];
2127 dma_unmap_page(dev, rx_buffer->dma,
2128 PAGE_SIZE, DMA_FROM_DEVICE);
2130 if (rx_buffer->page)
2131 __free_page(rx_buffer->page);
2132 rx_buffer->page = NULL;
2135 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2136 memset(rx_ring->rx_buffer_info, 0, size);
2138 /* Zero out the descriptor ring */
2139 memset(rx_ring->desc, 0, rx_ring->size);
2143 * ixgbevf_clean_tx_ring - Free Tx Buffers
2144 * @tx_ring: ring to be cleaned
2146 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2148 struct ixgbevf_tx_buffer *tx_buffer_info;
2152 if (!tx_ring->tx_buffer_info)
2155 /* Free all the Tx ring sk_buffs */
2156 for (i = 0; i < tx_ring->count; i++) {
2157 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2158 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2161 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2162 memset(tx_ring->tx_buffer_info, 0, size);
2164 memset(tx_ring->desc, 0, tx_ring->size);
2168 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2169 * @adapter: board private structure
2171 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2175 for (i = 0; i < adapter->num_rx_queues; i++)
2176 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2180 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2181 * @adapter: board private structure
2183 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2187 for (i = 0; i < adapter->num_tx_queues; i++)
2188 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2191 void ixgbevf_down(struct ixgbevf_adapter *adapter)
2193 struct net_device *netdev = adapter->netdev;
2194 struct ixgbe_hw *hw = &adapter->hw;
2197 /* signal that we are down to the interrupt handler */
2198 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2199 return; /* do nothing if already down */
2201 /* disable all enabled Rx queues */
2202 for (i = 0; i < adapter->num_rx_queues; i++)
2203 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2205 usleep_range(10000, 20000);
2207 netif_tx_stop_all_queues(netdev);
2209 /* call carrier off first to avoid false dev_watchdog timeouts */
2210 netif_carrier_off(netdev);
2211 netif_tx_disable(netdev);
2213 ixgbevf_irq_disable(adapter);
2215 ixgbevf_napi_disable_all(adapter);
2217 del_timer_sync(&adapter->service_timer);
2219 /* disable transmits in the hardware now that interrupts are off */
2220 for (i = 0; i < adapter->num_tx_queues; i++) {
2221 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2223 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2224 IXGBE_TXDCTL_SWFLSH);
2227 if (!pci_channel_offline(adapter->pdev))
2228 ixgbevf_reset(adapter);
2230 ixgbevf_clean_all_tx_rings(adapter);
2231 ixgbevf_clean_all_rx_rings(adapter);
2234 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2236 WARN_ON(in_interrupt());
2238 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2241 ixgbevf_down(adapter);
2242 ixgbevf_up(adapter);
2244 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2247 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2249 struct ixgbe_hw *hw = &adapter->hw;
2250 struct net_device *netdev = adapter->netdev;
2252 if (hw->mac.ops.reset_hw(hw)) {
2253 hw_dbg(hw, "PF still resetting\n");
2254 } else {
2255 hw->mac.ops.init_hw(hw);
2256 ixgbevf_negotiate_api(adapter);
2257 }
2259 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2260 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2262 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
2266 adapter->last_reset = jiffies;
2269 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2272 int vector_threshold;
2274 /* We'll want at least 2 (vector_threshold):
2275 * 1) TxQ[0] + RxQ[0] handler
2276 * 2) Other (Link Status Change, etc.)
2278 vector_threshold = MIN_MSIX_COUNT;
2280 /* The more we get, the more we will assign to Tx/Rx Cleanup
2281 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2282 * Right now, we simply care about how many we'll get; we'll
2283 * set them up later while requesting IRQs.
2285 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2286 vector_threshold, vectors);
2289 dev_err(&adapter->pdev->dev,
2290 "Unable to allocate MSI-X interrupts\n");
2291 kfree(adapter->msix_entries);
2292 adapter->msix_entries = NULL;
2296 /* Adjust for only the vectors we'll use, which is minimum
2297 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2298 * vectors we were allocated.
2300 adapter->num_msix_vectors = vectors;
2306 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2307 * @adapter: board private structure to initialize
2309 * This is the top-level queue allocation routine. The order here is very
2310 * important, starting with the largest set of features turned on at once,
2311 * and ending with the smallest set of features. This way large combinations
2312 * can be allocated if they're enabled, and smaller combinations are the
2313 * fallthrough conditions.
2316 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2318 struct ixgbe_hw *hw = &adapter->hw;
2319 unsigned int def_q = 0;
2320 unsigned int num_tcs = 0;
2323 /* Start with base case */
2324 adapter->num_rx_queues = 1;
2325 adapter->num_tx_queues = 1;
2327 spin_lock_bh(&adapter->mbx_lock);
2329 /* fetch queue configuration from the PF */
2330 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2332 spin_unlock_bh(&adapter->mbx_lock);
2337 /* we need as many queues as traffic classes */
2339 adapter->num_rx_queues = num_tcs;
2341 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2343 switch (hw->api_version) {
2344 case ixgbe_mbox_api_11:
2345 case ixgbe_mbox_api_12:
2346 adapter->num_rx_queues = rss;
2347 adapter->num_tx_queues = rss;
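/* Worked example (illustrative): with no DCB traffic classes and 8 online
 * CPUs, and assuming IXGBEVF_MAX_RSS_QUEUES is 2, rss = min(8, 2) = 2, so a
 * VF that negotiated mailbox API 1.1 or 1.2 ends up with 2 Rx and 2 Tx
 * queues; older API versions keep the single-queue defaults set above.
 */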
2355 * ixgbevf_alloc_queues - Allocate memory for all rings
2356 * @adapter: board private structure to initialize
2358 * We allocate one ring per queue at run-time since we don't know the
2359 * number of queues at compile-time. The polling_netdev array is
2360 * intended for Multiqueue, but should work fine with a single queue.
2362 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2364 struct ixgbevf_ring *ring;
2367 for (; tx < adapter->num_tx_queues; tx++) {
2368 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2370 goto err_allocation;
2372 ring->dev = &adapter->pdev->dev;
2373 ring->netdev = adapter->netdev;
2374 ring->count = adapter->tx_ring_count;
2375 ring->queue_index = tx;
2378 adapter->tx_ring[tx] = ring;
2381 for (; rx < adapter->num_rx_queues; rx++) {
2382 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2384 goto err_allocation;
2386 ring->dev = &adapter->pdev->dev;
2387 ring->netdev = adapter->netdev;
2389 ring->count = adapter->rx_ring_count;
2390 ring->queue_index = rx;
2393 adapter->rx_ring[rx] = ring;
2400 kfree(adapter->tx_ring[--tx]);
2401 adapter->tx_ring[tx] = NULL;
2405 kfree(adapter->rx_ring[--rx]);
2406 adapter->rx_ring[rx] = NULL;
2412 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2413 * @adapter: board private structure to initialize
2415 * Attempt to configure the interrupts using the best available
2416 * capabilities of the hardware and the kernel.
2418 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2420 struct net_device *netdev = adapter->netdev;
2422 int vector, v_budget;
2424 /* It's easy to be greedy for MSI-X vectors, but it really
2425 * doesn't do us much good if we have a lot more vectors
2426 * than CPUs. So let's be conservative and only ask for
2427 * (roughly) the same number of vectors as there are CPUs.
2428 * The default is to use pairs of vectors.
2430 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2431 v_budget = min_t(int, v_budget, num_online_cpus());
2432 v_budget += NON_Q_VECTORS;
2434 /* A failure in MSI-X entry allocation isn't fatal, but it does
2435 * mean we disable MSI-X capabilities of the adapter.
2437 adapter->msix_entries = kcalloc(v_budget,
2438 sizeof(struct msix_entry), GFP_KERNEL);
2439 if (!adapter->msix_entries) {
2444 for (vector = 0; vector < v_budget; vector++)
2445 adapter->msix_entries[vector].entry = vector;
2447 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2451 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2455 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
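/* Worked example (illustrative): with 2 Tx and 2 Rx queues on a 16-CPU host,
 * v_budget = min(max(2, 2), 16) + NON_Q_VECTORS; assuming NON_Q_VECTORS is 1
 * (the mailbox/link vector), 3 MSI-X vectors are requested and
 * ixgbevf_acquire_msix_vectors() accepts anything down to MIN_MSIX_COUNT.
 */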
2462 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2463 * @adapter: board private structure to initialize
2465 * We allocate one q_vector per queue interrupt. If allocation fails we
2466 * return -ENOMEM.
2468 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2470 int q_idx, num_q_vectors;
2471 struct ixgbevf_q_vector *q_vector;
2473 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2475 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2476 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2479 q_vector->adapter = adapter;
2480 q_vector->v_idx = q_idx;
2481 netif_napi_add(adapter->netdev, &q_vector->napi,
2483 #ifdef CONFIG_NET_RX_BUSY_POLL
2484 napi_hash_add(&q_vector->napi);
2486 adapter->q_vector[q_idx] = q_vector;
2494 q_vector = adapter->q_vector[q_idx];
2495 #ifdef CONFIG_NET_RX_BUSY_POLL
2496 napi_hash_del(&q_vector->napi);
2498 netif_napi_del(&q_vector->napi);
2500 adapter->q_vector[q_idx] = NULL;
2506 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2507 * @adapter: board private structure to initialize
2509 * This function frees the memory allocated to the q_vectors. In addition if
2510 * NAPI is enabled it will delete any references to the NAPI struct prior
2511 * to freeing the q_vector.
2513 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2515 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2517 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2518 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2520 adapter->q_vector[q_idx] = NULL;
2521 #ifdef CONFIG_NET_RX_BUSY_POLL
2522 napi_hash_del(&q_vector->napi);
2524 netif_napi_del(&q_vector->napi);
2530 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2531 * @adapter: board private structure
2534 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2536 pci_disable_msix(adapter->pdev);
2537 kfree(adapter->msix_entries);
2538 adapter->msix_entries = NULL;
2542 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2543 * @adapter: board private structure to initialize
2546 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2550 /* Number of supported queues */
2551 ixgbevf_set_num_queues(adapter);
2553 err = ixgbevf_set_interrupt_capability(adapter);
2555 hw_dbg(&adapter->hw,
2556 "Unable to setup interrupt capabilities\n");
2557 goto err_set_interrupt;
2560 err = ixgbevf_alloc_q_vectors(adapter);
2562 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2563 goto err_alloc_q_vectors;
2566 err = ixgbevf_alloc_queues(adapter);
2568 pr_err("Unable to allocate memory for queues\n");
2569 goto err_alloc_queues;
2572 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
2573 (adapter->num_rx_queues > 1) ? "Enabled" :
2574 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2576 set_bit(__IXGBEVF_DOWN, &adapter->state);
2580 ixgbevf_free_q_vectors(adapter);
2581 err_alloc_q_vectors:
2582 ixgbevf_reset_interrupt_capability(adapter);
2588 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2589 * @adapter: board private structure to clear interrupt scheme on
2591 * We go through and clear interrupt-specific resources and reset the structure
2592 * to pre-load conditions.
2594 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2598 for (i = 0; i < adapter->num_tx_queues; i++) {
2599 kfree(adapter->tx_ring[i]);
2600 adapter->tx_ring[i] = NULL;
2602 for (i = 0; i < adapter->num_rx_queues; i++) {
2603 kfree(adapter->rx_ring[i]);
2604 adapter->rx_ring[i] = NULL;
2607 adapter->num_tx_queues = 0;
2608 adapter->num_rx_queues = 0;
2610 ixgbevf_free_q_vectors(adapter);
2611 ixgbevf_reset_interrupt_capability(adapter);
2615 * ixgbevf_sw_init - Initialize general software structures
2616 * @adapter: board private structure to initialize
2618 * ixgbevf_sw_init initializes the Adapter private data structure.
2619 * Fields are initialized based on PCI device information and
2620 * OS network device settings (MTU size).
2622 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2624 struct ixgbe_hw *hw = &adapter->hw;
2625 struct pci_dev *pdev = adapter->pdev;
2626 struct net_device *netdev = adapter->netdev;
2629 /* PCI config space info */
2630 hw->vendor_id = pdev->vendor;
2631 hw->device_id = pdev->device;
2632 hw->revision_id = pdev->revision;
2633 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2634 hw->subsystem_device_id = pdev->subsystem_device;
2636 hw->mbx.ops.init_params(hw);
2638 /* assume legacy case in which PF would only give VF 2 queues */
2639 hw->mac.max_tx_queues = 2;
2640 hw->mac.max_rx_queues = 2;
2642 /* lock to protect mailbox accesses */
2643 spin_lock_init(&adapter->mbx_lock);
2645 err = hw->mac.ops.reset_hw(hw);
2647 dev_info(&pdev->dev,
2648 "PF still in reset state. Is the PF interface up?\n");
2650 err = hw->mac.ops.init_hw(hw);
2652 pr_err("init_shared_code failed: %d\n", err);
2655 ixgbevf_negotiate_api(adapter);
2656 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2658 dev_info(&pdev->dev, "Error reading MAC address\n");
2659 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2660 dev_info(&pdev->dev,
2661 "MAC address not assigned by administrator.\n");
2662 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2665 if (!is_valid_ether_addr(netdev->dev_addr)) {
2666 dev_info(&pdev->dev, "Assigning random MAC address\n");
2667 eth_hw_addr_random(netdev);
2668 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2671 /* Enable dynamic interrupt throttling rates */
2672 adapter->rx_itr_setting = 1;
2673 adapter->tx_itr_setting = 1;
2675 /* set default ring sizes */
2676 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2677 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2679 set_bit(__IXGBEVF_DOWN, &adapter->state);
2686 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2688 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2689 if (current_counter < last_counter) \
2690 counter += 0x100000000LL; \
2691 last_counter = current_counter; \
2692 counter &= 0xFFFFFFFF00000000LL; \
2693 counter |= current_counter; \
2696 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2698 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2699 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2700 u64 current_counter = (current_counter_msb << 32) | \
2701 current_counter_lsb; \
2702 if (current_counter < last_counter) \
2703 counter += 0x1000000000LL; \
2704 last_counter = current_counter; \
2705 counter &= 0xFFFFFFF000000000LL; \
2706 counter |= current_counter; \
2709 * ixgbevf_update_stats - Update the board statistics counters.
2710 * @adapter: board private structure
2712 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2714 struct ixgbe_hw *hw = &adapter->hw;
2717 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2718 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2721 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2722 adapter->stats.vfgprc);
2723 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2724 adapter->stats.vfgptc);
2725 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2726 adapter->stats.last_vfgorc,
2727 adapter->stats.vfgorc);
2728 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2729 adapter->stats.last_vfgotc,
2730 adapter->stats.vfgotc);
2731 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2732 adapter->stats.vfmprc);
2734 for (i = 0; i < adapter->num_rx_queues; i++) {
2735 adapter->hw_csum_rx_error +=
2736 adapter->rx_ring[i]->hw_csum_rx_error;
2737 adapter->rx_ring[i]->hw_csum_rx_error = 0;
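/* Illustrative sketch (not part of the driver): the rollover handling done by
 * UPDATE_VF_COUNTER_32bit above, written as a plain helper. 'last' and
 * 'total' are assumed to persist between calls, as the adapter stats fields
 * do. E.g. last = 0xFFFFFFF0 and cur = 0x00000010 means the hardware counter
 * wrapped, so 2^32 is added before the low 32 bits are spliced back in.
 */
static inline u64 example_extend_counter_32(u32 cur, u32 *last, u64 *total)
{
	if (cur < *last)			/* hardware counter wrapped */
		*total += 0x100000000ULL;
	*last = cur;
	*total &= 0xFFFFFFFF00000000ULL;	/* keep accumulated upper bits */
	*total |= cur;				/* splice in current low 32 bits */
	return *total;
}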
2742 * ixgbevf_service_timer - Timer Call-back
2743 * @data: pointer to adapter cast into an unsigned long
2745 static void ixgbevf_service_timer(unsigned long data)
2747 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2749 /* Reset the timer */
2750 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
2752 ixgbevf_service_event_schedule(adapter);
2755 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
2757 if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
2760 adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
2762 /* If we're already down or resetting, just bail */
2763 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2764 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2767 adapter->tx_timeout_count++;
2769 ixgbevf_reinit_locked(adapter);
2773 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
2774 * @adapter: pointer to the device adapter structure
2776 * This function serves two purposes. First, it strobes the interrupt lines
2777 * in order to make certain interrupts are occurring. Second, it sets the
2778 * bits needed to check for TX hangs. As a result we should immediately
2779 * determine if a hang has occurred.
2781 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
2783 struct ixgbe_hw *hw = &adapter->hw;
2787 /* If we're down or resetting, just bail */
2788 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2789 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2792 /* Force detection of hung controller */
2793 if (netif_carrier_ok(adapter->netdev)) {
2794 for (i = 0; i < adapter->num_tx_queues; i++)
2795 set_check_for_tx_hang(adapter->tx_ring[i]);
2798 /* get one bit for every active Tx/Rx interrupt vector */
2799 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2800 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2802 if (qv->rx.ring || qv->tx.ring)
2806 /* Cause software interrupt to ensure rings are cleaned */
2807 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2811 * ixgbevf_watchdog_update_link - update the link status
2812 * @adapter: pointer to the device adapter structure
2814 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
2816 struct ixgbe_hw *hw = &adapter->hw;
2817 u32 link_speed = adapter->link_speed;
2818 bool link_up = adapter->link_up;
2821 spin_lock_bh(&adapter->mbx_lock);
2823 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2825 spin_unlock_bh(&adapter->mbx_lock);
2827 /* if the link check returns an error we will need to reset */
2828 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
2829 adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
2833 adapter->link_up = link_up;
2834 adapter->link_speed = link_speed;
2838 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
2839 * print link up message
2840 * @adapter: pointer to the device adapter structure
2842 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
2844 struct net_device *netdev = adapter->netdev;
2846 /* only continue if link was previously down */
2847 if (netif_carrier_ok(netdev))
2850 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
2851 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2853 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
2855 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
2859 netif_carrier_on(netdev);
2863 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
2864 * print link down message
2865 * @adapter: pointer to the adapter structure
2867 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
2869 struct net_device *netdev = adapter->netdev;
2871 adapter->link_speed = 0;
2873 /* only continue if link was up previously */
2874 if (!netif_carrier_ok(netdev))
2877 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2879 netif_carrier_off(netdev);
2883 * ixgbevf_watchdog_subtask - worker thread to bring link up
2884 * @adapter: pointer to the device adapter structure
2886 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
2888 /* if interface is down do nothing */
2889 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2890 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2893 ixgbevf_watchdog_update_link(adapter);
2895 if (adapter->link_up)
2896 ixgbevf_watchdog_link_is_up(adapter);
2897 else
2898 ixgbevf_watchdog_link_is_down(adapter);
2900 ixgbevf_update_stats(adapter);
2904 * ixgbevf_service_task - manages and runs subtasks
2905 * @work: pointer to work_struct containing our data
2907 static void ixgbevf_service_task(struct work_struct *work)
2909 struct ixgbevf_adapter *adapter = container_of(work,
2910 struct ixgbevf_adapter,
2912 struct ixgbe_hw *hw = &adapter->hw;
2914 if (IXGBE_REMOVED(hw->hw_addr)) {
2915 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2917 ixgbevf_down(adapter);
2923 ixgbevf_queue_reset_subtask(adapter);
2924 ixgbevf_reset_subtask(adapter);
2925 ixgbevf_watchdog_subtask(adapter);
2926 ixgbevf_check_hang_subtask(adapter);
2928 ixgbevf_service_event_complete(adapter);
2932 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2933 * @tx_ring: Tx descriptor ring for a specific queue
2935 * Free all transmit software resources
2937 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2939 ixgbevf_clean_tx_ring(tx_ring);
2941 vfree(tx_ring->tx_buffer_info);
2942 tx_ring->tx_buffer_info = NULL;
2944 /* if not set, then don't free */
2948 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2951 tx_ring->desc = NULL;
2955 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2956 * @adapter: board private structure
2958 * Free all transmit software resources
2960 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2964 for (i = 0; i < adapter->num_tx_queues; i++)
2965 if (adapter->tx_ring[i]->desc)
2966 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2970 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2971 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
2973 * Return 0 on success, negative on failure
2975 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2979 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2980 tx_ring->tx_buffer_info = vzalloc(size);
2981 if (!tx_ring->tx_buffer_info)
2984 /* round up to nearest 4K */
2985 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2986 tx_ring->size = ALIGN(tx_ring->size, 4096);
2988 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2989 &tx_ring->dma, GFP_KERNEL);
2996 vfree(tx_ring->tx_buffer_info);
2997 tx_ring->tx_buffer_info = NULL;
2998 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3003 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3004 * @adapter: board private structure
3006 * If this function returns with an error, then it's possible one or
3007 * more of the rings is populated (while the rest are not). It is the
3008 * caller's duty to clean those orphaned rings.
3010 * Return 0 on success, negative on failure
3012 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3016 for (i = 0; i < adapter->num_tx_queues; i++) {
3017 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3020 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3028 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3029 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3031 * Returns 0 on success, negative on failure
3033 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
3037 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3038 rx_ring->rx_buffer_info = vzalloc(size);
3039 if (!rx_ring->rx_buffer_info)
3042 /* Round up to nearest 4K */
3043 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3044 rx_ring->size = ALIGN(rx_ring->size, 4096);
3046 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3047 &rx_ring->dma, GFP_KERNEL);
3054 vfree(rx_ring->rx_buffer_info);
3055 rx_ring->rx_buffer_info = NULL;
3056 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3061 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3062 * @adapter: board private structure
3064 * If this function returns with an error, then it's possible one or
3065 * more of the rings is populated (while the rest are not). It is the
3066 * caller's duty to clean those orphaned rings.
3068 * Return 0 on success, negative on failure
3070 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3074 for (i = 0; i < adapter->num_rx_queues; i++) {
3075 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
3078 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3085 * ixgbevf_free_rx_resources - Free Rx Resources
3086 * @rx_ring: ring to clean the resources from
3088 * Free all receive software resources
3090 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3092 ixgbevf_clean_rx_ring(rx_ring);
3094 vfree(rx_ring->rx_buffer_info);
3095 rx_ring->rx_buffer_info = NULL;
3097 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3100 rx_ring->desc = NULL;
3104 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3105 * @adapter: board private structure
3107 * Free all receive software resources
3109 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3113 for (i = 0; i < adapter->num_rx_queues; i++)
3114 if (adapter->rx_ring[i]->desc)
3115 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3119 * ixgbevf_open - Called when a network interface is made active
3120 * @netdev: network interface device structure
3122 * Returns 0 on success, negative value on failure
3124 * The open entry point is called when a network interface is made
3125 * active by the system (IFF_UP). At this point all resources needed
3126 * for transmit and receive operations are allocated, the interrupt
3127 * handler is registered with the OS, the watchdog timer is started,
3128 * and the stack is notified that the interface is ready.
3130 static int ixgbevf_open(struct net_device *netdev)
3132 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3133 struct ixgbe_hw *hw = &adapter->hw;
3136 /* A previous failure to open the device because of a lack of
3137 * available MSIX vector resources may have reset the number
3138 * of msix vectors variable to zero. The only way to recover
3139 * is to unload/reload the driver and hope that the system has
3140 * been able to recover some MSIX vector resources.
3142 if (!adapter->num_msix_vectors)
3145 if (hw->adapter_stopped) {
3146 ixgbevf_reset(adapter);
3147 /* if adapter is still stopped then PF isn't up and
3148 * the VF can't start.
3150 if (hw->adapter_stopped) {
3151 err = IXGBE_ERR_MBX;
3152 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3153 goto err_setup_reset;
3157 /* disallow open during test */
3158 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3161 netif_carrier_off(netdev);
3163 /* allocate transmit descriptors */
3164 err = ixgbevf_setup_all_tx_resources(adapter);
3168 /* allocate receive descriptors */
3169 err = ixgbevf_setup_all_rx_resources(adapter);
3173 ixgbevf_configure(adapter);
3175 /* Map the Tx/Rx rings to the vectors we were allotted.
3176 * Since request_irq is called in this function, the rings
3177 * must be mapped to vectors *before* up_complete.
3179 ixgbevf_map_rings_to_vectors(adapter);
3181 err = ixgbevf_request_irq(adapter);
3185 ixgbevf_up_complete(adapter);
3190 ixgbevf_down(adapter);
3192 ixgbevf_free_all_rx_resources(adapter);
3194 ixgbevf_free_all_tx_resources(adapter);
3195 ixgbevf_reset(adapter);
3203 * ixgbevf_close - Disables a network interface
3204 * @netdev: network interface device structure
3206 * Returns 0; this is not allowed to fail
3208 * The close entry point is called when an interface is de-activated
3209 * by the OS. The hardware is still under the driver's control, but
3210 * needs to be disabled. A global MAC reset is issued to stop the
3211 * hardware, and all transmit and receive resources are freed.
3213 static int ixgbevf_close(struct net_device *netdev)
3215 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3217 ixgbevf_down(adapter);
3218 ixgbevf_free_irq(adapter);
3220 ixgbevf_free_all_tx_resources(adapter);
3221 ixgbevf_free_all_rx_resources(adapter);
3226 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3228 struct net_device *dev = adapter->netdev;
3230 if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
3233 adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
3235 /* if interface is down do nothing */
3236 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3237 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3240 /* Hardware has to reinitialize queues and interrupts to
3241 * match packet buffer alignment. Unfortunately, the
3242 * hardware is not flexible enough to do this dynamically.
3244 if (netif_running(dev))
3247 ixgbevf_clear_interrupt_scheme(adapter);
3248 ixgbevf_init_interrupt_scheme(adapter);
3250 if (netif_running(dev))
3254 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3255 u32 vlan_macip_lens, u32 type_tucmd,
3258 struct ixgbe_adv_tx_context_desc *context_desc;
3259 u16 i = tx_ring->next_to_use;
3261 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3264 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3266 /* set bits to identify this as an advanced context descriptor */
3267 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3269 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3270 context_desc->seqnum_seed = 0;
3271 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3272 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3275 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3276 struct ixgbevf_tx_buffer *first,
3279 struct sk_buff *skb = first->skb;
3280 u32 vlan_macip_lens, type_tucmd;
3281 u32 mss_l4len_idx, l4len;
3284 if (skb->ip_summed != CHECKSUM_PARTIAL)
3287 if (!skb_is_gso(skb))
3290 err = skb_cow_head(skb, 0);
3294 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3295 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3297 if (first->protocol == htons(ETH_P_IP)) {
3298 struct iphdr *iph = ip_hdr(skb);
3302 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3306 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3307 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3308 IXGBE_TX_FLAGS_CSUM |
3309 IXGBE_TX_FLAGS_IPV4;
3310 } else if (skb_is_gso_v6(skb)) {
3311 ipv6_hdr(skb)->payload_len = 0;
3312 tcp_hdr(skb)->check =
3313 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3314 &ipv6_hdr(skb)->daddr,
3316 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3317 IXGBE_TX_FLAGS_CSUM;
3320 /* compute header lengths */
3321 l4len = tcp_hdrlen(skb);
3323 *hdr_len = skb_transport_offset(skb) + l4len;
3325 /* update GSO size and bytecount with header size */
3326 first->gso_segs = skb_shinfo(skb)->gso_segs;
3327 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3329 /* mss_l4len_id: use 1 as index for TSO */
3330 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
3331 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3332 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
3334 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3335 vlan_macip_lens = skb_network_header_len(skb);
3336 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3337 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3339 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3340 type_tucmd, mss_l4len_idx);
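/* Illustrative layout (based on the shifts used above): for a TCP segment
 * with a 20-byte L4 header and a 1448-byte MSS, mss_l4len_idx packs
 * (20 << IXGBE_ADVTXD_L4LEN_SHIFT) | (1448 << IXGBE_ADVTXD_MSS_SHIFT) |
 * (1 << IXGBE_ADVTXD_IDX_SHIFT) into the single 32-bit field written to the
 * context descriptor.
 */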
3345 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3346 struct ixgbevf_tx_buffer *first)
3348 struct sk_buff *skb = first->skb;
3349 u32 vlan_macip_lens = 0;
3350 u32 mss_l4len_idx = 0;
3353 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3356 switch (first->protocol) {
3357 case htons(ETH_P_IP):
3358 vlan_macip_lens |= skb_network_header_len(skb);
3359 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3360 l4_hdr = ip_hdr(skb)->protocol;
3362 case htons(ETH_P_IPV6):
3363 vlan_macip_lens |= skb_network_header_len(skb);
3364 l4_hdr = ipv6_hdr(skb)->nexthdr;
3367 if (unlikely(net_ratelimit())) {
3368 dev_warn(tx_ring->dev,
3369 "partial checksum but proto=%x!\n",
3377 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3378 mss_l4len_idx = tcp_hdrlen(skb) <<
3379 IXGBE_ADVTXD_L4LEN_SHIFT;
3382 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3383 mss_l4len_idx = sizeof(struct sctphdr) <<
3384 IXGBE_ADVTXD_L4LEN_SHIFT;
3387 mss_l4len_idx = sizeof(struct udphdr) <<
3388 IXGBE_ADVTXD_L4LEN_SHIFT;
3391 if (unlikely(net_ratelimit())) {
3392 dev_warn(tx_ring->dev,
3393 "partial checksum but l4 proto=%x!\n",
3399 /* update TX checksum flag */
3400 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3403 /* vlan_macip_lens: MACLEN, VLAN tag */
3404 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3405 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3407 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3408 type_tucmd, mss_l4len_idx);
3411 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3413 /* set type for advanced descriptor with frame checksum insertion */
3414 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3415 IXGBE_ADVTXD_DCMD_IFCS |
3416 IXGBE_ADVTXD_DCMD_DEXT);
3418 /* set HW VLAN bit if VLAN is present */
3419 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3420 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3422 /* set segmentation enable bits for TSO/FSO */
3423 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3424 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
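/* Illustrative example (derived from the flags above): a VLAN-tagged TSO
 * packet yields cmd_type = DATA | IFCS | DEXT | VLE | TSE; ixgbevf_tx_map()
 * later ORs in each descriptor's length and the final RS/EOP bits.
 */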
3429 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3430 u32 tx_flags, unsigned int paylen)
3432 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3434 /* enable L4 checksum for TSO and TX checksum offload */
3435 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3436 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3438 /* enable IPv4 checksum for TSO */
3439 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3440 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3442 /* use index 1 context for TSO/FSO/FCOE */
3443 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3444 olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
3446 /* Check Context must be set if Tx switch is enabled, which it
3447 * always is for the case where virtual functions are running
3449 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3451 tx_desc->read.olinfo_status = olinfo_status;
3454 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3455 struct ixgbevf_tx_buffer *first,
3459 struct sk_buff *skb = first->skb;
3460 struct ixgbevf_tx_buffer *tx_buffer;
3461 union ixgbe_adv_tx_desc *tx_desc;
3462 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
3463 unsigned int data_len = skb->data_len;
3464 unsigned int size = skb_headlen(skb);
3465 unsigned int paylen = skb->len - hdr_len;
3466 u32 tx_flags = first->tx_flags;
3468 u16 i = tx_ring->next_to_use;
3470 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3472 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
3473 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3475 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3476 if (dma_mapping_error(tx_ring->dev, dma))
3479 /* record length, and DMA address */
3480 dma_unmap_len_set(first, len, size);
3481 dma_unmap_addr_set(first, dma, dma);
3483 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3486 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3487 tx_desc->read.cmd_type_len =
3488 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3492 if (i == tx_ring->count) {
3493 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3497 dma += IXGBE_MAX_DATA_PER_TXD;
3498 size -= IXGBE_MAX_DATA_PER_TXD;
3500 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3501 tx_desc->read.olinfo_status = 0;
3504 if (likely(!data_len))
3507 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3511 if (i == tx_ring->count) {
3512 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3516 size = skb_frag_size(frag);
3519 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3521 if (dma_mapping_error(tx_ring->dev, dma))
3524 tx_buffer = &tx_ring->tx_buffer_info[i];
3525 dma_unmap_len_set(tx_buffer, len, size);
3526 dma_unmap_addr_set(tx_buffer, dma, dma);
3528 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3529 tx_desc->read.olinfo_status = 0;
3534 /* write last descriptor with RS and EOP bits */
3535 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3536 tx_desc->read.cmd_type_len = cmd_type;
3538 /* set the timestamp */
3539 first->time_stamp = jiffies;
3541 /* Force memory writes to complete before letting h/w know there
3542 * are new descriptors to fetch. (Only applicable for weak-ordered
3543 * memory model archs, such as IA-64).
3545 * We also need this memory barrier (wmb) to make certain all of the
3546 * status bits have been updated before next_to_watch is written.
3550 /* set next_to_watch value indicating a packet is present */
3551 first->next_to_watch = tx_desc;
3554 if (i == tx_ring->count)
3557 tx_ring->next_to_use = i;
3559 /* notify HW of packet */
3560 ixgbevf_write_tail(tx_ring, i);
3564 dev_err(tx_ring->dev, "TX DMA map failed\n");
3566 /* clear dma mappings for failed tx_buffer_info map */
3568 tx_buffer = &tx_ring->tx_buffer_info[i];
3569 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
3570 if (tx_buffer == first)
3577 tx_ring->next_to_use = i;
3580 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3582 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3583 /* Herbert's original patch had:
3584 * smp_mb__after_netif_stop_queue();
3585 * but since that doesn't exist yet, just open code it.
3589 /* We need to check again in case another CPU has just
3590 * made room available.
3592 if (likely(ixgbevf_desc_unused(tx_ring) < size))
3595 /* A reprieve! - use start_queue because it doesn't call schedule */
3596 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3597 ++tx_ring->tx_stats.restart_queue;
3602 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3604 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3606 return __ixgbevf_maybe_stop_tx(tx_ring, size);
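/* Illustrative sketch (not part of the driver): a common way the
 * "descriptors unused" value used above is derived from the ring indices;
 * ixgbevf_desc_unused() is assumed to behave equivalently. One slot is left
 * empty so that next_to_use == next_to_clean means "empty", not "full".
 */
static inline u16 example_desc_unused(u16 next_to_clean, u16 next_to_use,
				      u16 count)
{
	return ((next_to_clean > next_to_use) ? 0 : count) +
	       next_to_clean - next_to_use - 1;
}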
3609 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3611 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3612 struct ixgbevf_tx_buffer *first;
3613 struct ixgbevf_ring *tx_ring;
3616 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3617 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3621 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3623 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3624 dev_kfree_skb_any(skb);
3625 return NETDEV_TX_OK;
3628 tx_ring = adapter->tx_ring[skb->queue_mapping];
3630 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3631 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3632 * + 2 desc gap to keep tail from touching head,
3633 * + 1 desc for context descriptor,
3634 * otherwise try next time
3636 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3637 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3638 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3640 count += skb_shinfo(skb)->nr_frags;
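/* Worked example (illustrative): an skb with an 8 KB linear area and 4 page
 * fragments needs TXD_USE_COUNT(8 KB) = 1 descriptor for the head plus 4 for
 * the fragments (assuming IXGBE_MAX_DATA_PER_TXD is 16 KB), so count = 5;
 * the "+ 3" below reserves the context descriptor and the 2-descriptor gap
 * described above.
 */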
3642 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3643 tx_ring->tx_stats.tx_busy++;
3644 return NETDEV_TX_BUSY;
3647 /* record the location of the first descriptor for this packet */
3648 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
3650 first->bytecount = skb->len;
3651 first->gso_segs = 1;
3653 if (skb_vlan_tag_present(skb)) {
3654 tx_flags |= skb_vlan_tag_get(skb);
3655 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3656 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3659 /* record initial flags and protocol */
3660 first->tx_flags = tx_flags;
3661 first->protocol = vlan_get_protocol(skb);
3663 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3667 ixgbevf_tx_csum(tx_ring, first);
3669 ixgbevf_tx_map(tx_ring, first, hdr_len);
3671 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3673 return NETDEV_TX_OK;
3676 dev_kfree_skb_any(first->skb);
3679 return NETDEV_TX_OK;
3683 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3684 * @netdev: network interface device structure
3685 * @p: pointer to an address structure
3687 * Returns 0 on success, negative on failure
3689 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3691 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3692 struct ixgbe_hw *hw = &adapter->hw;
3693 struct sockaddr *addr = p;
3695 if (!is_valid_ether_addr(addr->sa_data))
3696 return -EADDRNOTAVAIL;
3698 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3699 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3701 spin_lock_bh(&adapter->mbx_lock);
3703 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3705 spin_unlock_bh(&adapter->mbx_lock);
3711 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3712 * @netdev: network interface device structure
3713 * @new_mtu: new value for maximum frame size
3715 * Returns 0 on success, negative on failure
3717 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3719 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3720 struct ixgbe_hw *hw = &adapter->hw;
3721 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3722 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3724 switch (adapter->hw.api_version) {
3725 case ixgbe_mbox_api_11:
3726 case ixgbe_mbox_api_12:
3727 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3730 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
3731 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3735 /* MTU < 68 is an error and causes problems on some kernels */
3736 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3739 hw_dbg(hw, "changing MTU from %d to %d\n",
3740 netdev->mtu, new_mtu);
3741 /* must set new MTU before calling down or up */
3742 netdev->mtu = new_mtu;
3744 /* notify the PF of our intent to use this size of frame */
3745 ixgbevf_rlpml_set_vf(hw, max_frame);
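/* Illustrative helper (not part of the driver): the frame size reported to
 * the PF above is simply the MTU plus the Ethernet header and FCS, e.g. a
 * 1500-byte MTU maps to 1518 bytes and a 9000-byte jumbo MTU to 9018 bytes.
 */
static inline int example_mtu_to_max_frame(int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN;	/* 14-byte header + 4-byte FCS */
}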
3750 #ifdef CONFIG_NET_POLL_CONTROLLER
3751 /* Polling 'interrupt' - used by things like netconsole to send skbs
3752 * without having to re-enable interrupts. It's not called while
3753 * the interrupt routine is executing.
3755 static void ixgbevf_netpoll(struct net_device *netdev)
3757 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3760 /* if interface is down do nothing */
3761 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
3763 for (i = 0; i < adapter->num_rx_queues; i++)
3764 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
3766 #endif /* CONFIG_NET_POLL_CONTROLLER */
3768 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3770 struct net_device *netdev = pci_get_drvdata(pdev);
3771 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3776 netif_device_detach(netdev);
3778 if (netif_running(netdev)) {
3780 ixgbevf_down(adapter);
3781 ixgbevf_free_irq(adapter);
3782 ixgbevf_free_all_tx_resources(adapter);
3783 ixgbevf_free_all_rx_resources(adapter);
3787 ixgbevf_clear_interrupt_scheme(adapter);
3790 retval = pci_save_state(pdev);
3795 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3796 pci_disable_device(pdev);
3802 static int ixgbevf_resume(struct pci_dev *pdev)
3804 struct net_device *netdev = pci_get_drvdata(pdev);
3805 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3808 pci_restore_state(pdev);
3809 /* pci_restore_state clears dev->state_saved so call
3810 * pci_save_state to restore it.
3812 pci_save_state(pdev);
3814 err = pci_enable_device_mem(pdev);
3816 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3819 smp_mb__before_atomic();
3820 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3821 pci_set_master(pdev);
3823 ixgbevf_reset(adapter);
3826 err = ixgbevf_init_interrupt_scheme(adapter);
3829 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3833 if (netif_running(netdev)) {
3834 err = ixgbevf_open(netdev);
3839 netif_device_attach(netdev);
3844 #endif /* CONFIG_PM */
3845 static void ixgbevf_shutdown(struct pci_dev *pdev)
3847 ixgbevf_suspend(pdev, PMSG_SUSPEND);
3850 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3851 struct rtnl_link_stats64 *stats)
3853 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3856 const struct ixgbevf_ring *ring;
3859 ixgbevf_update_stats(adapter);
3861 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3863 for (i = 0; i < adapter->num_rx_queues; i++) {
3864 ring = adapter->rx_ring[i];
3866 start = u64_stats_fetch_begin_irq(&ring->syncp);
3867 bytes = ring->stats.bytes;
3868 packets = ring->stats.packets;
3869 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3870 stats->rx_bytes += bytes;
3871 stats->rx_packets += packets;
3874 for (i = 0; i < adapter->num_tx_queues; i++) {
3875 ring = adapter->tx_ring[i];
3877 start = u64_stats_fetch_begin_irq(&ring->syncp);
3878 bytes = ring->stats.bytes;
3879 packets = ring->stats.packets;
3880 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3881 stats->tx_bytes += bytes;
3882 stats->tx_packets += packets;
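/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pairs above
 * re-read a ring's byte and packet counters if a writer updated them
 * concurrently, so the 64-bit totals stay consistent even on 32-bit hosts
 * where the counters cannot be read atomically.
 */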
3888 static const struct net_device_ops ixgbevf_netdev_ops = {
3889 .ndo_open = ixgbevf_open,
3890 .ndo_stop = ixgbevf_close,
3891 .ndo_start_xmit = ixgbevf_xmit_frame,
3892 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3893 .ndo_get_stats64 = ixgbevf_get_stats,
3894 .ndo_validate_addr = eth_validate_addr,
3895 .ndo_set_mac_address = ixgbevf_set_mac,
3896 .ndo_change_mtu = ixgbevf_change_mtu,
3897 .ndo_tx_timeout = ixgbevf_tx_timeout,
3898 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3899 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3900 #ifdef CONFIG_NET_RX_BUSY_POLL
3901 .ndo_busy_poll = ixgbevf_busy_poll_recv,
3903 #ifdef CONFIG_NET_POLL_CONTROLLER
3904 .ndo_poll_controller = ixgbevf_netpoll,
3906 .ndo_features_check = passthru_features_check,
3909 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3911 dev->netdev_ops = &ixgbevf_netdev_ops;
3912 ixgbevf_set_ethtool_ops(dev);
3913 dev->watchdog_timeo = 5 * HZ;
3917 * ixgbevf_probe - Device Initialization Routine
3918 * @pdev: PCI device information struct
3919 * @ent: entry in ixgbevf_pci_tbl
3921 * Returns 0 on success, negative on failure
3923 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3924 * The OS initialization, configuring of the adapter private structure,
3925 * and a hardware reset occur.
3927 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3929 struct net_device *netdev;
3930 struct ixgbevf_adapter *adapter = NULL;
3931 struct ixgbe_hw *hw = NULL;
3932 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3933 int err, pci_using_dac;
3934 bool disable_dev = false;
3936 err = pci_enable_device(pdev);
3940 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3943 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3945 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
3951 err = pci_request_regions(pdev, ixgbevf_driver_name);
3953 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3957 pci_set_master(pdev);
3959 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3963 goto err_alloc_etherdev;
3966 SET_NETDEV_DEV(netdev, &pdev->dev);
3968 adapter = netdev_priv(netdev);
3970 adapter->netdev = netdev;
3971 adapter->pdev = pdev;
3974 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3976 /* call save state here in standalone driver because it relies on
3977 * adapter struct to exist, and needs to call netdev_priv
3979 pci_save_state(pdev);
3981 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3982 pci_resource_len(pdev, 0));
3983 adapter->io_addr = hw->hw_addr;
3989 ixgbevf_assign_netdev_ops(netdev);
3992 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3993 hw->mac.type = ii->mac;
3995 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3996 sizeof(struct ixgbe_mbx_operations));
3998 /* setup the private structure */
3999 err = ixgbevf_sw_init(adapter);
4003 /* The HW MAC address was set and/or determined in sw_init */
4004 if (!is_valid_ether_addr(netdev->dev_addr)) {
4005 pr_err("invalid MAC address\n");
4010 netdev->hw_features = NETIF_F_SG |
4017 netdev->features = netdev->hw_features |
4018 NETIF_F_HW_VLAN_CTAG_TX |
4019 NETIF_F_HW_VLAN_CTAG_RX |
4020 NETIF_F_HW_VLAN_CTAG_FILTER;
4022 netdev->vlan_features |= NETIF_F_TSO |
4029 netdev->features |= NETIF_F_HIGHDMA;
4031 netdev->priv_flags |= IFF_UNICAST_FLT;
4033 if (IXGBE_REMOVED(hw->hw_addr)) {
4038 setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
4039 (unsigned long)adapter);
4041 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4042 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4043 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4045 err = ixgbevf_init_interrupt_scheme(adapter);
4049 strcpy(netdev->name, "eth%d");
4051 err = register_netdev(netdev);
4055 pci_set_drvdata(pdev, netdev);
4056 netif_carrier_off(netdev);
4058 ixgbevf_init_last_counter_stats(adapter);
4060 /* print the VF info */
4061 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4062 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4064 switch (hw->mac.type) {
4065 case ixgbe_mac_X550_vf:
4066 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4068 case ixgbe_mac_X540_vf:
4069 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4071 case ixgbe_mac_82599_vf:
4073 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4080 ixgbevf_clear_interrupt_scheme(adapter);
4082 ixgbevf_reset_interrupt_capability(adapter);
4083 iounmap(adapter->io_addr);
4085 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4086 free_netdev(netdev);
4088 pci_release_regions(pdev);
4091 if (!adapter || disable_dev)
4092 pci_disable_device(pdev);
4097 * ixgbevf_remove - Device Removal Routine
4098 * @pdev: PCI device information struct
4100 * ixgbevf_remove is called by the PCI subsystem to alert the driver
4101 * that it should release a PCI device. This could be caused by a
4102 * Hot-Plug event, or because the driver is going to be removed from
4105 static void ixgbevf_remove(struct pci_dev *pdev)
4107 struct net_device *netdev = pci_get_drvdata(pdev);
4108 struct ixgbevf_adapter *adapter;
4114 adapter = netdev_priv(netdev);
4116 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4117 cancel_work_sync(&adapter->service_task);
4119 if (netdev->reg_state == NETREG_REGISTERED)
4120 unregister_netdev(netdev);
4122 ixgbevf_clear_interrupt_scheme(adapter);
4123 ixgbevf_reset_interrupt_capability(adapter);
4125 iounmap(adapter->io_addr);
4126 pci_release_regions(pdev);
4128 hw_dbg(&adapter->hw, "Remove complete\n");
4130 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4131 free_netdev(netdev);
4134 pci_disable_device(pdev);
4138 * ixgbevf_io_error_detected - called when PCI error is detected
4139 * @pdev: Pointer to PCI device
4140 * @state: The current pci connection state
4142 * This function is called after a PCI bus error affecting
4143 * this device has been detected.
4145 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4146 pci_channel_state_t state)
4148 struct net_device *netdev = pci_get_drvdata(pdev);
4149 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4151 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4152 return PCI_ERS_RESULT_DISCONNECT;
4155 netif_device_detach(netdev);
4157 if (state == pci_channel_io_perm_failure) {
4159 return PCI_ERS_RESULT_DISCONNECT;
4162 if (netif_running(netdev))
4163 ixgbevf_down(adapter);
4165 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4166 pci_disable_device(pdev);
4169 /* Request a slot reset. */
4170 return PCI_ERS_RESULT_NEED_RESET;
4174 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4175 * @pdev: Pointer to PCI device
4177 * Restart the card from scratch, as if from a cold-boot. Implementation
4178 * resembles the first-half of the ixgbevf_resume routine.
4180 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4182 struct net_device *netdev = pci_get_drvdata(pdev);
4183 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4185 if (pci_enable_device_mem(pdev)) {
4187 "Cannot re-enable PCI device after reset.\n");
4188 return PCI_ERS_RESULT_DISCONNECT;
4191 smp_mb__before_atomic();
4192 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4193 pci_set_master(pdev);
4195 ixgbevf_reset(adapter);
4197 return PCI_ERS_RESULT_RECOVERED;
4201 * ixgbevf_io_resume - called when traffic can start flowing again.
4202 * @pdev: Pointer to PCI device
4204 * This callback is called when the error recovery driver tells us that
4205 * it's OK to resume normal operation. Implementation resembles the
4206 * second-half of the ixgbevf_resume routine.
4208 static void ixgbevf_io_resume(struct pci_dev *pdev)
4210 struct net_device *netdev = pci_get_drvdata(pdev);
4211 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4213 if (netif_running(netdev))
4214 ixgbevf_up(adapter);
4216 netif_device_attach(netdev);
4219 /* PCI Error Recovery (ERS) */
4220 static const struct pci_error_handlers ixgbevf_err_handler = {
4221 .error_detected = ixgbevf_io_error_detected,
4222 .slot_reset = ixgbevf_io_slot_reset,
4223 .resume = ixgbevf_io_resume,
4226 static struct pci_driver ixgbevf_driver = {
4227 .name = ixgbevf_driver_name,
4228 .id_table = ixgbevf_pci_tbl,
4229 .probe = ixgbevf_probe,
4230 .remove = ixgbevf_remove,
4232 /* Power Management Hooks */
4233 .suspend = ixgbevf_suspend,
4234 .resume = ixgbevf_resume,
4236 .shutdown = ixgbevf_shutdown,
4237 .err_handler = &ixgbevf_err_handler
4241 * ixgbevf_init_module - Driver Registration Routine
4243 * ixgbevf_init_module is the first routine called when the driver is
4244 * loaded. All it does is register with the PCI subsystem.
4246 static int __init ixgbevf_init_module(void)
4250 pr_info("%s - version %s\n", ixgbevf_driver_string,
4251 ixgbevf_driver_version);
4253 pr_info("%s\n", ixgbevf_copyright);
4255 ret = pci_register_driver(&ixgbevf_driver);
4259 module_init(ixgbevf_init_module);
4262 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4264 * ixgbevf_exit_module is called just before the driver is removed
4265 * from memory.
4267 static void __exit ixgbevf_exit_module(void)
4269 pci_unregister_driver(&ixgbevf_driver);
4274 * ixgbevf_get_hw_dev_name - return device name string
4275 * used by hardware layer to print debugging information
4277 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4279 struct ixgbevf_adapter *adapter = hw->back;
4281 return adapter->netdev->name;
4285 module_exit(ixgbevf_exit_module);
4287 /* ixgbevf_main.c */