1 /*******************************************************************************
3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2015 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, see <http://www.gnu.org/licenses/>.
18 The full GNU General Public License is included in this distribution in
19 the file called "COPYING".
22 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 *******************************************************************************/
27 /******************************************************************************
28 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
29 ******************************************************************************/
31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 #include <linux/types.h>
34 #include <linux/bitops.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/netdevice.h>
38 #include <linux/vmalloc.h>
39 #include <linux/string.h>
42 #include <linux/tcp.h>
43 #include <linux/sctp.h>
44 #include <linux/ipv6.h>
45 #include <linux/slab.h>
46 #include <net/checksum.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/ethtool.h>
50 #include <linux/if_vlan.h>
51 #include <linux/prefetch.h>
56 const char ixgbevf_driver_name[] = "ixgbevf";
57 static const char ixgbevf_driver_string[] =
58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
60 #define DRV_VERSION "4.1.0-k"
61 const char ixgbevf_driver_version[] = DRV_VERSION;
62 static char ixgbevf_copyright[] =
63 "Copyright (c) 2009 - 2015 Intel Corporation.";
65 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
66 [board_82599_vf] = &ixgbevf_82599_vf_info,
67 [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
68 [board_X540_vf] = &ixgbevf_X540_vf_info,
69 [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
70 [board_X550_vf] = &ixgbevf_X550_vf_info,
71 [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
72 [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
73 [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
74 [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
77 /* ixgbevf_pci_tbl - PCI Device ID Table
79 * Wildcard entries (PCI_ANY_ID) should come last
80 * Last entry must be all 0s
82 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
83 * Class, Class Mask, private data (not used) }
85 static const struct pci_device_id ixgbevf_pci_tbl[] = {
86 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
88 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
94 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
95 /* required last entry */
98 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
100 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
101 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
102 MODULE_LICENSE("GPL");
103 MODULE_VERSION(DRV_VERSION);
105 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
106 static int debug = -1;
107 module_param(debug, int, 0);
108 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
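/* dedicated workqueue used to run the adapter service task */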
110 static struct workqueue_struct *ixgbevf_wq;
112 static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
114 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
115 !test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
116 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
117 queue_work(ixgbevf_wq, &adapter->service_task);
120 static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
122 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state));
124 /* flush memory to make sure state is correct before next watchdog */
125 smp_mb__before_atomic();
126 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
130 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
131 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
132 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
134 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
136 struct ixgbevf_adapter *adapter = hw->back;
141 dev_err(&adapter->pdev->dev, "Adapter removed\n");
142 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
143 ixgbevf_service_event_schedule(adapter);
146 static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
150 /* The following check not only optimizes a bit by not
151 * performing a read on the status register when the
152 * register just read was a status register read that
153 * returned IXGBE_FAILED_READ_REG. It also blocks any
154 * potential recursion.
156 if (reg == IXGBE_VFSTATUS) {
157 ixgbevf_remove_adapter(hw);
160 value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
161 if (value == IXGBE_FAILED_READ_REG)
162 ixgbevf_remove_adapter(hw);
165 u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
167 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
170 if (IXGBE_REMOVED(reg_addr))
171 return IXGBE_FAILED_READ_REG;
172 value = readl(reg_addr + reg);
173 if (unlikely(value == IXGBE_FAILED_READ_REG))
174 ixgbevf_check_remove(hw, reg);
179 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
180 * @adapter: pointer to adapter struct
181 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
182 * @queue: queue to map the corresponding interrupt to
183 * @msix_vector: the vector to map to the corresponding queue
185 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
186 u8 queue, u8 msix_vector)
189 struct ixgbe_hw *hw = &adapter->hw;
191 if (direction == -1) {
193 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
194 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
197 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
199 /* Tx or Rx causes */
200 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
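/* Each VTIVAR register holds the entries for two queues: 8-bit Rx
 * entries at bits 0-7 and 16-23, 8-bit Tx entries at bits 8-15 and
 * 24-31, hence the index below derived from queue parity and direction.
 */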
201 index = ((16 * (queue & 1)) + (8 * direction));
202 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
203 ivar &= ~(0xFF << index);
204 ivar |= (msix_vector << index);
205 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
209 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
210 struct ixgbevf_tx_buffer *tx_buffer)
212 if (tx_buffer->skb) {
213 dev_kfree_skb_any(tx_buffer->skb);
214 if (dma_unmap_len(tx_buffer, len))
215 dma_unmap_single(tx_ring->dev,
216 dma_unmap_addr(tx_buffer, dma),
217 dma_unmap_len(tx_buffer, len),
219 } else if (dma_unmap_len(tx_buffer, len)) {
220 dma_unmap_page(tx_ring->dev,
221 dma_unmap_addr(tx_buffer, dma),
222 dma_unmap_len(tx_buffer, len),
225 tx_buffer->next_to_watch = NULL;
226 tx_buffer->skb = NULL;
227 dma_unmap_len_set(tx_buffer, len, 0);
228 /* tx_buffer must be completely set up in the transmit path */
231 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
233 return ring->stats.packets;
236 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring)
238 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
239 struct ixgbe_hw *hw = &adapter->hw;
241 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
242 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
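/* pending work is the head-to-tail distance, adjusted for ring wrap */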
245 return (head < tail) ?
246 tail - head : (tail + ring->count - head);
251 static inline bool ixgbevf_check_tx_hang(struct ixgbevf_ring *tx_ring)
253 u32 tx_done = ixgbevf_get_tx_completed(tx_ring);
254 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
255 u32 tx_pending = ixgbevf_get_tx_pending(tx_ring);
257 clear_check_for_tx_hang(tx_ring);
259 /* Check for a hung queue, but be thorough. This verifies
260 * that a transmit has been completed since the previous
261 * check AND there is at least one packet pending. The
262 * ARMED bit is set to indicate a potential hang.
264 if ((tx_done_old == tx_done) && tx_pending) {
265 /* make sure it is true for two checks in a row */
266 return test_and_set_bit(__IXGBEVF_HANG_CHECK_ARMED,
269 /* reset the countdown */
270 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state);
272 /* update completed stats and continue */
273 tx_ring->tx_stats.tx_done_old = tx_done;
278 static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
280 /* Do the reset outside of interrupt context */
281 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
282 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
283 ixgbevf_service_event_schedule(adapter);
288 * ixgbevf_tx_timeout - Respond to a Tx Hang
289 * @netdev: network interface device structure
291 static void ixgbevf_tx_timeout(struct net_device *netdev)
293 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
295 ixgbevf_tx_timeout_reset(adapter);
299 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
300 * @q_vector: board private structure
301 * @tx_ring: tx ring to clean
302 * @napi_budget: Used to determine if we are in netpoll
304 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
305 struct ixgbevf_ring *tx_ring, int napi_budget)
307 struct ixgbevf_adapter *adapter = q_vector->adapter;
308 struct ixgbevf_tx_buffer *tx_buffer;
309 union ixgbe_adv_tx_desc *tx_desc;
310 unsigned int total_bytes = 0, total_packets = 0;
311 unsigned int budget = tx_ring->count / 2;
312 unsigned int i = tx_ring->next_to_clean;
314 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
317 tx_buffer = &tx_ring->tx_buffer_info[i];
318 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
322 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
324 /* if next_to_watch is not set then there is no work pending */
328 /* prevent any other reads prior to eop_desc */
331 /* if DD is not set pending work has not been completed */
332 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
335 /* clear next_to_watch to prevent false hangs */
336 tx_buffer->next_to_watch = NULL;
338 /* update the statistics for this packet */
339 total_bytes += tx_buffer->bytecount;
340 total_packets += tx_buffer->gso_segs;
343 napi_consume_skb(tx_buffer->skb, napi_budget);
345 /* unmap skb header data */
346 dma_unmap_single(tx_ring->dev,
347 dma_unmap_addr(tx_buffer, dma),
348 dma_unmap_len(tx_buffer, len),
351 /* clear tx_buffer data */
352 tx_buffer->skb = NULL;
353 dma_unmap_len_set(tx_buffer, len, 0);
355 /* unmap remaining buffers */
356 while (tx_desc != eop_desc) {
362 tx_buffer = tx_ring->tx_buffer_info;
363 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
366 /* unmap any remaining paged data */
367 if (dma_unmap_len(tx_buffer, len)) {
368 dma_unmap_page(tx_ring->dev,
369 dma_unmap_addr(tx_buffer, dma),
370 dma_unmap_len(tx_buffer, len),
372 dma_unmap_len_set(tx_buffer, len, 0);
376 /* move us one more past the eop_desc for start of next pkt */
382 tx_buffer = tx_ring->tx_buffer_info;
383 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
386 /* issue prefetch for next Tx descriptor */
389 /* update budget accounting */
391 } while (likely(budget));
394 tx_ring->next_to_clean = i;
395 u64_stats_update_begin(&tx_ring->syncp);
396 tx_ring->stats.bytes += total_bytes;
397 tx_ring->stats.packets += total_packets;
398 u64_stats_update_end(&tx_ring->syncp);
399 q_vector->tx.total_bytes += total_bytes;
400 q_vector->tx.total_packets += total_packets;
402 if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) {
403 struct ixgbe_hw *hw = &adapter->hw;
404 union ixgbe_adv_tx_desc *eop_desc;
406 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch;
408 pr_err("Detected Tx Unit Hang\n"
410 " TDH, TDT <%x>, <%x>\n"
411 " next_to_use <%x>\n"
412 " next_to_clean <%x>\n"
413 "tx_buffer_info[next_to_clean]\n"
414 " next_to_watch <%p>\n"
415 " eop_desc->wb.status <%x>\n"
416 " time_stamp <%lx>\n"
418 tx_ring->queue_index,
419 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)),
420 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)),
421 tx_ring->next_to_use, i,
422 eop_desc, (eop_desc ? eop_desc->wb.status : 0),
423 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
425 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
427 /* schedule immediate reset if we believe we hung */
428 ixgbevf_tx_timeout_reset(adapter);
433 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
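/* only restart the queue once at least TX_WAKE_THRESHOLD descriptors
 * are free again
 */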
434 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
435 (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
436 /* Make sure that anybody stopping the queue after this
437 * sees the new next_to_clean.
441 if (__netif_subqueue_stopped(tx_ring->netdev,
442 tx_ring->queue_index) &&
443 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
444 netif_wake_subqueue(tx_ring->netdev,
445 tx_ring->queue_index);
446 ++tx_ring->tx_stats.restart_queue;
454 * ixgbevf_rx_skb - Helper function to determine proper Rx method
455 * @q_vector: structure containing interrupt and ring information
456 * @skb: packet to send up
458 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
461 napi_gro_receive(&q_vector->napi, skb);
464 #define IXGBE_RSS_L4_TYPES_MASK \
465 ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
466 (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
467 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
468 (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
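/* RSS types in this mask hash over the L4 header, so the reported hash
 * is PKT_HASH_TYPE_L4; all other types are reported as PKT_HASH_TYPE_L3
 */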
470 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
471 union ixgbe_adv_rx_desc *rx_desc,
476 if (!(ring->netdev->features & NETIF_F_RXHASH))
479 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
480 IXGBE_RXDADV_RSSTYPE_MASK;
485 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
486 (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
487 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
491 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
492 * @ring: structure containing ring specific data
493 * @rx_desc: current Rx descriptor being processed
494 * @skb: skb currently being received and modified
496 static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
497 union ixgbe_adv_rx_desc *rx_desc,
500 skb_checksum_none_assert(skb);
502 /* Rx csum disabled */
503 if (!(ring->netdev->features & NETIF_F_RXCSUM))
506 /* if IP and error */
507 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
508 ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
509 ring->rx_stats.csum_err++;
513 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
516 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
517 ring->rx_stats.csum_err++;
521 /* It must be a TCP or UDP packet with a valid checksum */
522 skb->ip_summed = CHECKSUM_UNNECESSARY;
526 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
527 * @rx_ring: rx descriptor ring packet is being transacted on
528 * @rx_desc: pointer to the EOP Rx descriptor
529 * @skb: pointer to current skb being populated
531 * This function checks the ring, descriptor, and packet information in
532 * order to populate the checksum, VLAN, protocol, and other fields within the skb.
535 static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
536 union ixgbe_adv_rx_desc *rx_desc,
539 ixgbevf_rx_hash(rx_ring, rx_desc, skb);
540 ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
542 if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
543 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
544 unsigned long *active_vlans = netdev_priv(rx_ring->netdev);
546 if (test_bit(vid & VLAN_VID_MASK, active_vlans))
547 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
550 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
554 * ixgbevf_is_non_eop - process handling of non-EOP buffers
555 * @rx_ring: Rx ring being processed
556 * @rx_desc: Rx descriptor for current buffer
557 * @skb: current socket buffer containing buffer in progress
559 * This function updates next to clean. If the buffer is an EOP buffer
560 * this function exits returning false, otherwise it will place the
561 * sk_buff in the next buffer to be chained and return true indicating
562 * that this is in fact a non-EOP buffer.
564 static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
565 union ixgbe_adv_rx_desc *rx_desc)
567 u32 ntc = rx_ring->next_to_clean + 1;
569 /* fetch, update, and store next to clean */
570 ntc = (ntc < rx_ring->count) ? ntc : 0;
571 rx_ring->next_to_clean = ntc;
573 prefetch(IXGBEVF_RX_DESC(rx_ring, ntc));
575 if (likely(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
581 static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
582 struct ixgbevf_rx_buffer *bi)
584 struct page *page = bi->page;
585 dma_addr_t dma = bi->dma;
587 /* since we are recycling buffers we should seldom need to alloc */
591 /* alloc new page for storage */
592 page = dev_alloc_page();
593 if (unlikely(!page)) {
594 rx_ring->rx_stats.alloc_rx_page_failed++;
598 /* map page for use */
599 dma = dma_map_page(rx_ring->dev, page, 0,
600 PAGE_SIZE, DMA_FROM_DEVICE);
602 /* if mapping failed free memory back to system since
603 * there isn't much point in holding memory we can't use
605 if (dma_mapping_error(rx_ring->dev, dma)) {
608 rx_ring->rx_stats.alloc_rx_buff_failed++;
620 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
621 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
622 * @cleaned_count: number of buffers to replace
624 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
627 union ixgbe_adv_rx_desc *rx_desc;
628 struct ixgbevf_rx_buffer *bi;
629 unsigned int i = rx_ring->next_to_use;
631 /* nothing to do or no valid netdev defined */
632 if (!cleaned_count || !rx_ring->netdev)
635 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
636 bi = &rx_ring->rx_buffer_info[i];
640 if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
643 /* Refresh the desc even if pkt_addr didn't change
644 * because each write-back erases this info.
646 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
652 rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
653 bi = rx_ring->rx_buffer_info;
657 /* clear the hdr_addr for the next_to_use descriptor */
658 rx_desc->read.hdr_addr = 0;
661 } while (cleaned_count);
665 if (rx_ring->next_to_use != i) {
666 /* record the next descriptor to use */
667 rx_ring->next_to_use = i;
669 /* update next to alloc since we have filled the ring */
670 rx_ring->next_to_alloc = i;
672 /* Force memory writes to complete before letting h/w
673 * know there are new descriptors to fetch. (Only
674 * applicable for weak-ordered memory model archs, such as IA-64). */
677 wmb();
678 ixgbevf_write_tail(rx_ring, i);
683 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
684 * @rx_ring: rx descriptor ring packet is being transacted on
685 * @rx_desc: pointer to the EOP Rx descriptor
686 * @skb: pointer to current skb being fixed
688 * Check for corrupted packet headers caused by senders on the local L2
689 * embedded NIC switch not setting up their Tx Descriptors right. These
690 * should be very rare.
692 * Also address the case where we are pulling data in on pages only
693 * and as such no data is present in the skb header.
695 * In addition if skb is not at least 60 bytes we need to pad it so that
696 * it is large enough to qualify as a valid Ethernet frame.
698 * Returns true if an error was encountered and skb was freed.
700 static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
701 union ixgbe_adv_rx_desc *rx_desc,
704 /* verify that the packet does not have any known errors */
705 if (unlikely(ixgbevf_test_staterr(rx_desc,
706 IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
707 struct net_device *netdev = rx_ring->netdev;
709 if (!(netdev->features & NETIF_F_RXALL)) {
710 dev_kfree_skb_any(skb);
715 /* if eth_skb_pad returns an error the skb was freed */
716 if (eth_skb_pad(skb))
723 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
724 * @rx_ring: rx descriptor ring to store buffers on
725 * @old_buff: donor buffer to have page reused
727 * Synchronizes page for reuse by the adapter
729 static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
730 struct ixgbevf_rx_buffer *old_buff)
732 struct ixgbevf_rx_buffer *new_buff;
733 u16 nta = rx_ring->next_to_alloc;
735 new_buff = &rx_ring->rx_buffer_info[nta];
737 /* update, and store next to alloc */
738 nta++;
739 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
741 /* transfer page from old buffer to new buffer */
742 new_buff->page = old_buff->page;
743 new_buff->dma = old_buff->dma;
744 new_buff->page_offset = old_buff->page_offset;
746 /* sync the buffer for use by the device */
747 dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
748 new_buff->page_offset,
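/* A page is never recycled onto the ring if it was allocated on a
 * remote NUMA node or came from the pfmemalloc emergency reserves.
 */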
753 static inline bool ixgbevf_page_is_reserved(struct page *page)
755 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
759 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
760 * @rx_ring: rx descriptor ring to transact packets on
761 * @rx_buffer: buffer containing page to add
762 * @rx_desc: descriptor containing length of buffer written by hardware
763 * @skb: sk_buff to place the data into
765 * This function will add the data contained in rx_buffer->page to the skb.
766 * This is done either through a direct copy if the data in the buffer is
767 * less than the skb header size; otherwise it will just attach the page as a fragment to the skb.
770 * The function will then update the page offset if necessary and return
771 * true if the buffer can be reused by the adapter.
773 static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
774 struct ixgbevf_rx_buffer *rx_buffer,
775 union ixgbe_adv_rx_desc *rx_desc,
778 struct page *page = rx_buffer->page;
779 unsigned char *va = page_address(page) + rx_buffer->page_offset;
780 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
781 #if (PAGE_SIZE < 8192)
782 unsigned int truesize = IXGBEVF_RX_BUFSZ;
784 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
786 unsigned int pull_len;
788 if (unlikely(skb_is_nonlinear(skb)))
791 if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
792 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
794 /* page is not reserved, we can reuse buffer as is */
795 if (likely(!ixgbevf_page_is_reserved(page)))
798 /* this page cannot be reused so discard it */
803 /* we need the header to contain the greater of either ETH_HLEN or
804 * 60 bytes if the skb->len is less than 60 for skb_pad.
806 pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
808 /* align pull length to size of long to optimize memcpy performance */
809 memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
811 /* update all of the pointers */
816 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
817 (unsigned long)va & ~PAGE_MASK, size, truesize);
819 /* avoid re-using remote pages */
820 if (unlikely(ixgbevf_page_is_reserved(page)))
823 #if (PAGE_SIZE < 8192)
824 /* if we are only owner of page we can reuse it */
825 if (unlikely(page_count(page) != 1))
828 /* flip page offset to other buffer */
829 rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
832 /* move offset up to the next cache line */
833 rx_buffer->page_offset += truesize;
835 if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
839 /* Even if we own the page, we are not allowed to use atomic_set()
840 * This would break get_page_unless_zero() users.
847 static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
848 union ixgbe_adv_rx_desc *rx_desc,
851 struct ixgbevf_rx_buffer *rx_buffer;
854 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
855 page = rx_buffer->page;
859 void *page_addr = page_address(page) +
860 rx_buffer->page_offset;
862 /* prefetch first cache line of first page */
864 #if L1_CACHE_BYTES < 128
865 prefetch(page_addr + L1_CACHE_BYTES);
868 /* allocate a skb to store the frags */
869 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
870 IXGBEVF_RX_HDR_SIZE);
871 if (unlikely(!skb)) {
872 rx_ring->rx_stats.alloc_rx_buff_failed++;
876 /* we will be copying header into skb->data in
877 * pskb_may_pull so it is in our interest to prefetch
878 * it now to avoid a possible cache miss
880 prefetchw(skb->data);
883 /* we are reusing so sync this buffer for CPU use */
884 dma_sync_single_range_for_cpu(rx_ring->dev,
886 rx_buffer->page_offset,
890 /* pull page into skb */
891 if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
892 /* hand second half of page back to the ring */
893 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
895 /* we are not reusing the buffer so unmap it */
896 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
897 PAGE_SIZE, DMA_FROM_DEVICE);
900 /* clear contents of buffer_info */
902 rx_buffer->page = NULL;
907 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
910 struct ixgbe_hw *hw = &adapter->hw;
912 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
915 static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
916 struct ixgbevf_ring *rx_ring,
919 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
920 u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
921 struct sk_buff *skb = rx_ring->skb;
923 while (likely(total_rx_packets < budget)) {
924 union ixgbe_adv_rx_desc *rx_desc;
926 /* return some buffers to hardware, one at a time is too slow */
927 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
928 ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
932 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
934 if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
937 /* This memory barrier is needed to keep us from reading
938 * any other fields out of the rx_desc until we know the
939 * RXD_STAT_DD bit is set
943 /* retrieve a buffer from the ring */
944 skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
946 /* exit if we failed to retrieve a buffer */
952 /* fetch next buffer in frame if non-eop */
953 if (ixgbevf_is_non_eop(rx_ring, rx_desc))
956 /* verify the packet layout is correct */
957 if (ixgbevf_cleanup_headers(rx_ring, rx_desc, skb)) {
962 /* probably a little skewed due to removing CRC */
963 total_rx_bytes += skb->len;
965 /* Workaround hardware that can't do proper VEPA multicast source pruning. */
968 if ((skb->pkt_type == PACKET_BROADCAST ||
969 skb->pkt_type == PACKET_MULTICAST) &&
970 ether_addr_equal(rx_ring->netdev->dev_addr,
971 eth_hdr(skb)->h_source)) {
972 dev_kfree_skb_irq(skb);
976 /* populate checksum, VLAN, and protocol */
977 ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);
979 ixgbevf_rx_skb(q_vector, skb);
981 /* reset skb pointer */
984 /* update budget accounting */
988 /* place incomplete frames back on ring for completion */
991 u64_stats_update_begin(&rx_ring->syncp);
992 rx_ring->stats.packets += total_rx_packets;
993 rx_ring->stats.bytes += total_rx_bytes;
994 u64_stats_update_end(&rx_ring->syncp);
995 q_vector->rx.total_packets += total_rx_packets;
996 q_vector->rx.total_bytes += total_rx_bytes;
998 return total_rx_packets;
1002 * ixgbevf_poll - NAPI polling callback
1003 * @napi: napi struct with our devices info in it
1004 * @budget: amount of work driver is allowed to do this pass, in packets
1006 * This function cleans all Tx and Rx rings associated with a q_vector.
1009 static int ixgbevf_poll(struct napi_struct *napi, int budget)
1011 struct ixgbevf_q_vector *q_vector =
1012 container_of(napi, struct ixgbevf_q_vector, napi);
1013 struct ixgbevf_adapter *adapter = q_vector->adapter;
1014 struct ixgbevf_ring *ring;
1015 int per_ring_budget, work_done = 0;
1016 bool clean_complete = true;
1018 ixgbevf_for_each_ring(ring, q_vector->tx) {
1019 if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
1020 clean_complete = false;
1026 /* attempt to distribute budget to each queue fairly, but don't allow
1027 * the budget to go below 1 because we'll exit polling
1029 if (q_vector->rx.count > 1)
1030 per_ring_budget = max(budget/q_vector->rx.count, 1);
1032 per_ring_budget = budget;
1034 ixgbevf_for_each_ring(ring, q_vector->rx) {
1035 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
1037 work_done += cleaned;
1038 if (cleaned >= per_ring_budget)
1039 clean_complete = false;
1042 /* If all work not completed, return budget and keep polling */
1043 if (!clean_complete)
1045 /* all work done, exit the polling mode */
1046 napi_complete_done(napi, work_done);
1047 if (adapter->rx_itr_setting == 1)
1048 ixgbevf_set_itr(q_vector);
1049 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
1050 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
1051 ixgbevf_irq_enable_queues(adapter,
1052 BIT(q_vector->v_idx));
1058 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1059 * @q_vector: structure containing interrupt and ring information
1061 void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
1063 struct ixgbevf_adapter *adapter = q_vector->adapter;
1064 struct ixgbe_hw *hw = &adapter->hw;
1065 int v_idx = q_vector->v_idx;
1066 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
1068 /* set the WDIS bit to not clear the timer bits and cause an
1069 * immediate assertion of the interrupt
1071 itr_reg |= IXGBE_EITR_CNT_WDIS;
1073 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
1077 * ixgbevf_configure_msix - Configure MSI-X hardware
1078 * @adapter: board private structure
1080 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X interrupts.
1083 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
1085 struct ixgbevf_q_vector *q_vector;
1086 int q_vectors, v_idx;
1088 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1089 adapter->eims_enable_mask = 0;
1091 /* Populate the IVAR table and set the ITR values to the
1092 * corresponding register.
1094 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1095 struct ixgbevf_ring *ring;
1097 q_vector = adapter->q_vector[v_idx];
1099 ixgbevf_for_each_ring(ring, q_vector->rx)
1100 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1102 ixgbevf_for_each_ring(ring, q_vector->tx)
1103 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1105 if (q_vector->tx.ring && !q_vector->rx.ring) {
1106 /* Tx only vector */
1107 if (adapter->tx_itr_setting == 1)
1108 q_vector->itr = IXGBE_12K_ITR;
1110 q_vector->itr = adapter->tx_itr_setting;
1112 /* Rx or Rx/Tx vector */
1113 if (adapter->rx_itr_setting == 1)
1114 q_vector->itr = IXGBE_20K_ITR;
1116 q_vector->itr = adapter->rx_itr_setting;
1119 /* add q_vector eims value to global eims_enable_mask */
1120 adapter->eims_enable_mask |= BIT(v_idx);
1122 ixgbevf_write_eitr(q_vector);
1125 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
1126 /* setup eims_other and add value to global eims_enable_mask */
1127 adapter->eims_other = BIT(v_idx);
1128 adapter->eims_enable_mask |= adapter->eims_other;
1131 enum latency_range {
1132 lowest_latency = 0,
1133 low_latency = 1,
1134 bulk_latency = 2,
1135 latency_invalid = 255
1139 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1140 * @q_vector: structure containing interrupt and ring information
1141 * @ring_container: structure containing ring performance data
1143 * Stores a new ITR value based on packets and byte
1144 * counts during the last interrupt. The advantage of per interrupt
1145 * computation is faster updates and more accurate ITR for the current
1146 * traffic pattern. Constants in this function were computed
1147 * based on theoretical maximum wire speed and thresholds were set based
1148 * on testing data as well as attempting to minimize response time
1149 * while increasing bulk throughput.
1151 static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
1152 struct ixgbevf_ring_container *ring_container)
1154 int bytes = ring_container->total_bytes;
1155 int packets = ring_container->total_packets;
1158 u8 itr_setting = ring_container->itr;
1163 /* simple throttle rate management
1164 * 0-20MB/s lowest (100000 ints/s)
1165 * 20-100MB/s low (20000 ints/s)
1166 * 100-1249MB/s bulk (12000 ints/s)
1168 /* what was last interrupt timeslice? */
1169 timepassed_us = q_vector->itr >> 2;
1170 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1172 switch (itr_setting) {
1173 case lowest_latency:
1174 if (bytes_perint > 10)
1175 itr_setting = low_latency;
1178 if (bytes_perint > 20)
1179 itr_setting = bulk_latency;
1180 else if (bytes_perint <= 10)
1181 itr_setting = lowest_latency;
1184 if (bytes_perint <= 20)
1185 itr_setting = low_latency;
1189 /* clear work counters since we have the values we need */
1190 ring_container->total_bytes = 0;
1191 ring_container->total_packets = 0;
1193 /* write updated itr to ring container */
1194 ring_container->itr = itr_setting;
1197 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
1199 u32 new_itr = q_vector->itr;
1202 ixgbevf_update_itr(q_vector, &q_vector->tx);
1203 ixgbevf_update_itr(q_vector, &q_vector->rx);
1205 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1207 switch (current_itr) {
1208 /* counts and packets in update_itr are dependent on these numbers */
1209 case lowest_latency:
1210 new_itr = IXGBE_100K_ITR;
1213 new_itr = IXGBE_20K_ITR;
1216 new_itr = IXGBE_12K_ITR;
1222 if (new_itr != q_vector->itr) {
1223 /* do an exponential smoothing */
1224 new_itr = (10 * new_itr * q_vector->itr) /
1225 ((9 * new_itr) + q_vector->itr);
1227 /* save the algorithm value here */
1228 q_vector->itr = new_itr;
1230 ixgbevf_write_eitr(q_vector);
1234 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
1236 struct ixgbevf_adapter *adapter = data;
1237 struct ixgbe_hw *hw = &adapter->hw;
1239 hw->mac.get_link_status = 1;
1241 ixgbevf_service_event_schedule(adapter);
1243 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
1249 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1251 * @data: pointer to our q_vector struct for this interrupt vector
1253 static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
1255 struct ixgbevf_q_vector *q_vector = data;
1257 /* EIAM disabled interrupts (on this vector) for us */
1258 if (q_vector->rx.ring || q_vector->tx.ring)
1259 napi_schedule_irqoff(&q_vector->napi);
1264 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1267 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1269 a->rx_ring[r_idx]->next = q_vector->rx.ring;
1270 q_vector->rx.ring = a->rx_ring[r_idx];
1271 q_vector->rx.count++;
1274 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1277 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1279 a->tx_ring[t_idx]->next = q_vector->tx.ring;
1280 q_vector->tx.ring = a->tx_ring[t_idx];
1281 q_vector->tx.count++;
1285 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1286 * @adapter: board private structure to initialize
1288 * This function maps descriptor rings to the queue-specific vectors
1289 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1290 * one vector per ring/queue, but on a constrained vector budget, we
1291 * group the rings as "efficiently" as possible. You would add new
1292 * mapping configurations in here.
1294 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1298 int rxr_idx = 0, txr_idx = 0;
1299 int rxr_remaining = adapter->num_rx_queues;
1300 int txr_remaining = adapter->num_tx_queues;
1304 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
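/* the last MSI-X vector is reserved for the mailbox/other cause, so
 * only the remaining vectors are available for queue mapping
 */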
1306 /* The ideal configuration...
1307 * We have enough vectors to map one per queue.
1309 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1310 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1311 map_vector_to_rxq(adapter, v_start, rxr_idx);
1313 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1314 map_vector_to_txq(adapter, v_start, txr_idx);
1318 /* If we don't have enough vectors for a 1-to-1
1319 * mapping, we'll have to group them so there are
1320 * multiple queues per vector.
1322 /* Re-adjusting *qpv takes care of the remainder. */
1323 for (i = v_start; i < q_vectors; i++) {
1324 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1325 for (j = 0; j < rqpv; j++) {
1326 map_vector_to_rxq(adapter, i, rxr_idx);
1331 for (i = v_start; i < q_vectors; i++) {
1332 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1333 for (j = 0; j < tqpv; j++) {
1334 map_vector_to_txq(adapter, i, txr_idx);
1344 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1345 * @adapter: board private structure
1347 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1348 * interrupts from the kernel.
1350 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1352 struct net_device *netdev = adapter->netdev;
1353 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1354 unsigned int ri = 0, ti = 0;
1357 for (vector = 0; vector < q_vectors; vector++) {
1358 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
1359 struct msix_entry *entry = &adapter->msix_entries[vector];
1361 if (q_vector->tx.ring && q_vector->rx.ring) {
1362 snprintf(q_vector->name, sizeof(q_vector->name),
1363 "%s-TxRx-%u", netdev->name, ri++);
1365 } else if (q_vector->rx.ring) {
1366 snprintf(q_vector->name, sizeof(q_vector->name),
1367 "%s-rx-%u", netdev->name, ri++);
1368 } else if (q_vector->tx.ring) {
1369 snprintf(q_vector->name, sizeof(q_vector->name),
1370 "%s-tx-%u", netdev->name, ti++);
1372 /* skip this unused q_vector */
1375 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
1376 q_vector->name, q_vector);
1378 hw_dbg(&adapter->hw,
1379 "request_irq failed for MSIX interrupt Error: %d\n",
1381 goto free_queue_irqs;
1385 err = request_irq(adapter->msix_entries[vector].vector,
1386 &ixgbevf_msix_other, 0, netdev->name, adapter);
1388 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
1390 goto free_queue_irqs;
1398 free_irq(adapter->msix_entries[vector].vector,
1399 adapter->q_vector[vector]);
1401 /* This failure is non-recoverable - it indicates the system is
1402 * out of MSIX vector resources and the VF driver cannot run
1403 * without them. Set the number of msix vectors to zero
1404 * indicating that not enough can be allocated. The error
1405 * will be returned to the user indicating device open failed.
1406 * Any further attempts to force the driver to open will also
1407 * fail. The only way to recover is to unload the driver and
1408 * reload it again. If the system has recovered some MSIX
1409 * vectors then it may succeed.
1411 adapter->num_msix_vectors = 0;
1415 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1417 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1419 for (i = 0; i < q_vectors; i++) {
1420 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1422 q_vector->rx.ring = NULL;
1423 q_vector->tx.ring = NULL;
1424 q_vector->rx.count = 0;
1425 q_vector->tx.count = 0;
1430 * ixgbevf_request_irq - initialize interrupts
1431 * @adapter: board private structure
1433 * Attempts to configure interrupts using the best available
1434 * capabilities of the hardware and kernel.
1436 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1438 int err = ixgbevf_request_msix_irqs(adapter);
1441 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
1446 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1450 if (!adapter->msix_entries)
1453 q_vectors = adapter->num_msix_vectors;
1456 free_irq(adapter->msix_entries[i].vector, adapter);
1459 for (; i >= 0; i--) {
1460 /* free only the irqs that were actually requested */
1461 if (!adapter->q_vector[i]->rx.ring &&
1462 !adapter->q_vector[i]->tx.ring)
1465 free_irq(adapter->msix_entries[i].vector,
1466 adapter->q_vector[i]);
1469 ixgbevf_reset_q_vectors(adapter);
1473 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1474 * @adapter: board private structure
1476 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1478 struct ixgbe_hw *hw = &adapter->hw;
1481 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
1482 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1483 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1485 IXGBE_WRITE_FLUSH(hw);
1487 for (i = 0; i < adapter->num_msix_vectors; i++)
1488 synchronize_irq(adapter->msix_entries[i].vector);
1492 * ixgbevf_irq_enable - Enable default interrupt generation settings
1493 * @adapter: board private structure
1495 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1497 struct ixgbe_hw *hw = &adapter->hw;
1499 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1500 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1501 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1505 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1506 * @adapter: board private structure
1507 * @ring: structure containing ring specific data
1509 * Configure the Tx descriptor ring after a reset.
1511 static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1512 struct ixgbevf_ring *ring)
1514 struct ixgbe_hw *hw = &adapter->hw;
1515 u64 tdba = ring->dma;
1517 u32 txdctl = IXGBE_TXDCTL_ENABLE;
1518 u8 reg_idx = ring->reg_idx;
1520 /* disable queue to avoid issues while updating state */
1521 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
1522 IXGBE_WRITE_FLUSH(hw);
1524 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
1525 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
1526 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
1527 ring->count * sizeof(union ixgbe_adv_tx_desc));
1529 /* disable head writeback */
1530 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
1531 IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);
1533 /* enable relaxed ordering */
1534 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
1535 (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
1536 IXGBE_DCA_TXCTRL_DATA_RRO_EN));
1538 /* reset head and tail pointers */
1539 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1540 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1541 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1543 /* reset ntu and ntc to place SW in sync with hardware */
1544 ring->next_to_clean = 0;
1545 ring->next_to_use = 0;
1547 /* In order to avoid issues, WTHRESH + PTHRESH should always be equal
1548 * to or less than the number of on-chip descriptors, which is currently 40. */
1551 txdctl |= (8 << 16); /* WTHRESH = 8 */
1553 /* Setting PTHRESH to 32 both improves performance and avoids a Tx hang */
1554 txdctl |= (1u << 8) | /* HTHRESH = 1 */
1555 32; /* PTHRESH = 32 */
1557 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
1559 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
1561 /* poll to verify queue is enabled */
1563 usleep_range(1000, 2000);
1564 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
1565 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
1567 hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx);
1571 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1572 * @adapter: board private structure
1574 * Configure the Tx unit of the MAC after a reset.
1576 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1580 /* Setup the HW Tx Head and Tail descriptor pointers */
1581 for (i = 0; i < adapter->num_tx_queues; i++)
1582 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
1585 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1587 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1589 struct ixgbe_hw *hw = &adapter->hw;
1592 srrctl = IXGBE_SRRCTL_DROP_EN;
1594 srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
1595 srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1596 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1598 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1601 static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
1603 struct ixgbe_hw *hw = &adapter->hw;
1605 /* PSRTYPE must be initialized in 82599 */
1606 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
1607 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
1608 IXGBE_PSRTYPE_L2HDR;
1610 if (adapter->num_rx_queues > 1)
1613 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1616 #define IXGBEVF_MAX_RX_DESC_POLL 10
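/* number of RXDCTL polls performed while waiting for an Rx queue to
 * enable or disable
 */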
1617 static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1618 struct ixgbevf_ring *ring)
1620 struct ixgbe_hw *hw = &adapter->hw;
1621 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1623 u8 reg_idx = ring->reg_idx;
1625 if (IXGBE_REMOVED(hw->hw_addr))
1627 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1628 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1630 /* write value back with RXDCTL.ENABLE bit cleared */
1631 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1633 /* the hardware may take up to 100us to really disable the Rx queue */
1636 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1637 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
1640 pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
1644 static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1645 struct ixgbevf_ring *ring)
1647 struct ixgbe_hw *hw = &adapter->hw;
1648 int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
1650 u8 reg_idx = ring->reg_idx;
1652 if (IXGBE_REMOVED(hw->hw_addr))
1655 usleep_range(1000, 2000);
1656 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1657 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
1660 pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
1665 * ixgbevf_init_rss_key - Initialize adapter RSS key
1666 * @adapter: device handle
1668 * Allocates and initializes the RSS key if it is not allocated.
1670 static inline int ixgbevf_init_rss_key(struct ixgbevf_adapter *adapter)
1674 if (!adapter->rss_key) {
1675 rss_key = kzalloc(IXGBEVF_RSS_HASH_KEY_SIZE, GFP_KERNEL);
1676 if (unlikely(!rss_key))
1679 netdev_rss_key_fill(rss_key, IXGBEVF_RSS_HASH_KEY_SIZE);
1680 adapter->rss_key = rss_key;
1686 static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
1688 struct ixgbe_hw *hw = &adapter->hw;
1689 u32 vfmrqc = 0, vfreta = 0;
1690 u16 rss_i = adapter->num_rx_queues;
1693 /* Fill out hash function seeds */
1694 for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
1695 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i));
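/* each 32-bit VFRETA register packs four 8-bit redirection entries,
 * cycling through the active Rx queues
 */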
1697 for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
1701 adapter->rss_indir_tbl[i] = j;
1703 vfreta |= j << (i & 0x3) * 8;
1705 IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
1710 /* Perform hash on these packet types */
1711 vfmrqc |= IXGBE_VFMRQC_RSS_FIELD_IPV4 |
1712 IXGBE_VFMRQC_RSS_FIELD_IPV4_TCP |
1713 IXGBE_VFMRQC_RSS_FIELD_IPV6 |
1714 IXGBE_VFMRQC_RSS_FIELD_IPV6_TCP;
1716 vfmrqc |= IXGBE_VFMRQC_RSSEN;
1718 IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, vfmrqc);
1721 static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1722 struct ixgbevf_ring *ring)
1724 struct ixgbe_hw *hw = &adapter->hw;
1725 u64 rdba = ring->dma;
1727 u8 reg_idx = ring->reg_idx;
1729 /* disable queue to avoid issues while updating state */
1730 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1731 ixgbevf_disable_rx_queue(adapter, ring);
1733 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
1734 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
1735 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
1736 ring->count * sizeof(union ixgbe_adv_rx_desc));
1738 #ifndef CONFIG_SPARC
1739 /* enable relaxed ordering */
1740 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1741 IXGBE_DCA_RXCTRL_DESC_RRO_EN);
1743 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
1744 IXGBE_DCA_RXCTRL_DESC_RRO_EN |
1745 IXGBE_DCA_RXCTRL_DATA_WRO_EN);
1748 /* reset head and tail pointers */
1749 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1750 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1751 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1753 /* reset ntu and ntc to place SW in sync with hardware */
1754 ring->next_to_clean = 0;
1755 ring->next_to_use = 0;
1756 ring->next_to_alloc = 0;
1758 ixgbevf_configure_srrctl(adapter, reg_idx);
1760 /* allow any size packet since we can handle overflow */
1761 rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
1763 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1764 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
1766 ixgbevf_rx_desc_queue_enable(adapter, ring);
1767 ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
1771 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1772 * @adapter: board private structure
1774 * Configure the Rx unit of the MAC after a reset.
1776 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1778 struct ixgbe_hw *hw = &adapter->hw;
1779 struct net_device *netdev = adapter->netdev;
1782 ixgbevf_setup_psrtype(adapter);
1783 if (hw->mac.type >= ixgbe_mac_X550_vf)
1784 ixgbevf_setup_vfmrqc(adapter);
1786 spin_lock_bh(&adapter->mbx_lock);
1787 /* notify the PF of our intent to use this size of frame */
1788 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
1789 spin_unlock_bh(&adapter->mbx_lock);
1791 dev_err(&adapter->pdev->dev,
1792 "Failed to set MTU at %d\n", netdev->mtu);
1794 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1795 * the Base and Length of the Rx Descriptor Ring
1797 for (i = 0; i < adapter->num_rx_queues; i++)
1798 ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
1801 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
1802 __be16 proto, u16 vid)
1804 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1805 struct ixgbe_hw *hw = &adapter->hw;
1808 spin_lock_bh(&adapter->mbx_lock);
1810 /* add VID to filter table */
1811 err = hw->mac.ops.set_vfta(hw, vid, 0, true);
1813 spin_unlock_bh(&adapter->mbx_lock);
1815 /* translate error return types so error makes sense */
1816 if (err == IXGBE_ERR_MBX)
1819 if (err == IXGBE_ERR_INVALID_ARGUMENT)
1822 set_bit(vid, adapter->active_vlans);
1827 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
1828 __be16 proto, u16 vid)
1830 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1831 struct ixgbe_hw *hw = &adapter->hw;
1834 spin_lock_bh(&adapter->mbx_lock);
1836 /* remove VID from filter table */
1837 err = hw->mac.ops.set_vfta(hw, vid, 0, false);
1839 spin_unlock_bh(&adapter->mbx_lock);
1841 clear_bit(vid, adapter->active_vlans);
1846 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1850 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1851 ixgbevf_vlan_rx_add_vid(adapter->netdev,
1852 htons(ETH_P_8021Q), vid);
1855 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1857 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1858 struct ixgbe_hw *hw = &adapter->hw;
1861 if (!netdev_uc_empty(netdev)) {
1862 struct netdev_hw_addr *ha;
1864 netdev_for_each_uc_addr(ha, netdev) {
1865 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1869 /* If the list is empty then send message to PF driver to
1870 * clear all MAC VLANs on this VF.
1872 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1879 * ixgbevf_set_rx_mode - Multicast and unicast set
1880 * @netdev: network interface device structure
1882 * The set_rx_mode entry point is called whenever the multicast address
1883 * list, unicast address list or the network interface flags are updated.
1884 * This routine is responsible for configuring the hardware for proper
1885 * multicast mode and configuring requested unicast filters.
1887 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1889 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1890 struct ixgbe_hw *hw = &adapter->hw;
1891 unsigned int flags = netdev->flags;
1894 xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI :
1895 (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
1896 IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
1898 /* request the most inclusive mode we need */
1899 if (flags & IFF_PROMISC)
1900 xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
1901 else if (flags & IFF_ALLMULTI)
1902 xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
1903 else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
1904 xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
1906 xcast_mode = IXGBEVF_XCAST_MODE_NONE;
1908 spin_lock_bh(&adapter->mbx_lock);
1910 hw->mac.ops.update_xcast_mode(hw, xcast_mode);
1912 /* reprogram multicast list */
1913 hw->mac.ops.update_mc_addr_list(hw, netdev);
1915 ixgbevf_write_uc_addr_list(netdev);
1917 spin_unlock_bh(&adapter->mbx_lock);
1920 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1923 struct ixgbevf_q_vector *q_vector;
1924 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1926 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1927 q_vector = adapter->q_vector[q_idx];
1928 napi_enable(&q_vector->napi);
1932 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1935 struct ixgbevf_q_vector *q_vector;
1936 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1938 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1939 q_vector = adapter->q_vector[q_idx];
1940 napi_disable(&q_vector->napi);
1944 static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
1946 struct ixgbe_hw *hw = &adapter->hw;
1947 unsigned int def_q = 0;
1948 unsigned int num_tcs = 0;
1949 unsigned int num_rx_queues = adapter->num_rx_queues;
1950 unsigned int num_tx_queues = adapter->num_tx_queues;
1953 spin_lock_bh(&adapter->mbx_lock);
1955 /* fetch queue configuration from the PF */
1956 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
1958 spin_unlock_bh(&adapter->mbx_lock);
1964 /* we need only one Tx queue */
1967 /* update default Tx ring register index */
1968 adapter->tx_ring[0]->reg_idx = def_q;
1970 /* we need as many queues as traffic classes */
1971 num_rx_queues = num_tcs;
1974 /* if we have a bad config abort request queue reset */
1975 if ((adapter->num_rx_queues != num_rx_queues) ||
1976 (adapter->num_tx_queues != num_tx_queues)) {
1977 /* force mailbox timeout to prevent further messages */
1978 hw->mbx.timeout = 0;
1980 /* wait for watchdog to come around and bail us out */
1981 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
1987 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1989 ixgbevf_configure_dcb(adapter);
1991 ixgbevf_set_rx_mode(adapter->netdev);
1993 ixgbevf_restore_vlan(adapter);
1995 ixgbevf_configure_tx(adapter);
1996 ixgbevf_configure_rx(adapter);
1999 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
2001 /* Only save pre-reset stats if there are some */
2002 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
2003 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
2004 adapter->stats.base_vfgprc;
2005 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
2006 adapter->stats.base_vfgptc;
2007 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
2008 adapter->stats.base_vfgorc;
2009 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
2010 adapter->stats.base_vfgotc;
2011 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
2012 adapter->stats.base_vfmprc;
2016 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
2018 struct ixgbe_hw *hw = &adapter->hw;
2020 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
2021 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
2022 adapter->stats.last_vfgorc |=
2023 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
2024 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
2025 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
2026 adapter->stats.last_vfgotc |=
2027 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
2028 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
2030 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
2031 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
2032 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
2033 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
2034 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
2037 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
2039 struct ixgbe_hw *hw = &adapter->hw;
2040 int api[] = { ixgbe_mbox_api_13,
2041 ixgbe_mbox_api_12,
2042 ixgbe_mbox_api_11,
2043 ixgbe_mbox_api_10,
2044 ixgbe_mbox_api_unknown };
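/* try mailbox API versions from newest to oldest until the PF accepts one */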
2047 spin_lock_bh(&adapter->mbx_lock);
2049 while (api[idx] != ixgbe_mbox_api_unknown) {
2050 err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
2056 spin_unlock_bh(&adapter->mbx_lock);
2059 static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
2061 struct net_device *netdev = adapter->netdev;
2062 struct ixgbe_hw *hw = &adapter->hw;
2064 ixgbevf_configure_msix(adapter);
2066 spin_lock_bh(&adapter->mbx_lock);
2068 if (is_valid_ether_addr(hw->mac.addr))
2069 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2071 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
2073 spin_unlock_bh(&adapter->mbx_lock);
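/* ensure the configuration above is globally visible before the DOWN
 * bit is cleared and NAPI/interrupts are enabled
 */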
2075 smp_mb__before_atomic();
2076 clear_bit(__IXGBEVF_DOWN, &adapter->state);
2077 ixgbevf_napi_enable_all(adapter);
2079 /* clear any pending interrupts, may auto mask */
2080 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2081 ixgbevf_irq_enable(adapter);
2083 /* enable transmits */
2084 netif_tx_start_all_queues(netdev);
2086 ixgbevf_save_reset_stats(adapter);
2087 ixgbevf_init_last_counter_stats(adapter);
2089 hw->mac.get_link_status = 1;
2090 mod_timer(&adapter->service_timer, jiffies);
2093 void ixgbevf_up(struct ixgbevf_adapter *adapter)
2095 ixgbevf_configure(adapter);
2097 ixgbevf_up_complete(adapter);
2101 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2102 * @rx_ring: ring to free buffers from
2104 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
2106 struct device *dev = rx_ring->dev;
2110 /* Free Rx ring sk_buff */
2112 dev_kfree_skb(rx_ring->skb);
2113 rx_ring->skb = NULL;
2116 /* ring already cleared, nothing to do */
2117 if (!rx_ring->rx_buffer_info)
2120 /* Free all the Rx ring pages */
2121 for (i = 0; i < rx_ring->count; i++) {
2122 struct ixgbevf_rx_buffer *rx_buffer;
2124 rx_buffer = &rx_ring->rx_buffer_info[i];
2126 dma_unmap_page(dev, rx_buffer->dma,
2127 PAGE_SIZE, DMA_FROM_DEVICE);
2129 if (rx_buffer->page)
2130 __free_page(rx_buffer->page);
2131 rx_buffer->page = NULL;
2134 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2135 memset(rx_ring->rx_buffer_info, 0, size);
2137 /* Zero out the descriptor ring */
2138 memset(rx_ring->desc, 0, rx_ring->size);
2142 * ixgbevf_clean_tx_ring - Free Tx Buffers
2143 * @tx_ring: ring to be cleaned
2145 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
2147 struct ixgbevf_tx_buffer *tx_buffer_info;
2151 if (!tx_ring->tx_buffer_info)
2154 /* Free all the Tx ring sk_buffs */
2155 for (i = 0; i < tx_ring->count; i++) {
2156 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2157 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2160 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2161 memset(tx_ring->tx_buffer_info, 0, size);
2163 memset(tx_ring->desc, 0, tx_ring->size);
2167 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2168 * @adapter: board private structure
2170 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
2174 for (i = 0; i < adapter->num_rx_queues; i++)
2175 ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
2179 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2180 * @adapter: board private structure
2182 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
2186 for (i = 0; i < adapter->num_tx_queues; i++)
2187 ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
2190 void ixgbevf_down(struct ixgbevf_adapter *adapter)
2192 struct net_device *netdev = adapter->netdev;
2193 struct ixgbe_hw *hw = &adapter->hw;
2196 /* signal that we are down to the interrupt handler */
2197 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
2198 return; /* do nothing if already down */
2200 /* disable all enabled Rx queues */
2201 for (i = 0; i < adapter->num_rx_queues; i++)
2202 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
2204 usleep_range(10000, 20000);
2206 netif_tx_stop_all_queues(netdev);
2208 /* call carrier off first to avoid false dev_watchdog timeouts */
2209 netif_carrier_off(netdev);
2210 netif_tx_disable(netdev);
2212 ixgbevf_irq_disable(adapter);
2214 ixgbevf_napi_disable_all(adapter);
2216 del_timer_sync(&adapter->service_timer);
2218 /* disable transmits in the hardware now that interrupts are off */
2219 for (i = 0; i < adapter->num_tx_queues; i++) {
2220 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
2222 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
2223 IXGBE_TXDCTL_SWFLSH);
2226 if (!pci_channel_offline(adapter->pdev))
2227 ixgbevf_reset(adapter);
2229 ixgbevf_clean_all_tx_rings(adapter);
2230 ixgbevf_clean_all_rx_rings(adapter);
2233 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
2235 WARN_ON(in_interrupt());
2237 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
2240 ixgbevf_down(adapter);
2241 ixgbevf_up(adapter);
2243 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
2246 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
2248 struct ixgbe_hw *hw = &adapter->hw;
2249 struct net_device *netdev = adapter->netdev;
2251 if (hw->mac.ops.reset_hw(hw)) {
2252 hw_dbg(hw, "PF still resetting\n");
2254 hw->mac.ops.init_hw(hw);
2255 ixgbevf_negotiate_api(adapter);
2258 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
2259 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
2260 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2263 adapter->last_reset = jiffies;
2266 static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
2269 int vector_threshold;
2271 /* We'll want at least 2 (vector_threshold):
2272 * 1) TxQ[0] + RxQ[0] handler
2273 * 2) Other (Link Status Change, etc.)
2275 vector_threshold = MIN_MSIX_COUNT;
2277 /* The more we get, the more we will assign to Tx/Rx Cleanup
2278 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2279 * Right now, we simply care about how many we'll get; we'll
2280 * set them up later while requesting IRQs.
2282 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2283 vector_threshold, vectors);
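/* pci_enable_msix_range() returns the number of vectors actually granted
 * (at least vector_threshold) or a negative errno when even the minimum
 * cannot be allocated, in which case the msix_entries table is released
 * below.
 */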
2286 dev_err(&adapter->pdev->dev,
2287 "Unable to allocate MSI-X interrupts\n");
2288 kfree(adapter->msix_entries);
2289 adapter->msix_entries = NULL;
2293 /* Adjust for only the vectors we'll use, which is the minimum
2294 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2295 * vectors we were allocated.
2297 adapter->num_msix_vectors = vectors;
2303 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2304 * @adapter: board private structure to initialize
2306 * This is the top level queue allocation routine. The order here is very
2307 * important, starting with the largest set of features turned on at once,
2308 * and ending with the smallest set of features. This way large combinations
2309 * can be allocated if they're turned on, and smaller combinations are the
2310 * fallthrough conditions.
2313 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
2315 struct ixgbe_hw *hw = &adapter->hw;
2316 unsigned int def_q = 0;
2317 unsigned int num_tcs = 0;
2320 /* Start with base case */
2321 adapter->num_rx_queues = 1;
2322 adapter->num_tx_queues = 1;
2324 spin_lock_bh(&adapter->mbx_lock);
2326 /* fetch queue configuration from the PF */
2327 err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
2329 spin_unlock_bh(&adapter->mbx_lock);
2334 /* we need as many queues as traffic classes */
2336 adapter->num_rx_queues = num_tcs;
2338 u16 rss = min_t(u16, num_online_cpus(), IXGBEVF_MAX_RSS_QUEUES);
2340 switch (hw->api_version) {
2341 case ixgbe_mbox_api_11:
2342 case ixgbe_mbox_api_12:
2343 case ixgbe_mbox_api_13:
2344 adapter->num_rx_queues = rss;
2345 adapter->num_tx_queues = rss;
2353 * ixgbevf_alloc_queues - Allocate memory for all rings
2354 * @adapter: board private structure to initialize
2356 * We allocate one ring per queue at run-time since we don't know the
2357 * number of queues at compile-time. The polling_netdev array is
2358 * intended for Multiqueue, but should work fine with a single queue.
2360 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
2362 struct ixgbevf_ring *ring;
2365 for (; tx < adapter->num_tx_queues; tx++) {
2366 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2368 goto err_allocation;
2370 ring->dev = &adapter->pdev->dev;
2371 ring->netdev = adapter->netdev;
2372 ring->count = adapter->tx_ring_count;
2373 ring->queue_index = tx;
2376 adapter->tx_ring[tx] = ring;
2379 for (; rx < adapter->num_rx_queues; rx++) {
2380 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
2382 goto err_allocation;
2384 ring->dev = &adapter->pdev->dev;
2385 ring->netdev = adapter->netdev;
2387 ring->count = adapter->rx_ring_count;
2388 ring->queue_index = rx;
2391 adapter->rx_ring[rx] = ring;
2398 kfree(adapter->tx_ring[--tx]);
2399 adapter->tx_ring[tx] = NULL;
2403 kfree(adapter->rx_ring[--rx]);
2404 adapter->rx_ring[rx] = NULL;
2410 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2411 * @adapter: board private structure to initialize
2413 * Attempt to configure the interrupts using the best available
2414 * capabilities of the hardware and the kernel.
2416 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2418 struct net_device *netdev = adapter->netdev;
2420 int vector, v_budget;
2422 /* It's easy to be greedy for MSI-X vectors, but it really
2423 * doesn't do us much good if we have a lot more vectors
2424 * than CPUs. So let's be conservative and only ask for
2425 * (roughly) the same number of vectors as there are CPUs.
2426 * The default is to use pairs of vectors.
2428 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
2429 v_budget = min_t(int, v_budget, num_online_cpus());
2430 v_budget += NON_Q_VECTORS;
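/* Illustrative example: with 2 Tx and 2 Rx queues on a host with 8 online
 * CPUs, v_budget = min(max(2, 2), 8) + NON_Q_VECTORS, i.e. one vector per
 * Tx/Rx queue pair plus the non-queue (mailbox/link) vector(s).
 */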
2432 /* A failure in MSI-X entry allocation isn't fatal, but it does
2433 * mean we disable MSI-X capabilities of the adapter.
2435 adapter->msix_entries = kcalloc(v_budget,
2436 sizeof(struct msix_entry), GFP_KERNEL);
2437 if (!adapter->msix_entries)
2440 for (vector = 0; vector < v_budget; vector++)
2441 adapter->msix_entries[vector].entry = vector;
2443 err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
2447 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
2451 return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
2455 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2456 * @adapter: board private structure to initialize
2458 * We allocate one q_vector per queue interrupt. If allocation fails we return -ENOMEM.
2461 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2463 int q_idx, num_q_vectors;
2464 struct ixgbevf_q_vector *q_vector;
2466 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2468 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2469 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2472 q_vector->adapter = adapter;
2473 q_vector->v_idx = q_idx;
2474 netif_napi_add(adapter->netdev, &q_vector->napi,
2476 adapter->q_vector[q_idx] = q_vector;
2484 q_vector = adapter->q_vector[q_idx];
2485 #ifdef CONFIG_NET_RX_BUSY_POLL
2486 napi_hash_del(&q_vector->napi);
2488 netif_napi_del(&q_vector->napi);
2490 adapter->q_vector[q_idx] = NULL;
2496 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2497 * @adapter: board private structure to initialize
2499 * This function frees the memory allocated to the q_vectors. In addition if
2500 * NAPI is enabled it will delete any references to the NAPI struct prior
2501 * to freeing the q_vector.
2503 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2505 int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2507 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2508 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2510 adapter->q_vector[q_idx] = NULL;
2511 #ifdef CONFIG_NET_RX_BUSY_POLL
2512 napi_hash_del(&q_vector->napi);
2514 netif_napi_del(&q_vector->napi);
2520 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2521 * @adapter: board private structure
2524 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2526 if (!adapter->msix_entries)
2529 pci_disable_msix(adapter->pdev);
2530 kfree(adapter->msix_entries);
2531 adapter->msix_entries = NULL;
2535 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2536 * @adapter: board private structure to initialize
2539 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2543 /* Number of supported queues */
2544 ixgbevf_set_num_queues(adapter);
2546 err = ixgbevf_set_interrupt_capability(adapter);
2548 hw_dbg(&adapter->hw,
2549 "Unable to setup interrupt capabilities\n");
2550 goto err_set_interrupt;
2553 err = ixgbevf_alloc_q_vectors(adapter);
2555 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
2556 goto err_alloc_q_vectors;
2559 err = ixgbevf_alloc_queues(adapter);
2561 pr_err("Unable to allocate memory for queues\n");
2562 goto err_alloc_queues;
2565 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
2566 (adapter->num_rx_queues > 1) ? "Enabled" :
2567 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2569 set_bit(__IXGBEVF_DOWN, &adapter->state);
2573 ixgbevf_free_q_vectors(adapter);
2574 err_alloc_q_vectors:
2575 ixgbevf_reset_interrupt_capability(adapter);
2581 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
2582 * @adapter: board private structure to clear interrupt scheme on
2584 * We go through and clear interrupt-specific resources and reset the structure
2585 * to pre-load conditions.
2587 static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
2591 for (i = 0; i < adapter->num_tx_queues; i++) {
2592 kfree(adapter->tx_ring[i]);
2593 adapter->tx_ring[i] = NULL;
2595 for (i = 0; i < adapter->num_rx_queues; i++) {
2596 kfree(adapter->rx_ring[i]);
2597 adapter->rx_ring[i] = NULL;
2600 adapter->num_tx_queues = 0;
2601 adapter->num_rx_queues = 0;
2603 ixgbevf_free_q_vectors(adapter);
2604 ixgbevf_reset_interrupt_capability(adapter);
2608 * ixgbevf_sw_init - Initialize general software structures
2609 * @adapter: board private structure to initialize
2611 * ixgbevf_sw_init initializes the Adapter private data structure.
2612 * Fields are initialized based on PCI device information and
2613 * OS network device settings (MTU size).
2615 static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2617 struct ixgbe_hw *hw = &adapter->hw;
2618 struct pci_dev *pdev = adapter->pdev;
2619 struct net_device *netdev = adapter->netdev;
2622 /* PCI config space info */
2623 hw->vendor_id = pdev->vendor;
2624 hw->device_id = pdev->device;
2625 hw->revision_id = pdev->revision;
2626 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2627 hw->subsystem_device_id = pdev->subsystem_device;
2629 hw->mbx.ops.init_params(hw);
2631 if (hw->mac.type >= ixgbe_mac_X550_vf) {
2632 err = ixgbevf_init_rss_key(adapter);
2637 /* assume legacy case in which PF would only give VF 2 queues */
2638 hw->mac.max_tx_queues = 2;
2639 hw->mac.max_rx_queues = 2;
2641 /* lock to protect mailbox accesses */
2642 spin_lock_init(&adapter->mbx_lock);
2644 err = hw->mac.ops.reset_hw(hw);
2646 dev_info(&pdev->dev,
2647 "PF still in reset state. Is the PF interface up?\n");
2649 err = hw->mac.ops.init_hw(hw);
2651 pr_err("init_shared_code failed: %d\n", err);
2654 ixgbevf_negotiate_api(adapter);
2655 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2657 dev_info(&pdev->dev, "Error reading MAC address\n");
2658 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2659 dev_info(&pdev->dev,
2660 "MAC address not assigned by administrator.\n");
2661 ether_addr_copy(netdev->dev_addr, hw->mac.addr);
2664 if (!is_valid_ether_addr(netdev->dev_addr)) {
2665 dev_info(&pdev->dev, "Assigning random MAC address\n");
2666 eth_hw_addr_random(netdev);
2667 ether_addr_copy(hw->mac.addr, netdev->dev_addr);
2668 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
2671 /* Enable dynamic interrupt throttling rates */
2672 adapter->rx_itr_setting = 1;
2673 adapter->tx_itr_setting = 1;
2675 /* set default ring sizes */
2676 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2677 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2679 set_bit(__IXGBEVF_DOWN, &adapter->state);
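/* The VF hardware statistics registers are narrower than the 64-bit software
 * counters kept in adapter->stats: packet counts are 32 bits wide and octet
 * counts are split across 36-bit LSB/MSB register pairs. The macros below
 * detect a hardware wrap (current reading < last reading) and carry it into
 * the upper bits of the software counter. For example, if a 32-bit counter
 * last read 0xfffffff0 and now reads 0x10, it has wrapped once, so 2^32 is
 * added before the new low 32 bits are merged in.
 */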
2686 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2688 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2689 if (current_counter < last_counter) \
2690 counter += 0x100000000LL; \
2691 last_counter = current_counter; \
2692 counter &= 0xFFFFFFFF00000000LL; \
2693 counter |= current_counter; \
2696 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2698 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2699 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2700 u64 current_counter = (current_counter_msb << 32) | \
2701 current_counter_lsb; \
2702 if (current_counter < last_counter) \
2703 counter += 0x1000000000LL; \
2704 last_counter = current_counter; \
2705 counter &= 0xFFFFFFF000000000LL; \
2706 counter |= current_counter; \
2709 * ixgbevf_update_stats - Update the board statistics counters.
2710 * @adapter: board private structure
2712 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2714 struct ixgbe_hw *hw = &adapter->hw;
2717 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2718 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2721 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2722 adapter->stats.vfgprc);
2723 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2724 adapter->stats.vfgptc);
2725 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2726 adapter->stats.last_vfgorc,
2727 adapter->stats.vfgorc);
2728 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2729 adapter->stats.last_vfgotc,
2730 adapter->stats.vfgotc);
2731 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2732 adapter->stats.vfmprc);
2734 for (i = 0; i < adapter->num_rx_queues; i++) {
2735 adapter->hw_csum_rx_error +=
2736 adapter->rx_ring[i]->hw_csum_rx_error;
2737 adapter->rx_ring[i]->hw_csum_rx_error = 0;
2742 * ixgbevf_service_timer - Timer Call-back
2743 * @data: pointer to adapter cast into an unsigned long
2745 static void ixgbevf_service_timer(unsigned long data)
2747 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2749 /* Reset the timer */
2750 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies);
2752 ixgbevf_service_event_schedule(adapter);
2755 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
2757 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
2760 /* If we're already down or resetting, just bail */
2761 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2762 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
2763 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2766 adapter->tx_timeout_count++;
2769 ixgbevf_reinit_locked(adapter);
2774 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
2775 * @adapter: pointer to the device adapter structure
2777 * This function serves two purposes. First it strobes the interrupt lines
2778 * in order to make certain interrupts are occurring. Second, it sets the
2779 * bits needed to check for TX hangs. As a result we should immediately
2780 * determine if a hang has occurred.
2782 static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
2784 struct ixgbe_hw *hw = &adapter->hw;
2788 /* If we're down or resetting, just bail */
2789 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2790 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2793 /* Force detection of hung controller */
2794 if (netif_carrier_ok(adapter->netdev)) {
2795 for (i = 0; i < adapter->num_tx_queues; i++)
2796 set_check_for_tx_hang(adapter->tx_ring[i]);
2799 /* get one bit for every active Tx/Rx interrupt vector */
2800 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2801 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2803 if (qv->rx.ring || qv->tx.ring)
2807 /* Cause software interrupt to ensure rings are cleaned */
2808 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2812 * ixgbevf_watchdog_update_link - update the link status
2813 * @adapter: pointer to the device adapter structure
2815 static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
2817 struct ixgbe_hw *hw = &adapter->hw;
2818 u32 link_speed = adapter->link_speed;
2819 bool link_up = adapter->link_up;
2822 spin_lock_bh(&adapter->mbx_lock);
2824 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
2826 spin_unlock_bh(&adapter->mbx_lock);
2828 /* if the link check returns an error we will need to reset */
2829 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
2830 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
2834 adapter->link_up = link_up;
2835 adapter->link_speed = link_speed;
2839 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
2840 * print link up message
2841 * @adapter: pointer to the device adapter structure
2843 static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
2845 struct net_device *netdev = adapter->netdev;
2847 /* only continue if link was previously down */
2848 if (netif_carrier_ok(netdev))
2851 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n",
2852 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2854 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ?
2856 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ?
2860 netif_carrier_on(netdev);
2864 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
2865 * print link down message
2866 * @adapter: pointer to the adapter structure
2868 static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
2870 struct net_device *netdev = adapter->netdev;
2872 adapter->link_speed = 0;
2874 /* only continue if link was up previously */
2875 if (!netif_carrier_ok(netdev))
2878 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2880 netif_carrier_off(netdev);
2884 * ixgbevf_watchdog_subtask - worker thread to bring link up
2885 * @adapter: pointer to the device adapter structure
2887 static void ixgbevf_watchdog_subtask(struct ixgbevf_adapter *adapter)
2889 /* if interface is down do nothing */
2890 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2891 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2894 ixgbevf_watchdog_update_link(adapter);
2896 if (adapter->link_up)
2897 ixgbevf_watchdog_link_is_up(adapter);
2899 ixgbevf_watchdog_link_is_down(adapter);
2901 ixgbevf_update_stats(adapter);
2905 * ixgbevf_service_task - manages and runs subtasks
2906 * @work: pointer to work_struct containing our data
2908 static void ixgbevf_service_task(struct work_struct *work)
2910 struct ixgbevf_adapter *adapter = container_of(work,
2911 struct ixgbevf_adapter,
2913 struct ixgbe_hw *hw = &adapter->hw;
2915 if (IXGBE_REMOVED(hw->hw_addr)) {
2916 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2918 ixgbevf_down(adapter);
2924 ixgbevf_queue_reset_subtask(adapter);
2925 ixgbevf_reset_subtask(adapter);
2926 ixgbevf_watchdog_subtask(adapter);
2927 ixgbevf_check_hang_subtask(adapter);
2929 ixgbevf_service_event_complete(adapter);
2933 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2934 * @tx_ring: Tx descriptor ring for a specific queue
2936 * Free all transmit software resources
2938 void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
2940 ixgbevf_clean_tx_ring(tx_ring);
2942 vfree(tx_ring->tx_buffer_info);
2943 tx_ring->tx_buffer_info = NULL;
2945 /* if not set, then don't free */
2949 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
2952 tx_ring->desc = NULL;
2956 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2957 * @adapter: board private structure
2959 * Free all transmit software resources
2961 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2965 for (i = 0; i < adapter->num_tx_queues; i++)
2966 if (adapter->tx_ring[i]->desc)
2967 ixgbevf_free_tx_resources(adapter->tx_ring[i]);
2971 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2972 * @tx_ring: Tx descriptor ring (for a specific queue) to setup
2974 * Return 0 on success, negative on failure
2976 int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
2978 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
2981 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2982 tx_ring->tx_buffer_info = vzalloc(size);
2983 if (!tx_ring->tx_buffer_info)
2986 u64_stats_init(&tx_ring->syncp);
2988 /* round up to nearest 4K */
2989 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2990 tx_ring->size = ALIGN(tx_ring->size, 4096);
2992 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
2993 &tx_ring->dma, GFP_KERNEL);
3000 vfree(tx_ring->tx_buffer_info);
3001 tx_ring->tx_buffer_info = NULL;
3002 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
3007 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3008 * @adapter: board private structure
3010 * If this function returns with an error, then it's possible one or
3011 * more of the rings is populated (while the rest are not). It is the
3012 * caller's duty to clean those orphaned rings.
3014 * Return 0 on success, negative on failure
3016 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
3020 for (i = 0; i < adapter->num_tx_queues; i++) {
3021 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
3024 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
3032 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3033 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
3035 * Returns 0 on success, negative on failure
3037 int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
3041 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
3042 rx_ring->rx_buffer_info = vzalloc(size);
3043 if (!rx_ring->rx_buffer_info)
3046 u64_stats_init(&rx_ring->syncp);
3048 /* Round up to nearest 4K */
3049 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3050 rx_ring->size = ALIGN(rx_ring->size, 4096);
3052 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
3053 &rx_ring->dma, GFP_KERNEL);
3060 vfree(rx_ring->rx_buffer_info);
3061 rx_ring->rx_buffer_info = NULL;
3062 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
3067 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3068 * @adapter: board private structure
3070 * If this function returns with an error, then it's possible one or
3071 * more of the rings is populated (while the rest are not). It is the
3072 * caller's duty to clean those orphaned rings.
3074 * Return 0 on success, negative on failure
3076 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
3080 for (i = 0; i < adapter->num_rx_queues; i++) {
3081 err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
3084 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
3091 * ixgbevf_free_rx_resources - Free Rx Resources
3092 * @rx_ring: ring to clean the resources from
3094 * Free all receive software resources
3096 void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
3098 ixgbevf_clean_rx_ring(rx_ring);
3100 vfree(rx_ring->rx_buffer_info);
3101 rx_ring->rx_buffer_info = NULL;
3103 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
3106 rx_ring->desc = NULL;
3110 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3111 * @adapter: board private structure
3113 * Free all receive software resources
3115 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
3119 for (i = 0; i < adapter->num_rx_queues; i++)
3120 if (adapter->rx_ring[i]->desc)
3121 ixgbevf_free_rx_resources(adapter->rx_ring[i]);
3125 * ixgbevf_open - Called when a network interface is made active
3126 * @netdev: network interface device structure
3128 * Returns 0 on success, negative value on failure
3130 * The open entry point is called when a network interface is made
3131 * active by the system (IFF_UP). At this point all resources needed
3132 * for transmit and receive operations are allocated, the interrupt
3133 * handler is registered with the OS, the watchdog timer is started,
3134 * and the stack is notified that the interface is ready.
3136 int ixgbevf_open(struct net_device *netdev)
3138 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3139 struct ixgbe_hw *hw = &adapter->hw;
3142 /* A previous failure to open the device because of a lack of
3143 * available MSIX vector resources may have reset the number
3144 * of msix vectors variable to zero. The only way to recover
3145 * is to unload/reload the driver and hope that the system has
3146 * been able to recover some MSIX vector resources.
3148 if (!adapter->num_msix_vectors)
3151 if (hw->adapter_stopped) {
3152 ixgbevf_reset(adapter);
3153 /* if adapter is still stopped then PF isn't up and
3154 * the VF can't start.
3156 if (hw->adapter_stopped) {
3157 err = IXGBE_ERR_MBX;
3158 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
3159 goto err_setup_reset;
3163 /* disallow open during test */
3164 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
3167 netif_carrier_off(netdev);
3169 /* allocate transmit descriptors */
3170 err = ixgbevf_setup_all_tx_resources(adapter);
3174 /* allocate receive descriptors */
3175 err = ixgbevf_setup_all_rx_resources(adapter);
3179 ixgbevf_configure(adapter);
3181 /* Map the Tx/Rx rings to the vectors we were allotted.
3182 * if request_irq will be called in this function, map_rings
3183 * must be called *before* up_complete.
3185 ixgbevf_map_rings_to_vectors(adapter);
3187 err = ixgbevf_request_irq(adapter);
3191 ixgbevf_up_complete(adapter);
3196 ixgbevf_down(adapter);
3198 ixgbevf_free_all_rx_resources(adapter);
3200 ixgbevf_free_all_tx_resources(adapter);
3201 ixgbevf_reset(adapter);
3209 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3210 * @adapter: the private adapter struct
3212 * This function should contain the necessary work common to both suspending
3213 * and closing of the device.
3215 static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
3217 ixgbevf_down(adapter);
3218 ixgbevf_free_irq(adapter);
3219 ixgbevf_free_all_tx_resources(adapter);
3220 ixgbevf_free_all_rx_resources(adapter);
3224 * ixgbevf_close - Disables a network interface
3225 * @netdev: network interface device structure
3227 * Returns 0, this is not allowed to fail
3229 * The close entry point is called when an interface is de-activated
3230 * by the OS. The hardware is still under the drivers control, but
3231 * needs to be disabled. A global MAC reset is issued to stop the
3232 * hardware, and all transmit and receive resources are freed.
3234 int ixgbevf_close(struct net_device *netdev)
3236 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3238 if (netif_device_present(netdev))
3239 ixgbevf_close_suspend(adapter);
3244 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
3246 struct net_device *dev = adapter->netdev;
3248 if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
3252 /* if interface is down do nothing */
3253 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
3254 test_bit(__IXGBEVF_RESETTING, &adapter->state))
3257 /* Hardware has to reinitialize queues and interrupts to
3258 * match packet buffer alignment. Unfortunately, the
3259 * hardware is not flexible enough to do this dynamically.
3263 if (netif_running(dev))
3266 ixgbevf_clear_interrupt_scheme(adapter);
3267 ixgbevf_init_interrupt_scheme(adapter);
3269 if (netif_running(dev))
3275 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
3276 u32 vlan_macip_lens, u32 type_tucmd,
3279 struct ixgbe_adv_tx_context_desc *context_desc;
3280 u16 i = tx_ring->next_to_use;
3282 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
3285 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3287 /* set bits to identify this as an advanced context descriptor */
3288 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
3290 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3291 context_desc->seqnum_seed = 0;
3292 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
3293 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3296 static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
3297 struct ixgbevf_tx_buffer *first,
3300 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
3301 struct sk_buff *skb = first->skb;
3311 u32 paylen, l4_offset;
3314 if (skb->ip_summed != CHECKSUM_PARTIAL)
3317 if (!skb_is_gso(skb))
3320 err = skb_cow_head(skb, 0);
3324 if (eth_p_mpls(first->protocol))
3325 ip.hdr = skb_inner_network_header(skb);
3327 ip.hdr = skb_network_header(skb);
3328 l4.hdr = skb_checksum_start(skb);
3330 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3331 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3333 /* initialize outer IP header fields */
3334 if (ip.v4->version == 4) {
3335 unsigned char *csum_start = skb_checksum_start(skb);
3336 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
3338 /* IP header will have to cancel out any data that
3339 * is not a part of the outer IP header
3341 ip.v4->check = csum_fold(csum_partial(trans_start,
3342 csum_start - trans_start,
3344 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3347 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3348 IXGBE_TX_FLAGS_CSUM |
3349 IXGBE_TX_FLAGS_IPV4;
3351 ip.v6->payload_len = 0;
3352 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
3353 IXGBE_TX_FLAGS_CSUM;
3356 /* determine offset of inner transport header */
3357 l4_offset = l4.hdr - skb->data;
3359 /* compute length of segmentation header */
3360 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3362 /* remove payload length from inner checksum */
3363 paylen = skb->len - l4_offset;
3364 csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
3366 /* update gso size and bytecount with header size */
3367 first->gso_segs = skb_shinfo(skb)->gso_segs;
3368 first->bytecount += (first->gso_segs - 1) * *hdr_len;
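/* bytecount was initialized to skb->len when the packet was queued; every
 * segment after the first carries its own copy of the hdr_len bytes of
 * headers on the wire, hence the adjustment above.
 */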
3370 /* mss_l4len_idx: use 1 as index for TSO */
3371 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
3372 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
3373 mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
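/* mss_l4len_idx now packs the L4 (TCP) header length, the MSS and context
 * index 1 into the fields selected by the shift values above.
 */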
3375 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
3376 vlan_macip_lens = l4.hdr - ip.hdr;
3377 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
3378 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3380 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
3381 type_tucmd, mss_l4len_idx);
3386 static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
3388 unsigned int offset = 0;
3390 ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
3392 return offset == skb_checksum_start_offset(skb);
3395 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
3396 struct ixgbevf_tx_buffer *first)
3398 struct sk_buff *skb = first->skb;
3399 u32 vlan_macip_lens = 0;
3402 if (skb->ip_summed != CHECKSUM_PARTIAL)
3405 switch (skb->csum_offset) {
3406 case offsetof(struct tcphdr, check):
3407 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
3409 case offsetof(struct udphdr, check):
3411 case offsetof(struct sctphdr, checksum):
3412 /* validate that this is actually an SCTP request */
3413 if (((first->protocol == htons(ETH_P_IP)) &&
3414 (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
3415 ((first->protocol == htons(ETH_P_IPV6)) &&
3416 ixgbevf_ipv6_csum_is_sctp(skb))) {
3417 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
3422 skb_checksum_help(skb);
3426 if (first->protocol == htons(ETH_P_IP))
3427 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
3429 /* update TX checksum flag */
3430 first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
3431 vlan_macip_lens = skb_checksum_start_offset(skb) -
3432 skb_network_offset(skb);
3434 /* vlan_macip_lens: MACLEN, VLAN tag */
3435 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
3436 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
3438 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
3441 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
3443 /* set type for advanced descriptor with frame checksum insertion */
3444 __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
3445 IXGBE_ADVTXD_DCMD_IFCS |
3446 IXGBE_ADVTXD_DCMD_DEXT);
3448 /* set HW VLAN bit if VLAN is present */
3449 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3450 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
3452 /* set segmentation enable bits for TSO/FSO */
3453 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3454 cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);
3459 static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
3460 u32 tx_flags, unsigned int paylen)
3462 __le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);
3464 /* enable L4 checksum for TSO and TX checksum offload */
3465 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3466 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);
3468 /* enable IPv4 checksum for TSO */
3469 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3470 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);
3472 /* use index 1 context for TSO/FSO/FCOE */
3473 if (tx_flags & IXGBE_TX_FLAGS_TSO)
3474 olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
3476 /* Check Context must be set if Tx switch is enabled, which it
3477 * always is for the case where virtual functions are running
3479 olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);
3481 tx_desc->read.olinfo_status = olinfo_status;
3484 static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3485 struct ixgbevf_tx_buffer *first,
3489 struct sk_buff *skb = first->skb;
3490 struct ixgbevf_tx_buffer *tx_buffer;
3491 union ixgbe_adv_tx_desc *tx_desc;
3492 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
3493 unsigned int data_len = skb->data_len;
3494 unsigned int size = skb_headlen(skb);
3495 unsigned int paylen = skb->len - hdr_len;
3496 u32 tx_flags = first->tx_flags;
3498 u16 i = tx_ring->next_to_use;
3500 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
3502 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
3503 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
3505 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3506 if (dma_mapping_error(tx_ring->dev, dma))
3509 /* record length, and DMA address */
3510 dma_unmap_len_set(first, len, size);
3511 dma_unmap_addr_set(first, dma, dma);
3513 tx_desc->read.buffer_addr = cpu_to_le64(dma);
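/* A single data descriptor can describe at most IXGBE_MAX_DATA_PER_TXD
 * bytes, so the loops below split the linear data and each page fragment
 * across as many descriptors as needed, wrapping back to the start of the
 * ring as the index advances.
 */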
3516 while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
3517 tx_desc->read.cmd_type_len =
3518 cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
3522 if (i == tx_ring->count) {
3523 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3527 dma += IXGBE_MAX_DATA_PER_TXD;
3528 size -= IXGBE_MAX_DATA_PER_TXD;
3530 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3531 tx_desc->read.olinfo_status = 0;
3534 if (likely(!data_len))
3537 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
3541 if (i == tx_ring->count) {
3542 tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
3546 size = skb_frag_size(frag);
3549 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3551 if (dma_mapping_error(tx_ring->dev, dma))
3554 tx_buffer = &tx_ring->tx_buffer_info[i];
3555 dma_unmap_len_set(tx_buffer, len, size);
3556 dma_unmap_addr_set(tx_buffer, dma, dma);
3558 tx_desc->read.buffer_addr = cpu_to_le64(dma);
3559 tx_desc->read.olinfo_status = 0;
3564 /* write last descriptor with RS and EOP bits */
3565 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
3566 tx_desc->read.cmd_type_len = cmd_type;
3568 /* set the timestamp */
3569 first->time_stamp = jiffies;
3571 /* Force memory writes to complete before letting h/w know there
3572 * are new descriptors to fetch. (Only applicable for weak-ordered
3573 * memory model archs, such as IA-64).
3575 * We also need this memory barrier (wmb) to make certain all of the
3576 * status bits have been updated before next_to_watch is written.
3580 /* set next_to_watch value indicating a packet is present */
3581 first->next_to_watch = tx_desc;
3584 if (i == tx_ring->count)
3587 tx_ring->next_to_use = i;
3589 /* notify HW of packet */
3590 ixgbevf_write_tail(tx_ring, i);
3594 dev_err(tx_ring->dev, "TX DMA map failed\n");
3596 /* clear dma mappings for failed tx_buffer_info map */
3598 tx_buffer = &tx_ring->tx_buffer_info[i];
3599 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
3600 if (tx_buffer == first)
3607 tx_ring->next_to_use = i;
3610 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3612 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3613 /* Herbert's original patch had:
3614 * smp_mb__after_netif_stop_queue();
3615 * but since that doesn't exist yet, just open code it.
3619 /* We need to check again in case another CPU has just
3620 * made room available.
3622 if (likely(ixgbevf_desc_unused(tx_ring) < size))
3625 /* A reprieve! - use start_queue because it doesn't call schedule */
3626 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3627 ++tx_ring->tx_stats.restart_queue;
3632 static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
3634 if (likely(ixgbevf_desc_unused(tx_ring) >= size))
3636 return __ixgbevf_maybe_stop_tx(tx_ring, size);
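/* Tx queue flow control: ixgbevf_maybe_stop_tx() is the cheap fast-path
 * check; when the ring looks full, __ixgbevf_maybe_stop_tx() stops the
 * subqueue, re-checks the free descriptor count after the memory barrier to
 * close the race with the cleanup path, and restarts the queue if room
 * became available in the meantime.
 */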
3639 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3641 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3642 struct ixgbevf_tx_buffer *first;
3643 struct ixgbevf_ring *tx_ring;
3646 u16 count = TXD_USE_COUNT(skb_headlen(skb));
3647 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3651 u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
3653 if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
3654 dev_kfree_skb_any(skb);
3655 return NETDEV_TX_OK;
3658 tx_ring = adapter->tx_ring[skb->queue_mapping];
3660 /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
3661 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
3662 * + 2 desc gap to keep tail from touching head,
3663 * + 1 desc for context descriptor,
3664 * otherwise try next time
3666 #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
3667 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3668 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3670 count += skb_shinfo(skb)->nr_frags;
3672 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
3673 tx_ring->tx_stats.tx_busy++;
3674 return NETDEV_TX_BUSY;
3677 /* record the location of the first descriptor for this packet */
3678 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
3680 first->bytecount = skb->len;
3681 first->gso_segs = 1;
3683 if (skb_vlan_tag_present(skb)) {
3684 tx_flags |= skb_vlan_tag_get(skb);
3685 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3686 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3689 /* record initial flags and protocol */
3690 first->tx_flags = tx_flags;
3691 first->protocol = vlan_get_protocol(skb);
3693 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3697 ixgbevf_tx_csum(tx_ring, first);
3699 ixgbevf_tx_map(tx_ring, first, hdr_len);
3701 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
3703 return NETDEV_TX_OK;
3706 dev_kfree_skb_any(first->skb);
3709 return NETDEV_TX_OK;
3713 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3714 * @netdev: network interface device structure
3715 * @p: pointer to an address structure
3717 * Returns 0 on success, negative on failure
3719 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3721 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3722 struct ixgbe_hw *hw = &adapter->hw;
3723 struct sockaddr *addr = p;
3726 if (!is_valid_ether_addr(addr->sa_data))
3727 return -EADDRNOTAVAIL;
3729 spin_lock_bh(&adapter->mbx_lock);
3731 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0);
3733 spin_unlock_bh(&adapter->mbx_lock);
3738 ether_addr_copy(hw->mac.addr, addr->sa_data);
3739 ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
3740 ether_addr_copy(netdev->dev_addr, addr->sa_data);
3746 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3747 * @netdev: network interface device structure
3748 * @new_mtu: new value for maximum frame size
3750 * Returns 0 on success, negative on failure
3752 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3754 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3755 struct ixgbe_hw *hw = &adapter->hw;
3756 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
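/* max_frame is the on-the-wire frame size the PF must accept for this MTU,
 * e.g. a 1500 byte MTU gives 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518
 * bytes.
 */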
3759 spin_lock_bh(&adapter->mbx_lock);
3760 /* notify the PF of our intent to use this size of frame */
3761 ret = hw->mac.ops.set_rlpml(hw, max_frame);
3762 spin_unlock_bh(&adapter->mbx_lock);
3766 hw_dbg(hw, "changing MTU from %d to %d\n",
3767 netdev->mtu, new_mtu);
3769 /* must set new MTU before calling down or up */
3770 netdev->mtu = new_mtu;
3775 #ifdef CONFIG_NET_POLL_CONTROLLER
3776 /* Polling 'interrupt' - used by things like netconsole to send skbs
3777 * without having to re-enable interrupts. It's not called while
3778 * the interrupt routine is executing.
3780 static void ixgbevf_netpoll(struct net_device *netdev)
3782 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3785 /* if interface is down do nothing */
3786 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
3788 for (i = 0; i < adapter->num_rx_queues; i++)
3789 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
3791 #endif /* CONFIG_NET_POLL_CONTROLLER */
3793 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
3795 struct net_device *netdev = pci_get_drvdata(pdev);
3796 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3802 netif_device_detach(netdev);
3804 if (netif_running(netdev))
3805 ixgbevf_close_suspend(adapter);
3807 ixgbevf_clear_interrupt_scheme(adapter);
3811 retval = pci_save_state(pdev);
3816 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
3817 pci_disable_device(pdev);
3823 static int ixgbevf_resume(struct pci_dev *pdev)
3825 struct net_device *netdev = pci_get_drvdata(pdev);
3826 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3829 pci_restore_state(pdev);
3830 /* pci_restore_state clears dev->state_saved so call
3831 * pci_save_state to restore it.
3833 pci_save_state(pdev);
3835 err = pci_enable_device_mem(pdev);
3837 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
3841 adapter->hw.hw_addr = adapter->io_addr;
3842 smp_mb__before_atomic();
3843 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
3844 pci_set_master(pdev);
3846 ixgbevf_reset(adapter);
3849 err = ixgbevf_init_interrupt_scheme(adapter);
3852 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3856 if (netif_running(netdev)) {
3857 err = ixgbevf_open(netdev);
3862 netif_device_attach(netdev);
3867 #endif /* CONFIG_PM */
3868 static void ixgbevf_shutdown(struct pci_dev *pdev)
3870 ixgbevf_suspend(pdev, PMSG_SUSPEND);
3873 static void ixgbevf_get_stats(struct net_device *netdev,
3874 struct rtnl_link_stats64 *stats)
3876 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3879 const struct ixgbevf_ring *ring;
3882 ixgbevf_update_stats(adapter);
3884 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3886 for (i = 0; i < adapter->num_rx_queues; i++) {
3887 ring = adapter->rx_ring[i];
3889 start = u64_stats_fetch_begin_irq(&ring->syncp);
3890 bytes = ring->stats.bytes;
3891 packets = ring->stats.packets;
3892 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3893 stats->rx_bytes += bytes;
3894 stats->rx_packets += packets;
3897 for (i = 0; i < adapter->num_tx_queues; i++) {
3898 ring = adapter->tx_ring[i];
3900 start = u64_stats_fetch_begin_irq(&ring->syncp);
3901 bytes = ring->stats.bytes;
3902 packets = ring->stats.packets;
3903 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3904 stats->tx_bytes += bytes;
3905 stats->tx_packets += packets;
3909 #define IXGBEVF_MAX_MAC_HDR_LEN 127
3910 #define IXGBEVF_MAX_NETWORK_HDR_LEN 511
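/* A single Tx context descriptor cannot describe MAC/network headers longer
 * than these limits, so packets exceeding them lose the checksum/GSO
 * offloads in ixgbevf_features_check() below.
 */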
3912 static netdev_features_t
3913 ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
3914 netdev_features_t features)
3916 unsigned int network_hdr_len, mac_hdr_len;
3918 /* Make certain the headers can be described by a context descriptor */
3919 mac_hdr_len = skb_network_header(skb) - skb->data;
3920 if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
3921 return features & ~(NETIF_F_HW_CSUM |
3923 NETIF_F_HW_VLAN_CTAG_TX |
3927 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
3928 if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
3929 return features & ~(NETIF_F_HW_CSUM |
3934 /* We can only support IPV4 TSO in tunnels if we can mangle the
3935 * inner IP ID field, so strip TSO if MANGLEID is not supported.
3937 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
3938 features &= ~NETIF_F_TSO;
3943 static const struct net_device_ops ixgbevf_netdev_ops = {
3944 .ndo_open = ixgbevf_open,
3945 .ndo_stop = ixgbevf_close,
3946 .ndo_start_xmit = ixgbevf_xmit_frame,
3947 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3948 .ndo_get_stats64 = ixgbevf_get_stats,
3949 .ndo_validate_addr = eth_validate_addr,
3950 .ndo_set_mac_address = ixgbevf_set_mac,
3951 .ndo_change_mtu = ixgbevf_change_mtu,
3952 .ndo_tx_timeout = ixgbevf_tx_timeout,
3953 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3954 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3955 #ifdef CONFIG_NET_POLL_CONTROLLER
3956 .ndo_poll_controller = ixgbevf_netpoll,
3958 .ndo_features_check = ixgbevf_features_check,
3961 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3963 dev->netdev_ops = &ixgbevf_netdev_ops;
3964 ixgbevf_set_ethtool_ops(dev);
3965 dev->watchdog_timeo = 5 * HZ;
3969 * ixgbevf_probe - Device Initialization Routine
3970 * @pdev: PCI device information struct
3971 * @ent: entry in ixgbevf_pci_tbl
3973 * Returns 0 on success, negative on failure
3975 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3976 * The OS initialization, configuring of the adapter private structure,
3977 * and a hardware reset occur.
3979 static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3981 struct net_device *netdev;
3982 struct ixgbevf_adapter *adapter = NULL;
3983 struct ixgbe_hw *hw = NULL;
3984 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3985 int err, pci_using_dac;
3986 bool disable_dev = false;
3988 err = pci_enable_device(pdev);
3992 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3995 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3997 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
4003 err = pci_request_regions(pdev, ixgbevf_driver_name);
4005 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4009 pci_set_master(pdev);
4011 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
4015 goto err_alloc_etherdev;
4018 SET_NETDEV_DEV(netdev, &pdev->dev);
4020 adapter = netdev_priv(netdev);
4022 adapter->netdev = netdev;
4023 adapter->pdev = pdev;
4026 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4028 /* call save state here in standalone driver because it relies on
4029 * the adapter struct to exist, and needs to call netdev_priv
4031 pci_save_state(pdev);
4033 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4034 pci_resource_len(pdev, 0));
4035 adapter->io_addr = hw->hw_addr;
4041 ixgbevf_assign_netdev_ops(netdev);
4044 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
4045 hw->mac.type = ii->mac;
4047 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
4048 sizeof(struct ixgbe_mbx_operations));
4050 /* setup the private structure */
4051 err = ixgbevf_sw_init(adapter);
4055 /* The HW MAC address was set and/or determined in sw_init */
4056 if (!is_valid_ether_addr(netdev->dev_addr)) {
4057 pr_err("invalid MAC address\n");
4062 netdev->hw_features = NETIF_F_SG |
4069 #define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
4070 NETIF_F_GSO_GRE_CSUM | \
4071 NETIF_F_GSO_IPXIP4 | \
4072 NETIF_F_GSO_IPXIP6 | \
4073 NETIF_F_GSO_UDP_TUNNEL | \
4074 NETIF_F_GSO_UDP_TUNNEL_CSUM)
4076 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
4077 netdev->hw_features |= NETIF_F_GSO_PARTIAL |
4078 IXGBEVF_GSO_PARTIAL_FEATURES;
4080 netdev->features = netdev->hw_features;
4083 netdev->features |= NETIF_F_HIGHDMA;
4085 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
4086 netdev->mpls_features |= NETIF_F_SG |
4090 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES;
4091 netdev->hw_enc_features |= netdev->vlan_features;
4093 /* set this bit last since it cannot be part of vlan_features */
4094 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4095 NETIF_F_HW_VLAN_CTAG_RX |
4096 NETIF_F_HW_VLAN_CTAG_TX;
4098 netdev->priv_flags |= IFF_UNICAST_FLT;
4100 /* MTU range: 68 - 1504 or 9710 */
4101 netdev->min_mtu = ETH_MIN_MTU;
4102 switch (adapter->hw.api_version) {
4103 case ixgbe_mbox_api_11:
4104 case ixgbe_mbox_api_12:
4105 case ixgbe_mbox_api_13:
4106 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4107 (ETH_HLEN + ETH_FCS_LEN);
4110 if (adapter->hw.mac.type != ixgbe_mac_82599_vf)
4111 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
4112 (ETH_HLEN + ETH_FCS_LEN);
4114 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN;
4118 if (IXGBE_REMOVED(hw->hw_addr)) {
4123 setup_timer(&adapter->service_timer, &ixgbevf_service_timer,
4124 (unsigned long)adapter);
4126 INIT_WORK(&adapter->service_task, ixgbevf_service_task);
4127 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state);
4128 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state);
4130 err = ixgbevf_init_interrupt_scheme(adapter);
4134 strcpy(netdev->name, "eth%d");
4136 err = register_netdev(netdev);
4140 pci_set_drvdata(pdev, netdev);
4141 netif_carrier_off(netdev);
4143 ixgbevf_init_last_counter_stats(adapter);
4145 /* print the VF info */
4146 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr);
4147 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
4149 switch (hw->mac.type) {
4150 case ixgbe_mac_X550_vf:
4151 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n");
4153 case ixgbe_mac_X540_vf:
4154 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n");
4156 case ixgbe_mac_82599_vf:
4158 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n");
4165 ixgbevf_clear_interrupt_scheme(adapter);
4167 ixgbevf_reset_interrupt_capability(adapter);
4168 iounmap(adapter->io_addr);
4169 kfree(adapter->rss_key);
4171 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4172 free_netdev(netdev);
4174 pci_release_regions(pdev);
4177 if (!adapter || disable_dev)
4178 pci_disable_device(pdev);
4183 * ixgbevf_remove - Device Removal Routine
4184 * @pdev: PCI device information struct
4186 * ixgbevf_remove is called by the PCI subsystem to alert the driver
4187 * that it should release a PCI device. This could be caused by a
4188 * Hot-Plug event, or because the driver is going to be removed from
4191 static void ixgbevf_remove(struct pci_dev *pdev)
4193 struct net_device *netdev = pci_get_drvdata(pdev);
4194 struct ixgbevf_adapter *adapter;
4200 adapter = netdev_priv(netdev);
4202 set_bit(__IXGBEVF_REMOVING, &adapter->state);
4203 cancel_work_sync(&adapter->service_task);
4205 if (netdev->reg_state == NETREG_REGISTERED)
4206 unregister_netdev(netdev);
4208 ixgbevf_clear_interrupt_scheme(adapter);
4209 ixgbevf_reset_interrupt_capability(adapter);
4211 iounmap(adapter->io_addr);
4212 pci_release_regions(pdev);
4214 hw_dbg(&adapter->hw, "Remove complete\n");
4216 kfree(adapter->rss_key);
4217 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state);
4218 free_netdev(netdev);
4221 pci_disable_device(pdev);
4225 * ixgbevf_io_error_detected - called when PCI error is detected
4226 * @pdev: Pointer to PCI device
4227 * @state: The current pci connection state
4229 * This function is called after a PCI bus error affecting
4230 * this device has been detected.
4232 static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
4233 pci_channel_state_t state)
4235 struct net_device *netdev = pci_get_drvdata(pdev);
4236 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4238 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state))
4239 return PCI_ERS_RESULT_DISCONNECT;
4242 netif_device_detach(netdev);
4244 if (state == pci_channel_io_perm_failure) {
4246 return PCI_ERS_RESULT_DISCONNECT;
4249 if (netif_running(netdev))
4250 ixgbevf_close_suspend(adapter);
4252 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
4253 pci_disable_device(pdev);
4256 /* Request a slot reset. */
4257 return PCI_ERS_RESULT_NEED_RESET;
4261 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4262 * @pdev: Pointer to PCI device
4264 * Restart the card from scratch, as if from a cold-boot. Implementation
4265 * resembles the first-half of the ixgbevf_resume routine.
4267 static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
4269 struct net_device *netdev = pci_get_drvdata(pdev);
4270 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4272 if (pci_enable_device_mem(pdev)) {
4274 "Cannot re-enable PCI device after reset.\n");
4275 return PCI_ERS_RESULT_DISCONNECT;
4278 adapter->hw.hw_addr = adapter->io_addr;
4279 smp_mb__before_atomic();
4280 clear_bit(__IXGBEVF_DISABLED, &adapter->state);
4281 pci_set_master(pdev);
4283 ixgbevf_reset(adapter);
4285 return PCI_ERS_RESULT_RECOVERED;
4289 * ixgbevf_io_resume - called when traffic can start flowing again.
4290 * @pdev: Pointer to PCI device
4292 * This callback is called when the error recovery driver tells us that
4293 * it's OK to resume normal operation. Implementation resembles the
4294 * second-half of the ixgbevf_resume routine.
4296 static void ixgbevf_io_resume(struct pci_dev *pdev)
4298 struct net_device *netdev = pci_get_drvdata(pdev);
4301 if (netif_running(netdev))
4302 ixgbevf_open(netdev);
4304 netif_device_attach(netdev);
4308 /* PCI Error Recovery (ERS) */
4309 static const struct pci_error_handlers ixgbevf_err_handler = {
4310 .error_detected = ixgbevf_io_error_detected,
4311 .slot_reset = ixgbevf_io_slot_reset,
4312 .resume = ixgbevf_io_resume,
4315 static struct pci_driver ixgbevf_driver = {
4316 .name = ixgbevf_driver_name,
4317 .id_table = ixgbevf_pci_tbl,
4318 .probe = ixgbevf_probe,
4319 .remove = ixgbevf_remove,
4321 /* Power Management Hooks */
4322 .suspend = ixgbevf_suspend,
4323 .resume = ixgbevf_resume,
4325 .shutdown = ixgbevf_shutdown,
4326 .err_handler = &ixgbevf_err_handler
4330 * ixgbevf_init_module - Driver Registration Routine
4332 * ixgbevf_init_module is the first routine called when the driver is
4333 * loaded. It creates the service workqueue and registers the driver with the PCI subsystem.
4335 static int __init ixgbevf_init_module(void)
4337 pr_info("%s - version %s\n", ixgbevf_driver_string,
4338 ixgbevf_driver_version);
4340 pr_info("%s\n", ixgbevf_copyright);
4341 ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
4343 pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
4347 return pci_register_driver(&ixgbevf_driver);
4350 module_init(ixgbevf_init_module);
4353 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4355 * ixgbevf_exit_module is called just before the driver is removed from memory.
4358 static void __exit ixgbevf_exit_module(void)
4360 pci_unregister_driver(&ixgbevf_driver);
4362 destroy_workqueue(ixgbevf_wq);
4369 * ixgbevf_get_hw_dev_name - return device name string
4370 * used by hardware layer to print debugging information
4372 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
4374 struct ixgbevf_adapter *adapter = hw->back;
4376 return adapter->netdev->name;
4380 module_exit(ixgbevf_exit_module);
4382 /* ixgbevf_main.c */