1 /* Intel(R) Ethernet Switch Host Interface Driver
2 * Copyright(c) 2013 - 2016 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * The full GNU General Public License is included in this distribution in
14 * the file called "COPYING".
16 * Contact Information:
17 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
18 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
21 #include <linux/types.h>
22 #include <linux/module.h>
26 #include <linux/if_macvlan.h>
27 #include <linux/prefetch.h>
31 #define DRV_VERSION "0.21.2-k"
32 #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
33 const char fm10k_driver_version[] = DRV_VERSION;
34 char fm10k_driver_name[] = "fm10k";
35 static const char fm10k_driver_string[] = DRV_SUMMARY;
36 static const char fm10k_copyright[] =
37 "Copyright (c) 2013 - 2016 Intel Corporation.";
39 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
40 MODULE_DESCRIPTION(DRV_SUMMARY);
41 MODULE_LICENSE("GPL");
42 MODULE_VERSION(DRV_VERSION);
44 /* single workqueue for entire fm10k driver */
45 struct workqueue_struct *fm10k_workqueue;
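/* allocated in fm10k_init_module() below with WQ_MEM_RECLAIM; work scheduled
 * through helpers such as fm10k_service_event_schedule() is expected to run
 * on this workqueue
 */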
48 * fm10k_init_module - Driver Registration Routine
50 * fm10k_init_module is the first routine called when the driver is
51 * loaded. All it does is register with the PCI subsystem.
53 static int __init fm10k_init_module(void)
55 pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
56 pr_info("%s\n", fm10k_copyright);
58 /* create driver workqueue */
59 fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
66 return fm10k_register_pci_driver();
68 module_init(fm10k_init_module);
71 * fm10k_exit_module - Driver Exit Cleanup Routine
73 * fm10k_exit_module is called just before the driver is removed
76 static void __exit fm10k_exit_module(void)
78 fm10k_unregister_pci_driver();
82 /* destroy driver workqueue */
83 destroy_workqueue(fm10k_workqueue);
85 module_exit(fm10k_exit_module);
87 static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
88 struct fm10k_rx_buffer *bi)
90 struct page *page = bi->page;
93 /* page will only be NULL if the buffer was consumed */
97 /* alloc new page for storage */
98 page = dev_alloc_page();
99 if (unlikely(!page)) {
100 rx_ring->rx_stats.alloc_failed++;
104 /* map page for use */
105 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
107 /* if mapping failed free memory back to system since
108 * there isn't much point in holding memory we can't use
110 if (dma_mapping_error(rx_ring->dev, dma)) {
113 rx_ring->rx_stats.alloc_failed++;
125 * fm10k_alloc_rx_buffers - Replace used receive buffers
126 * @rx_ring: ring to place buffers on
127 * @cleaned_count: number of buffers to replace
129 void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
131 union fm10k_rx_desc *rx_desc;
132 struct fm10k_rx_buffer *bi;
133 u16 i = rx_ring->next_to_use;
139 rx_desc = FM10K_RX_DESC(rx_ring, i);
140 bi = &rx_ring->rx_buffer[i];
144 if (!fm10k_alloc_mapped_page(rx_ring, bi))
147 /* Refresh the desc even if buffer_addrs didn't change
148 * because each write-back erases this info.
150 rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
156 rx_desc = FM10K_RX_DESC(rx_ring, 0);
157 bi = rx_ring->rx_buffer;
161 /* clear the status bits for the next_to_use descriptor */
162 rx_desc->d.staterr = 0;
165 } while (cleaned_count);
169 if (rx_ring->next_to_use != i) {
170 /* record the next descriptor to use */
171 rx_ring->next_to_use = i;
173 /* update next to alloc since we have filled the ring */
174 rx_ring->next_to_alloc = i;
176 /* Force memory writes to complete before letting h/w
177 * know there are new descriptors to fetch. (Only
178 * applicable for weak-ordered memory model archs,
183 /* notify hardware of new descriptors */
184 writel(i, rx_ring->tail);
189 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
190 * @rx_ring: rx descriptor ring to store buffers on
191 * @old_buff: donor buffer to have page reused
193 * Synchronizes page for reuse by the interface
195 static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
196 struct fm10k_rx_buffer *old_buff)
198 struct fm10k_rx_buffer *new_buff;
199 u16 nta = rx_ring->next_to_alloc;
201 new_buff = &rx_ring->rx_buffer[nta];
203 /* update, and store next to alloc */
205 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
207 /* transfer page from old buffer to new buffer */
208 *new_buff = *old_buff;
210 /* sync the buffer for use by the device */
211 dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
212 old_buff->page_offset,
217 static inline bool fm10k_page_is_reserved(struct page *page)
219 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
222 static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
224 unsigned int __maybe_unused truesize)
226 /* avoid re-using remote pages */
227 if (unlikely(fm10k_page_is_reserved(page)))
230 #if (PAGE_SIZE < 8192)
231 /* if we are only owner of page we can reuse it */
232 if (unlikely(page_count(page) != 1))
235 /* flip page offset to other buffer */
236 rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
238 /* move offset up to the next cache line */
239 rx_buffer->page_offset += truesize;
241 if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
245 /* Even if we own the page, we are not allowed to use atomic_set()
246 * This would break get_page_unless_zero() users.
254 * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
255 * @rx_buffer: buffer containing page to add
256 * @rx_desc: descriptor containing length of buffer written by hardware
257 * @skb: sk_buff to place the data into
259 * This function will add the data contained in rx_buffer->page to the skb.
260 * This is done through a direct copy if the data in the buffer is
261 * less than the skb header size; otherwise it will just attach the page as
264 * The function will then update the page offset if necessary and return
265 * true if the buffer can be reused by the interface.
267 static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
268 union fm10k_rx_desc *rx_desc,
271 struct page *page = rx_buffer->page;
272 unsigned char *va = page_address(page) + rx_buffer->page_offset;
273 unsigned int size = le16_to_cpu(rx_desc->w.length);
274 #if (PAGE_SIZE < 8192)
275 unsigned int truesize = FM10K_RX_BUFSZ;
277 unsigned int truesize = ALIGN(size, 512);
279 unsigned int pull_len;
281 if (unlikely(skb_is_nonlinear(skb)))
284 if (likely(size <= FM10K_RX_HDR_LEN)) {
285 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
287 /* page is not reserved, we can reuse buffer as-is */
288 if (likely(!fm10k_page_is_reserved(page)))
291 /* this page cannot be reused so discard it */
296 /* we need the header to contain the greater of either ETH_HLEN or
297 * 60 bytes if the skb->len is less than 60 for skb_pad.
299 pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
301 /* align pull length to size of long to optimize memcpy performance */
302 memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
304 /* update all of the pointers */
309 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
310 (unsigned long)va & ~PAGE_MASK, size, truesize);
312 return fm10k_can_reuse_rx_page(rx_buffer, page, truesize);
315 static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
316 union fm10k_rx_desc *rx_desc,
319 struct fm10k_rx_buffer *rx_buffer;
322 rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
323 page = rx_buffer->page;
327 void *page_addr = page_address(page) +
328 rx_buffer->page_offset;
330 /* prefetch first cache line of first page */
332 #if L1_CACHE_BYTES < 128
333 prefetch(page_addr + L1_CACHE_BYTES);
336 /* allocate a skb to store the frags */
337 skb = napi_alloc_skb(&rx_ring->q_vector->napi,
339 if (unlikely(!skb)) {
340 rx_ring->rx_stats.alloc_failed++;
344 /* we will be copying header into skb->data in
345 * pskb_may_pull so it is in our interest to prefetch
346 * it now to avoid a possible cache miss
348 prefetchw(skb->data);
351 /* we are reusing so sync this buffer for CPU use */
352 dma_sync_single_range_for_cpu(rx_ring->dev,
354 rx_buffer->page_offset,
358 /* pull page into skb */
359 if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
360 /* hand second half of page back to the ring */
361 fm10k_reuse_rx_page(rx_ring, rx_buffer);
363 /* we are not reusing the buffer so unmap it */
364 dma_unmap_page(rx_ring->dev, rx_buffer->dma,
365 PAGE_SIZE, DMA_FROM_DEVICE);
368 /* clear contents of rx_buffer */
369 rx_buffer->page = NULL;
374 static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
375 union fm10k_rx_desc *rx_desc,
378 skb_checksum_none_assert(skb);
380 /* Rx checksum disabled via ethtool */
381 if (!(ring->netdev->features & NETIF_F_RXCSUM))
384 /* TCP/UDP checksum error bit is set */
385 if (fm10k_test_staterr(rx_desc,
386 FM10K_RXD_STATUS_L4E |
387 FM10K_RXD_STATUS_L4E2 |
388 FM10K_RXD_STATUS_IPE |
389 FM10K_RXD_STATUS_IPE2)) {
390 ring->rx_stats.csum_err++;
394 /* It must be a TCP or UDP packet with a valid checksum */
395 if (fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS2))
396 skb->encapsulation = true;
397 else if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_L4CS))
400 skb->ip_summed = CHECKSUM_UNNECESSARY;
402 ring->rx_stats.csum_good++;
405 #define FM10K_RSS_L4_TYPES_MASK \
406 (BIT(FM10K_RSSTYPE_IPV4_TCP) | \
407 BIT(FM10K_RSSTYPE_IPV4_UDP) | \
408 BIT(FM10K_RSSTYPE_IPV6_TCP) | \
409 BIT(FM10K_RSSTYPE_IPV6_UDP))
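/* Only these RSS types hash over the L4 ports; any other type reported by
 * hardware is treated as an L3-only hash when tagging the skb below.
 */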
411 static inline void fm10k_rx_hash(struct fm10k_ring *ring,
412 union fm10k_rx_desc *rx_desc,
417 if (!(ring->netdev->features & NETIF_F_RXHASH))
420 rss_type = le16_to_cpu(rx_desc->w.pkt_info) & FM10K_RXD_RSSTYPE_MASK;
424 skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
425 (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
426 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
429 static void fm10k_type_trans(struct fm10k_ring *rx_ring,
430 union fm10k_rx_desc __maybe_unused *rx_desc,
433 struct net_device *dev = rx_ring->netdev;
434 struct fm10k_l2_accel *l2_accel = rcu_dereference_bh(rx_ring->l2_accel);
436 /* check to see if DGLORT belongs to a MACVLAN */
438 u16 idx = le16_to_cpu(FM10K_CB(skb)->fi.w.dglort) - 1;
440 idx -= l2_accel->dglort;
441 if (idx < l2_accel->size && l2_accel->macvlan[idx])
442 dev = l2_accel->macvlan[idx];
447 skb->protocol = eth_type_trans(skb, dev);
452 /* update MACVLAN statistics */
453 macvlan_count_rx(netdev_priv(dev), skb->len + ETH_HLEN, 1,
454 !!(rx_desc->w.hdr_info &
455 cpu_to_le16(FM10K_RXD_HDR_INFO_XC_MASK)));
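/* A non-zero XC field in hdr_info marks a non-unicast (multicast or
 * broadcast) frame; the double negation turns it into the multicast flag
 * that macvlan_count_rx() expects as its final argument.
 */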
459 * fm10k_process_skb_fields - Populate skb header fields from Rx descriptor
460 * @rx_ring: rx descriptor ring packet is being transacted on
461 * @rx_desc: pointer to the EOP Rx descriptor
462 * @skb: pointer to current skb being populated
464 * This function checks the ring, descriptor, and packet information in
465 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
466 * other fields within the skb.
468 static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
469 union fm10k_rx_desc *rx_desc,
472 unsigned int len = skb->len;
474 fm10k_rx_hash(rx_ring, rx_desc, skb);
476 fm10k_rx_checksum(rx_ring, rx_desc, skb);
478 FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
480 skb_record_rx_queue(skb, rx_ring->queue_index);
482 FM10K_CB(skb)->fi.d.glort = rx_desc->d.glort;
484 if (rx_desc->w.vlan) {
485 u16 vid = le16_to_cpu(rx_desc->w.vlan);
487 if ((vid & VLAN_VID_MASK) != rx_ring->vid)
488 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
489 else if (vid & VLAN_PRIO_MASK)
490 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
491 vid & VLAN_PRIO_MASK);
494 fm10k_type_trans(rx_ring, rx_desc, skb);
500 * fm10k_is_non_eop - process handling of non-EOP buffers
501 * @rx_ring: Rx ring being processed
502 * @rx_desc: Rx descriptor for current buffer
504 * This function updates next to clean. If the buffer is an EOP buffer
505 * this function exits returning false, otherwise it will place the
506 * sk_buff in the next buffer to be chained and return true indicating
507 * that this is in fact a non-EOP buffer.
509 static bool fm10k_is_non_eop(struct fm10k_ring *rx_ring,
510 union fm10k_rx_desc *rx_desc)
512 u32 ntc = rx_ring->next_to_clean + 1;
514 /* fetch, update, and store next to clean */
515 ntc = (ntc < rx_ring->count) ? ntc : 0;
516 rx_ring->next_to_clean = ntc;
518 prefetch(FM10K_RX_DESC(rx_ring, ntc));
520 if (likely(fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_EOP)))
527 * fm10k_cleanup_headers - Correct corrupted or empty headers
528 * @rx_ring: rx descriptor ring packet is being transacted on
529 * @rx_desc: pointer to the EOP Rx descriptor
530 * @skb: pointer to current skb being fixed
532 * Address the case where we are pulling data in on pages only
533 * and as such no data is present in the skb header.
535 * In addition if skb is not at least 60 bytes we need to pad it so that
536 * it is large enough to qualify as a valid Ethernet frame.
538 * Returns true if an error was encountered and skb was freed.
540 static bool fm10k_cleanup_headers(struct fm10k_ring *rx_ring,
541 union fm10k_rx_desc *rx_desc,
544 if (unlikely((fm10k_test_staterr(rx_desc,
545 FM10K_RXD_STATUS_RXE)))) {
546 #define FM10K_TEST_RXD_BIT(rxd, bit) \
547 ((rxd)->w.csum_err & cpu_to_le16(bit))
548 if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_ERROR))
549 rx_ring->rx_stats.switch_errors++;
550 if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_NO_DESCRIPTOR))
551 rx_ring->rx_stats.drops++;
552 if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_PP_ERROR))
553 rx_ring->rx_stats.pp_errors++;
554 if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_SWITCH_READY))
555 rx_ring->rx_stats.link_errors++;
556 if (FM10K_TEST_RXD_BIT(rx_desc, FM10K_RXD_ERR_TOO_BIG))
557 rx_ring->rx_stats.length_errors++;
558 dev_kfree_skb_any(skb);
559 rx_ring->rx_stats.errors++;
563 /* if eth_skb_pad returns an error the skb was freed */
564 if (eth_skb_pad(skb))
571 * fm10k_receive_skb - helper function to handle rx indications
572 * @q_vector: structure containing interrupt and ring information
573 * @skb: packet to send up
575 static void fm10k_receive_skb(struct fm10k_q_vector *q_vector,
578 napi_gro_receive(&q_vector->napi, skb);
581 static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
582 struct fm10k_ring *rx_ring,
585 struct sk_buff *skb = rx_ring->skb;
586 unsigned int total_bytes = 0, total_packets = 0;
587 u16 cleaned_count = fm10k_desc_unused(rx_ring);
589 while (likely(total_packets < budget)) {
590 union fm10k_rx_desc *rx_desc;
592 /* return some buffers to hardware, one at a time is too slow */
593 if (cleaned_count >= FM10K_RX_BUFFER_WRITE) {
594 fm10k_alloc_rx_buffers(rx_ring, cleaned_count);
598 rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);
600 if (!rx_desc->d.staterr)
603 /* This memory barrier is needed to keep us from reading
604 * any other fields out of the rx_desc until we know the
605 * descriptor has been written back
609 /* retrieve a buffer from the ring */
610 skb = fm10k_fetch_rx_buffer(rx_ring, rx_desc, skb);
612 /* exit if we failed to retrieve a buffer */
618 /* fetch next buffer in frame if non-eop */
619 if (fm10k_is_non_eop(rx_ring, rx_desc))
622 /* verify the packet layout is correct */
623 if (fm10k_cleanup_headers(rx_ring, rx_desc, skb)) {
628 /* populate checksum, timestamp, VLAN, and protocol */
629 total_bytes += fm10k_process_skb_fields(rx_ring, rx_desc, skb);
631 fm10k_receive_skb(q_vector, skb);
633 /* reset skb pointer */
636 /* update budget accounting */
640 /* place incomplete frames back on ring for completion */
643 u64_stats_update_begin(&rx_ring->syncp);
644 rx_ring->stats.packets += total_packets;
645 rx_ring->stats.bytes += total_bytes;
646 u64_stats_update_end(&rx_ring->syncp);
647 q_vector->rx.total_packets += total_packets;
648 q_vector->rx.total_bytes += total_bytes;
650 return total_packets;
653 #define VXLAN_HLEN (sizeof(struct udphdr) + 8)
654 static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
656 struct fm10k_intfc *interface = netdev_priv(skb->dev);
657 struct fm10k_udp_port *vxlan_port;
659 /* we can only offload a VXLAN if we recognize it as such */
660 vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
661 struct fm10k_udp_port, list);
665 if (vxlan_port->port != udp_hdr(skb)->dest)
668 /* return offset of udp_hdr plus 8 bytes for VXLAN header */
669 return (struct ethhdr *)(skb_transport_header(skb) + VXLAN_HLEN);
672 #define FM10K_NVGRE_RESERVED0_FLAGS htons(0x9FFF)
673 #define NVGRE_TNI htons(0x2000)
674 struct fm10k_nvgre_hdr {
680 static struct ethhdr *fm10k_gre_is_nvgre(struct sk_buff *skb)
682 struct fm10k_nvgre_hdr *nvgre_hdr;
683 int hlen = ip_hdrlen(skb);
685 /* currently only IPv4 is supported due to hlen above */
686 if (vlan_get_protocol(skb) != htons(ETH_P_IP))
689 /* our transport header should be NVGRE */
690 nvgre_hdr = (struct fm10k_nvgre_hdr *)(skb_network_header(skb) + hlen);
692 /* verify all reserved flags are 0 */
693 if (nvgre_hdr->flags & FM10K_NVGRE_RESERVED0_FLAGS)
696 /* report start of ethernet header */
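/* When the key (TNI) bit is set the inner frame follows the full NVGRE
 * header; otherwise no key field is present and the inner Ethernet header
 * begins where the tni member would sit.
 */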
697 if (nvgre_hdr->flags & NVGRE_TNI)
698 return (struct ethhdr *)(nvgre_hdr + 1);
700 return (struct ethhdr *)(&nvgre_hdr->tni);
703 __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
705 u8 l4_hdr = 0, inner_l4_hdr = 0, inner_l4_hlen;
706 struct ethhdr *eth_hdr;
708 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
709 skb->inner_protocol != htons(ETH_P_TEB))
712 switch (vlan_get_protocol(skb)) {
713 case htons(ETH_P_IP):
714 l4_hdr = ip_hdr(skb)->protocol;
716 case htons(ETH_P_IPV6):
717 l4_hdr = ipv6_hdr(skb)->nexthdr;
725 eth_hdr = fm10k_port_is_vxlan(skb);
728 eth_hdr = fm10k_gre_is_nvgre(skb);
737 switch (eth_hdr->h_proto) {
738 case htons(ETH_P_IP):
739 inner_l4_hdr = inner_ip_hdr(skb)->protocol;
741 case htons(ETH_P_IPV6):
742 inner_l4_hdr = inner_ipv6_hdr(skb)->nexthdr;
748 switch (inner_l4_hdr) {
750 inner_l4_hlen = inner_tcp_hdrlen(skb);
759 /* The hardware allows tunnel offloads only if the combined inner and
760 * outer header is 184 bytes or less
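 *
 * On success the inner ethertype is returned so the checksum and TSO paths
 * know how to parse the inner headers; callers treat a return of zero as
 * "cannot offload this tunnel".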
762 if (skb_inner_transport_header(skb) + inner_l4_hlen -
763 skb_mac_header(skb) > FM10K_TUNNEL_HEADER_LENGTH)
766 return eth_hdr->h_proto;
769 static int fm10k_tso(struct fm10k_ring *tx_ring,
770 struct fm10k_tx_buffer *first)
772 struct sk_buff *skb = first->skb;
773 struct fm10k_tx_desc *tx_desc;
777 if (skb->ip_summed != CHECKSUM_PARTIAL)
780 if (!skb_is_gso(skb))
783 /* compute header lengths */
784 if (skb->encapsulation) {
785 if (!fm10k_tx_encap_offload(skb))
787 th = skb_inner_transport_header(skb);
789 th = skb_transport_header(skb);
792 /* compute offset from SOF to transport header and add header len */
793 hdrlen = (th - skb->data) + (((struct tcphdr *)th)->doff << 2);
795 first->tx_flags |= FM10K_TX_FLAGS_CSUM;
797 /* update gso size and bytecount with header size */
798 first->gso_segs = skb_shinfo(skb)->gso_segs;
799 first->bytecount += (first->gso_segs - 1) * hdrlen;
801 /* populate Tx descriptor header size and mss */
802 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
803 tx_desc->hdrlen = hdrlen;
804 tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
808 tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
809 if (!net_ratelimit())
810 netdev_err(tx_ring->netdev,
811 "TSO requested for unsupported tunnel, disabling offload\n");
815 static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
816 struct fm10k_tx_buffer *first)
818 struct sk_buff *skb = first->skb;
819 struct fm10k_tx_desc *tx_desc;
822 struct ipv6hdr *ipv6;
830 if (skb->ip_summed != CHECKSUM_PARTIAL)
833 if (skb->encapsulation) {
834 protocol = fm10k_tx_encap_offload(skb);
836 if (skb_checksum_help(skb)) {
837 dev_warn(tx_ring->dev,
838 "failed to offload encap csum!\n");
839 tx_ring->tx_stats.csum_err++;
843 network_hdr.raw = skb_inner_network_header(skb);
844 transport_hdr = skb_inner_transport_header(skb);
846 protocol = vlan_get_protocol(skb);
847 network_hdr.raw = skb_network_header(skb);
848 transport_hdr = skb_transport_header(skb);
852 case htons(ETH_P_IP):
853 l4_hdr = network_hdr.ipv4->protocol;
855 case htons(ETH_P_IPV6):
856 l4_hdr = network_hdr.ipv6->nexthdr;
857 if (likely((transport_hdr - network_hdr.raw) ==
858 sizeof(struct ipv6hdr)))
860 ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
861 sizeof(struct ipv6hdr),
863 if (unlikely(frag_off))
864 l4_hdr = NEXTHDR_FRAGMENT;
875 if (skb->encapsulation)
878 if (unlikely(net_ratelimit())) {
879 dev_warn(tx_ring->dev,
880 "partial checksum, version=%d l4 proto=%x\n",
883 skb_checksum_help(skb);
884 tx_ring->tx_stats.csum_err++;
888 /* update TX checksum flag */
889 first->tx_flags |= FM10K_TX_FLAGS_CSUM;
890 tx_ring->tx_stats.csum_good++;
893 /* populate Tx descriptor header size and mss */
894 tx_desc = FM10K_TX_DESC(tx_ring, tx_ring->next_to_use);
899 #define FM10K_SET_FLAG(_input, _flag, _result) \
900 ((_flag <= _result) ? \
901 ((u32)(_input & _flag) * (_result / _flag)) : \
902 ((u32)(_input & _flag) / (_flag / _result)))
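/* FM10K_SET_FLAG() maps one bit of the software tx_flags word onto the
 * matching descriptor flag: assuming both flag constants are powers of two,
 * the multiply or divide by their ratio is just a branchless shift, so e.g.
 * FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, FM10K_TXD_FLAG_CSUM)
 * evaluates to FM10K_TXD_FLAG_CSUM when the CSUM flag is set and 0 otherwise.
 */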
904 static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
906 /* set type for advanced descriptor with frame checksum insertion */
909 /* set checksum offload bits */
910 desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
911 FM10K_TXD_FLAG_CSUM);
916 static bool fm10k_tx_desc_push(struct fm10k_ring *tx_ring,
917 struct fm10k_tx_desc *tx_desc, u16 i,
918 dma_addr_t dma, unsigned int size, u8 desc_flags)
920 /* set RS and INT for last frame in a cache line */
921 if ((++i & (FM10K_TXD_WB_FIFO_SIZE - 1)) == 0)
922 desc_flags |= FM10K_TXD_FLAG_RS | FM10K_TXD_FLAG_INT;
924 /* record values to descriptor */
925 tx_desc->buffer_addr = cpu_to_le64(dma);
926 tx_desc->flags = desc_flags;
927 tx_desc->buflen = cpu_to_le16(size);
929 /* return true if we just wrapped the ring */
930 return i == tx_ring->count;
933 static int __fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
935 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
937 /* Memory barrier before checking head and tail */
940 /* Check again in a case another CPU has just made room available */
941 if (likely(fm10k_desc_unused(tx_ring) < size))
944 /* A reprieve! - use start_queue because it doesn't call schedule */
945 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
946 ++tx_ring->tx_stats.restart_queue;
950 static inline int fm10k_maybe_stop_tx(struct fm10k_ring *tx_ring, u16 size)
952 if (likely(fm10k_desc_unused(tx_ring) >= size))
954 return __fm10k_maybe_stop_tx(tx_ring, size);
957 static void fm10k_tx_map(struct fm10k_ring *tx_ring,
958 struct fm10k_tx_buffer *first)
960 struct sk_buff *skb = first->skb;
961 struct fm10k_tx_buffer *tx_buffer;
962 struct fm10k_tx_desc *tx_desc;
963 struct skb_frag_struct *frag;
966 unsigned int data_len, size;
967 u32 tx_flags = first->tx_flags;
968 u16 i = tx_ring->next_to_use;
969 u8 flags = fm10k_tx_desc_flags(skb, tx_flags);
971 tx_desc = FM10K_TX_DESC(tx_ring, i);
973 /* add HW VLAN tag */
974 if (skb_vlan_tag_present(skb))
975 tx_desc->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
979 size = skb_headlen(skb);
982 dma = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
984 data_len = skb->data_len;
987 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
988 if (dma_mapping_error(tx_ring->dev, dma))
991 /* record length, and DMA address */
992 dma_unmap_len_set(tx_buffer, len, size);
993 dma_unmap_addr_set(tx_buffer, dma, dma);
995 while (unlikely(size > FM10K_MAX_DATA_PER_TXD)) {
996 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++, dma,
997 FM10K_MAX_DATA_PER_TXD, flags)) {
998 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1002 dma += FM10K_MAX_DATA_PER_TXD;
1003 size -= FM10K_MAX_DATA_PER_TXD;
1006 if (likely(!data_len))
1009 if (fm10k_tx_desc_push(tx_ring, tx_desc++, i++,
1010 dma, size, flags)) {
1011 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1015 size = skb_frag_size(frag);
1018 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1021 tx_buffer = &tx_ring->tx_buffer[i];
1024 /* write last descriptor with LAST bit set */
1025 flags |= FM10K_TXD_FLAG_LAST;
1027 if (fm10k_tx_desc_push(tx_ring, tx_desc, i++, dma, size, flags))
1030 /* record bytecount for BQL */
1031 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1033 /* record SW timestamp if HW timestamp is not available */
1034 skb_tx_timestamp(first->skb);
1036 /* Force memory writes to complete before letting h/w know there
1037 * are new descriptors to fetch. (Only applicable for weak-ordered
1038 * memory model archs, such as IA-64).
1040 * We also need this memory barrier to make certain all of the
1041 * status bits have been updated before next_to_watch is written.
1045 /* set next_to_watch value indicating a packet is present */
1046 first->next_to_watch = tx_desc;
1048 tx_ring->next_to_use = i;
1050 /* Make sure there is space in the ring for the next send. */
1051 fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
1053 /* notify HW of packet */
1054 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
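/* Defer the tail (doorbell) write while the stack indicates more frames are
 * queued behind this one (xmit_more) and the queue is still running; this
 * batches several sends into a single MMIO write.
 */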
1055 writel(i, tx_ring->tail);
1057 /* we need this if more than one processor can write to our tail
1058 * at a time; it synchronizes IO on IA64/Altix systems
1065 dev_err(tx_ring->dev, "TX DMA map failed\n");
1067 /* clear dma mappings for failed tx_buffer map */
1069 tx_buffer = &tx_ring->tx_buffer[i];
1070 fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1071 if (tx_buffer == first)
1078 tx_ring->next_to_use = i;
1081 netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
1082 struct fm10k_ring *tx_ring)
1084 u16 count = TXD_USE_COUNT(skb_headlen(skb));
1085 struct fm10k_tx_buffer *first;
1090 /* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
1091 * + 1 desc for skb_headlen/FM10K_MAX_DATA_PER_TXD,
1092 * + 2 desc gap to keep tail from touching head
1093 * otherwise try next time
1095 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1096 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
1098 if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
1099 tx_ring->tx_stats.tx_busy++;
1100 return NETDEV_TX_BUSY;
1103 /* record the location of the first descriptor for this packet */
1104 first = &tx_ring->tx_buffer[tx_ring->next_to_use];
1106 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
1107 first->gso_segs = 1;
1109 /* record initial flags and protocol */
1110 first->tx_flags = tx_flags;
1112 tso = fm10k_tso(tx_ring, first);
1116 fm10k_tx_csum(tx_ring, first);
1118 fm10k_tx_map(tx_ring, first);
1120 return NETDEV_TX_OK;
1123 dev_kfree_skb_any(first->skb);
1126 return NETDEV_TX_OK;
1129 static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
1131 return ring->stats.packets;
1135 * fm10k_get_tx_pending - how many Tx descriptors not processed
1136 * @ring: the ring structure
1137 * @in_sw: is tx_pending being checked in SW or in HW?
1139 u64 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw)
1141 struct fm10k_intfc *interface = ring->q_vector->interface;
1142 struct fm10k_hw *hw = &interface->hw;
1145 if (likely(in_sw)) {
1146 head = ring->next_to_clean;
1147 tail = ring->next_to_use;
1149 head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
1150 tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
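/* The ring is circular, so when head has wrapped past tail the outstanding
 * count is taken modulo the ring size by adding ring->count.
 */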
1153 return ((head <= tail) ? tail : tail + ring->count) - head;
1156 bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring)
1158 u32 tx_done = fm10k_get_tx_completed(tx_ring);
1159 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
1160 u32 tx_pending = fm10k_get_tx_pending(tx_ring, true);
1162 clear_check_for_tx_hang(tx_ring);
1164 /* Check for a hung queue, but be thorough. This verifies
1165 * that a transmit has been completed since the previous
1166 * check AND there is at least one packet pending. By
1167 * requiring this to fail twice we avoid races with
1168 * clearing the ARMED bit and conditions where we
1169 * run the check_tx_hang logic with a transmit completion
1170 * pending but without time to complete it yet.
1172 if (!tx_pending || (tx_done_old != tx_done)) {
1173 /* update completed stats and continue */
1174 tx_ring->tx_stats.tx_done_old = tx_done;
1175 /* reset the countdown */
1176 clear_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
1181 /* make sure it is true for two checks in a row */
1182 return test_and_set_bit(__FM10K_HANG_CHECK_ARMED, &tx_ring->state);
1186 * fm10k_tx_timeout_reset - initiate reset due to Tx timeout
1187 * @interface: driver private struct
1189 void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
1191 /* Do the reset outside of interrupt context */
1192 if (!test_bit(__FM10K_DOWN, &interface->state)) {
1193 interface->tx_timeout_count++;
1194 interface->flags |= FM10K_FLAG_RESET_REQUESTED;
1195 fm10k_service_event_schedule(interface);
1200 * fm10k_clean_tx_irq - Reclaim resources after transmit completes
1201 * @q_vector: structure containing interrupt and ring information
1202 * @tx_ring: tx ring to clean
1203 * @napi_budget: Used to determine if we are in netpoll
1205 static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
1206 struct fm10k_ring *tx_ring, int napi_budget)
1208 struct fm10k_intfc *interface = q_vector->interface;
1209 struct fm10k_tx_buffer *tx_buffer;
1210 struct fm10k_tx_desc *tx_desc;
1211 unsigned int total_bytes = 0, total_packets = 0;
1212 unsigned int budget = q_vector->tx.work_limit;
1213 unsigned int i = tx_ring->next_to_clean;
1215 if (test_bit(__FM10K_DOWN, &interface->state))
1218 tx_buffer = &tx_ring->tx_buffer[i];
1219 tx_desc = FM10K_TX_DESC(tx_ring, i);
1220 i -= tx_ring->count;
1223 struct fm10k_tx_desc *eop_desc = tx_buffer->next_to_watch;
1225 /* if next_to_watch is not set then there is no work pending */
1229 /* prevent any other reads prior to eop_desc */
1232 /* if DD is not set pending work has not been completed */
1233 if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
1236 /* clear next_to_watch to prevent false hangs */
1237 tx_buffer->next_to_watch = NULL;
1239 /* update the statistics for this packet */
1240 total_bytes += tx_buffer->bytecount;
1241 total_packets += tx_buffer->gso_segs;
1244 napi_consume_skb(tx_buffer->skb, napi_budget);
1246 /* unmap skb header data */
1247 dma_unmap_single(tx_ring->dev,
1248 dma_unmap_addr(tx_buffer, dma),
1249 dma_unmap_len(tx_buffer, len),
1252 /* clear tx_buffer data */
1253 tx_buffer->skb = NULL;
1254 dma_unmap_len_set(tx_buffer, len, 0);
1256 /* unmap remaining buffers */
1257 while (tx_desc != eop_desc) {
1262 i -= tx_ring->count;
1263 tx_buffer = tx_ring->tx_buffer;
1264 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1267 /* unmap any remaining paged data */
1268 if (dma_unmap_len(tx_buffer, len)) {
1269 dma_unmap_page(tx_ring->dev,
1270 dma_unmap_addr(tx_buffer, dma),
1271 dma_unmap_len(tx_buffer, len),
1273 dma_unmap_len_set(tx_buffer, len, 0);
1277 /* move us one more past the eop_desc for start of next pkt */
1282 i -= tx_ring->count;
1283 tx_buffer = tx_ring->tx_buffer;
1284 tx_desc = FM10K_TX_DESC(tx_ring, 0);
1287 /* issue prefetch for next Tx descriptor */
1290 /* update budget accounting */
1292 } while (likely(budget));
1294 i += tx_ring->count;
1295 tx_ring->next_to_clean = i;
1296 u64_stats_update_begin(&tx_ring->syncp);
1297 tx_ring->stats.bytes += total_bytes;
1298 tx_ring->stats.packets += total_packets;
1299 u64_stats_update_end(&tx_ring->syncp);
1300 q_vector->tx.total_bytes += total_bytes;
1301 q_vector->tx.total_packets += total_packets;
1303 if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring)) {
1304 /* schedule immediate reset if we believe we hung */
1305 struct fm10k_hw *hw = &interface->hw;
1307 netif_err(interface, drv, tx_ring->netdev,
1308 "Detected Tx Unit Hang\n"
1310 " TDH, TDT <%x>, <%x>\n"
1311 " next_to_use <%x>\n"
1312 " next_to_clean <%x>\n",
1313 tx_ring->queue_index,
1314 fm10k_read_reg(hw, FM10K_TDH(tx_ring->reg_idx)),
1315 fm10k_read_reg(hw, FM10K_TDT(tx_ring->reg_idx)),
1316 tx_ring->next_to_use, i);
1318 netif_stop_subqueue(tx_ring->netdev,
1319 tx_ring->queue_index);
1321 netif_info(interface, probe, tx_ring->netdev,
1322 "tx hang %d detected on queue %d, resetting interface\n",
1323 interface->tx_timeout_count + 1,
1324 tx_ring->queue_index);
1326 fm10k_tx_timeout_reset(interface);
1328 /* the netdev is about to reset, no point in enabling stuff */
1332 /* notify netdev of completed buffers */
1333 netdev_tx_completed_queue(txring_txq(tx_ring),
1334 total_packets, total_bytes);
1336 #define TX_WAKE_THRESHOLD min_t(u16, FM10K_MIN_TXD - 1, DESC_NEEDED * 2)
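/* Only wake a stopped queue once a comfortable number of descriptors is free
 * again, so the queue does not bounce between stopped and running on every
 * completion.
 */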
1337 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1338 (fm10k_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
1339 /* Make sure that anybody stopping the queue after this
1340 * sees the new next_to_clean.
1343 if (__netif_subqueue_stopped(tx_ring->netdev,
1344 tx_ring->queue_index) &&
1345 !test_bit(__FM10K_DOWN, &interface->state)) {
1346 netif_wake_subqueue(tx_ring->netdev,
1347 tx_ring->queue_index);
1348 ++tx_ring->tx_stats.restart_queue;
1356 * fm10k_update_itr - update the dynamic ITR value based on packet size
1358 * Stores a new ITR value based strictly on packet size. The
1359 * divisors and thresholds used by this function were determined based
1360 * on theoretical maximum wire speed and testing data, in order to
1361 * minimize response time while increasing bulk throughput.
1363 * @ring_container: Container for rings to have ITR updated
1365 static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
1367 unsigned int avg_wire_size, packets, itr_round;
1369 /* Only update ITR if we are using adaptive setting */
1370 if (!ITR_IS_ADAPTIVE(ring_container->itr))
1373 packets = ring_container->total_packets;
1377 avg_wire_size = ring_container->total_bytes / packets;
1379 /* The following is a crude approximation of:
1380 * wmem_default / (size + overhead) = desired_pkts_per_int
1381 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1382 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1384 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1385 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1388 * (34 * (size + 24)) / (size + 640) = ITR
1390 * We first do some math on the packet size and then finally bitshift
1391 * by 8 after rounding up. We also have to account for PCIe link speed
1392 * difference as ITR scales based on this.
1394 if (avg_wire_size <= 360) {
1395 /* Start at 250K ints/sec and gradually drop to 77K ints/sec */
1397 avg_wire_size += 376;
1398 } else if (avg_wire_size <= 1152) {
1399 /* 77K ints/sec to 45K ints/sec */
1401 avg_wire_size += 2176;
1402 } else if (avg_wire_size <= 1920) {
1403 /* 45K ints/sec to 38K ints/sec */
1404 avg_wire_size += 4480;
1406 /* plateau at a limit of 38K ints/sec */
1407 avg_wire_size = 6656;
1410 /* Perform final bitshift for division after rounding up to ensure
1411 * that the calculation will never get below a 1. The bit shift
1412 * accounts for changes in the ITR due to PCIe link speed.
1414 itr_round = READ_ONCE(ring_container->itr_scale) + 8;
1415 avg_wire_size += BIT(itr_round) - 1;
1416 avg_wire_size >>= itr_round;
1418 /* write back value and retain adaptive flag */
1419 ring_container->itr = avg_wire_size | FM10K_ITR_ADAPTIVE;
1422 ring_container->total_bytes = 0;
1423 ring_container->total_packets = 0;
1426 static void fm10k_qv_enable(struct fm10k_q_vector *q_vector)
1428 /* Enable auto-mask and clear the current mask */
1429 u32 itr = FM10K_ITR_ENABLE;
1432 fm10k_update_itr(&q_vector->tx);
1435 fm10k_update_itr(&q_vector->rx);
1437 /* Store Tx itr in timer slot 0 */
1438 itr |= (q_vector->tx.itr & FM10K_ITR_MAX);
1440 /* Shift Rx itr to timer slot 1 */
1441 itr |= (q_vector->rx.itr & FM10K_ITR_MAX) << FM10K_ITR_INTERVAL1_SHIFT;
1443 /* Write the final value to the ITR register */
1444 writel(itr, q_vector->itr);
1447 static int fm10k_poll(struct napi_struct *napi, int budget)
1449 struct fm10k_q_vector *q_vector =
1450 container_of(napi, struct fm10k_q_vector, napi);
1451 struct fm10k_ring *ring;
1452 int per_ring_budget, work_done = 0;
1453 bool clean_complete = true;
1455 fm10k_for_each_ring(ring, q_vector->tx) {
1456 if (!fm10k_clean_tx_irq(q_vector, ring, budget))
1457 clean_complete = false;
1460 /* Handle case where we are called by netpoll with a budget of 0 */
1464 /* attempt to distribute budget to each queue fairly, but don't
1465 * allow the budget to go below 1 because we'll exit polling
1467 if (q_vector->rx.count > 1)
1468 per_ring_budget = max(budget / q_vector->rx.count, 1);
1470 per_ring_budget = budget;
1472 fm10k_for_each_ring(ring, q_vector->rx) {
1473 int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
1476 if (work >= per_ring_budget)
1477 clean_complete = false;
1480 /* If all work not completed, return budget and keep polling */
1481 if (!clean_complete)
1484 /* all work done, exit the polling mode */
1485 napi_complete_done(napi, work_done);
1487 /* re-enable the q_vector */
1488 fm10k_qv_enable(q_vector);
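/* NAPI convention: a poll routine that called napi_complete_done() must
 * report strictly less than the full budget, hence the budget - 1 clamp.
 */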
1490 return min(work_done, budget - 1);
1494 * fm10k_set_qos_queues: Allocate queues for a QoS-enabled device
1495 * @interface: board private structure to initialize
1497 * When QoS (Quality of Service) is enabled, allocate queues for
1498 * each traffic class. If multiqueue isn't available, then abort QoS
1501 * This function handles all combinations of QoS and RSS.
1504 static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
1506 struct net_device *dev = interface->netdev;
1507 struct fm10k_ring_feature *f;
1511 /* Map queue offset and counts onto allocated tx queues */
1512 pcs = netdev_get_num_tc(dev);
1517 /* set QoS mask and indices */
1518 f = &interface->ring_feature[RING_F_QOS];
1520 f->mask = BIT(fls(pcs - 1)) - 1;
1522 /* determine the upper limit for our current DCB mode */
1523 rss_i = interface->hw.mac.max_queues / pcs;
1524 rss_i = BIT(fls(rss_i) - 1);
1526 /* set RSS mask and indices */
1527 f = &interface->ring_feature[RING_F_RSS];
1528 rss_i = min_t(u16, rss_i, f->limit);
1530 f->mask = BIT(fls(rss_i - 1)) - 1;
1532 /* configure pause class to queue mapping */
1533 for (i = 0; i < pcs; i++)
1534 netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
1536 interface->num_rx_queues = rss_i * pcs;
1537 interface->num_tx_queues = rss_i * pcs;
1543 * fm10k_set_rss_queues: Allocate queues for RSS
1544 * @interface: board private structure to initialize
1546 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
1547 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
1550 static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
1552 struct fm10k_ring_feature *f;
1555 f = &interface->ring_feature[RING_F_RSS];
1556 rss_i = min_t(u16, interface->hw.mac.max_queues, f->limit);
1558 /* record indices and power of 2 mask for RSS */
1560 f->mask = BIT(fls(rss_i - 1)) - 1;
1562 interface->num_rx_queues = rss_i;
1563 interface->num_tx_queues = rss_i;
1569 * fm10k_set_num_queues: Allocate queues for device, feature dependent
1570 * @interface: board private structure to initialize
1572 * This is the top level queue allocation routine. The order here is very
1573 * important, starting with the "most" number of features turned on at once,
1574 * and ending with the smallest set of features. This way large combinations
1575 * can be allocated if they're turned on, and smaller combinations are the
1576 * fallthrough conditions.
1579 static void fm10k_set_num_queues(struct fm10k_intfc *interface)
1581 /* Attempt to set up QoS and RSS first */
1582 if (fm10k_set_qos_queues(interface))
1585 /* If we don't have QoS, just fall back to RSS only. */
1586 fm10k_set_rss_queues(interface);
1590 * fm10k_reset_num_queues - Reset the number of queues to zero
1591 * @interface: board private structure
1593 * This function should be called whenever we need to reset the number of
1594 * queues after an error condition.
1596 static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
1598 interface->num_tx_queues = 0;
1599 interface->num_rx_queues = 0;
1600 interface->num_q_vectors = 0;
1604 * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
1605 * @interface: board private structure to initialize
1606 * @v_count: q_vectors allocated on interface, used for ring interleaving
1607 * @v_idx: index of vector in interface struct
1608 * @txr_count: total number of Tx rings to allocate
1609 * @txr_idx: index of first Tx ring to allocate
1610 * @rxr_count: total number of Rx rings to allocate
1611 * @rxr_idx: index of first Rx ring to allocate
1613 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1615 static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
1616 unsigned int v_count, unsigned int v_idx,
1617 unsigned int txr_count, unsigned int txr_idx,
1618 unsigned int rxr_count, unsigned int rxr_idx)
1620 struct fm10k_q_vector *q_vector;
1621 struct fm10k_ring *ring;
1622 int ring_count, size;
1624 ring_count = txr_count + rxr_count;
1625 size = sizeof(struct fm10k_q_vector) +
1626 (sizeof(struct fm10k_ring) * ring_count);
1628 /* allocate q_vector and rings */
1629 q_vector = kzalloc(size, GFP_KERNEL);
1633 /* initialize NAPI */
1634 netif_napi_add(interface->netdev, &q_vector->napi,
1635 fm10k_poll, NAPI_POLL_WEIGHT);
1637 /* tie q_vector and interface together */
1638 interface->q_vector[v_idx] = q_vector;
1639 q_vector->interface = interface;
1640 q_vector->v_idx = v_idx;
1642 /* initialize pointer to rings */
1643 ring = q_vector->ring;
1645 /* save Tx ring container info */
1646 q_vector->tx.ring = ring;
1647 q_vector->tx.work_limit = FM10K_DEFAULT_TX_WORK;
1648 q_vector->tx.itr = interface->tx_itr;
1649 q_vector->tx.itr_scale = interface->hw.mac.itr_scale;
1650 q_vector->tx.count = txr_count;
1653 /* assign generic ring traits */
1654 ring->dev = &interface->pdev->dev;
1655 ring->netdev = interface->netdev;
1657 /* configure backlink on ring */
1658 ring->q_vector = q_vector;
1660 /* apply Tx specific ring traits */
1661 ring->count = interface->tx_ring_count;
1662 ring->queue_index = txr_idx;
1664 /* assign ring to interface */
1665 interface->tx_ring[txr_idx] = ring;
1667 /* update count and index */
1671 /* push pointer to next ring */
1675 /* save Rx ring container info */
1676 q_vector->rx.ring = ring;
1677 q_vector->rx.itr = interface->rx_itr;
1678 q_vector->rx.itr_scale = interface->hw.mac.itr_scale;
1679 q_vector->rx.count = rxr_count;
1682 /* assign generic ring traits */
1683 ring->dev = &interface->pdev->dev;
1684 ring->netdev = interface->netdev;
1685 rcu_assign_pointer(ring->l2_accel, interface->l2_accel);
1687 /* configure backlink on ring */
1688 ring->q_vector = q_vector;
1690 /* apply Rx specific ring traits */
1691 ring->count = interface->rx_ring_count;
1692 ring->queue_index = rxr_idx;
1694 /* assign ring to interface */
1695 interface->rx_ring[rxr_idx] = ring;
1697 /* update count and index */
1701 /* push pointer to next ring */
1705 fm10k_dbg_q_vector_init(q_vector);
1711 * fm10k_free_q_vector - Free memory allocated for specific interrupt vector
1712 * @interface: board private structure to initialize
1713 * @v_idx: Index of vector to be freed
1715 * This function frees the memory allocated to the q_vector. In addition if
1716 * NAPI is enabled it will delete any references to the NAPI struct prior
1717 * to freeing the q_vector.
1719 static void fm10k_free_q_vector(struct fm10k_intfc *interface, int v_idx)
1721 struct fm10k_q_vector *q_vector = interface->q_vector[v_idx];
1722 struct fm10k_ring *ring;
1724 fm10k_dbg_q_vector_exit(q_vector);
1726 fm10k_for_each_ring(ring, q_vector->tx)
1727 interface->tx_ring[ring->queue_index] = NULL;
1729 fm10k_for_each_ring(ring, q_vector->rx)
1730 interface->rx_ring[ring->queue_index] = NULL;
1732 interface->q_vector[v_idx] = NULL;
1733 netif_napi_del(&q_vector->napi);
1734 kfree_rcu(q_vector, rcu);
1738 * fm10k_alloc_q_vectors - Allocate memory for interrupt vectors
1739 * @interface: board private structure to initialize
1741 * We allocate one q_vector per queue interrupt. If allocation fails we
1744 static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
1746 unsigned int q_vectors = interface->num_q_vectors;
1747 unsigned int rxr_remaining = interface->num_rx_queues;
1748 unsigned int txr_remaining = interface->num_tx_queues;
1749 unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1752 if (q_vectors >= (rxr_remaining + txr_remaining)) {
1753 for (; rxr_remaining; v_idx++) {
1754 err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
1759 /* update counts and index */
1765 for (; v_idx < q_vectors; v_idx++) {
1766 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1767 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1769 err = fm10k_alloc_q_vector(interface, q_vectors, v_idx,
1776 /* update counts and index */
1777 rxr_remaining -= rqpv;
1778 txr_remaining -= tqpv;
1786 fm10k_reset_num_queues(interface);
1789 fm10k_free_q_vector(interface, v_idx);
1795 * fm10k_free_q_vectors - Free memory allocated for interrupt vectors
1796 * @interface: board private structure to initialize
1798 * This function frees the memory allocated to the q_vectors. In addition if
1799 * NAPI is enabled it will delete any references to the NAPI struct prior
1800 * to freeing the q_vector.
1802 static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
1804 int v_idx = interface->num_q_vectors;
1806 fm10k_reset_num_queues(interface);
1809 fm10k_free_q_vector(interface, v_idx);
1813 * fm10k_reset_msix_capability - reset MSI-X capability
1814 * @interface: board private structure to initialize
1816 * Reset the MSI-X capability back to its starting state
1818 static void fm10k_reset_msix_capability(struct fm10k_intfc *interface)
1820 pci_disable_msix(interface->pdev);
1821 kfree(interface->msix_entries);
1822 interface->msix_entries = NULL;
1826 * fm10k_init_msix_capability - configure MSI-X capability
1827 * @interface: board private structure to initialize
1829 * Attempt to configure the interrupts using the best available
1830 * capabilities of the hardware and the kernel.
1832 static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
1834 struct fm10k_hw *hw = &interface->hw;
1835 int v_budget, vector;
1837 /* It's easy to be greedy for MSI-X vectors, but it really
1838 * doesn't do us much good if we have a lot more vectors
1839 * than CPUs. So let's be conservative and only ask for
1840 * (roughly) the same number of vectors as there are CPUs.
1841 * The default is to use pairs of vectors.
1843 v_budget = max(interface->num_rx_queues, interface->num_tx_queues);
1844 v_budget = min_t(u16, v_budget, num_online_cpus());
1846 /* account for vectors not related to queues */
1847 v_budget += NON_Q_VECTORS(hw);
1849 /* At the same time, hardware can only support a maximum of
1850 * hw.mac->max_msix_vectors vectors. With features
1851 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
1852 * descriptor queues supported by our device. Thus, we cap it off in
1853 * those rare cases where the cpu count also exceeds our vector limit.
1855 v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
1857 /* A failure in MSI-X entry allocation is fatal. */
1858 interface->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
1860 if (!interface->msix_entries)
1863 /* populate entry values */
1864 for (vector = 0; vector < v_budget; vector++)
1865 interface->msix_entries[vector].entry = vector;
1867 /* Attempt to enable MSI-X with requested value */
1868 v_budget = pci_enable_msix_range(interface->pdev,
1869 interface->msix_entries,
1873 kfree(interface->msix_entries);
1874 interface->msix_entries = NULL;
1878 /* record the number of queues available for q_vectors */
1879 interface->num_q_vectors = v_budget - NON_Q_VECTORS(hw);
1885 * fm10k_cache_ring_qos - Descriptor ring to register mapping for QoS
1886 * @interface: Interface structure containing rings and devices
1888 * Cache the descriptor ring offsets for QoS
1890 static bool fm10k_cache_ring_qos(struct fm10k_intfc *interface)
1892 struct net_device *dev = interface->netdev;
1893 int pc, offset, rss_i, i, q_idx;
1894 u16 pc_stride = interface->ring_feature[RING_F_QOS].mask + 1;
1895 u8 num_pcs = netdev_get_num_tc(dev);
1900 rss_i = interface->ring_feature[RING_F_RSS].indices;
1902 for (pc = 0, offset = 0; pc < num_pcs; pc++, offset += rss_i) {
1904 for (i = 0; i < rss_i; i++) {
1905 interface->tx_ring[offset + i]->reg_idx = q_idx;
1906 interface->tx_ring[offset + i]->qos_pc = pc;
1907 interface->rx_ring[offset + i]->reg_idx = q_idx;
1908 interface->rx_ring[offset + i]->qos_pc = pc;
1917 * fm10k_cache_ring_rss - Descriptor ring to register mapping for RSS
1918 * @interface: Interface structure containing rings and devices
1920 * Cache the descriptor ring offsets for RSS
1922 static void fm10k_cache_ring_rss(struct fm10k_intfc *interface)
1926 for (i = 0; i < interface->num_rx_queues; i++)
1927 interface->rx_ring[i]->reg_idx = i;
1929 for (i = 0; i < interface->num_tx_queues; i++)
1930 interface->tx_ring[i]->reg_idx = i;
1934 * fm10k_assign_rings - Map rings to network devices
1935 * @interface: Interface structure containing rings and devices
1937 * This function is meant to go through and configure both the network
1938 * devices so that they contain rings, and configure the rings so that
1939 * they function with their network devices.
1941 static void fm10k_assign_rings(struct fm10k_intfc *interface)
1943 if (fm10k_cache_ring_qos(interface))
1946 fm10k_cache_ring_rss(interface);
1949 static void fm10k_init_reta(struct fm10k_intfc *interface)
1951 u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
1954 /* If the Rx flow indirection table has been configured manually, we
1955 * need to maintain it when possible.
1957 if (netif_is_rxfh_configured(interface->netdev)) {
1958 for (i = FM10K_RETA_SIZE; i--;) {
1959 reta = interface->reta[i];
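/* Each 32-bit RETA entry packs four 8-bit queue indices; the shifts below
 * extract each byte and verify it still addresses a valid queue for the
 * current rss_i.
 */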
1960 if ((((reta << 24) >> 24) < rss_i) &&
1961 (((reta << 16) >> 24) < rss_i) &&
1962 (((reta << 8) >> 24) < rss_i) &&
1963 (((reta) >> 24) < rss_i))
1966 /* this should never happen */
1967 dev_err(&interface->pdev->dev,
1968 "RSS indirection table assigned flows out of queue bounds. Reconfiguring.\n");
1969 goto repopulate_reta;
1972 /* do nothing if all of the elements are in bounds */
1977 fm10k_write_reta(interface, NULL);
1981 * fm10k_init_queueing_scheme - Determine proper queueing scheme
1982 * @interface: board private structure to initialize
1984 * We determine which queueing scheme to use based on...
1985 * - Hardware queue count (num_*_queues)
1986 * - defined by miscellaneous hardware support/features (RSS, etc.)
1988 int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
1992 /* Number of supported queues */
1993 fm10k_set_num_queues(interface);
1995 /* Configure MSI-X capability */
1996 err = fm10k_init_msix_capability(interface);
1998 dev_err(&interface->pdev->dev,
1999 "Unable to initialize MSI-X capability\n");
2003 /* Allocate memory for queues */
2004 err = fm10k_alloc_q_vectors(interface);
2006 dev_err(&interface->pdev->dev,
2007 "Unable to allocate queue vectors\n");
2008 goto err_alloc_q_vectors;
2011 /* Map rings to devices, and map devices to physical queues */
2012 fm10k_assign_rings(interface);
2014 /* Initialize RSS redirection table */
2015 fm10k_init_reta(interface);
2019 err_alloc_q_vectors:
2020 fm10k_reset_msix_capability(interface);
2022 fm10k_reset_num_queues(interface);
2027 * fm10k_clear_queueing_scheme - Clear the current queueing scheme settings
2028 * @interface: board private structure to clear queueing scheme on
2030 * We go through and clear queueing specific resources and reset the structure
2031 * to pre-load conditions
2033 void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface)
2035 fm10k_free_q_vectors(interface);
2036 fm10k_reset_msix_capability(interface);