2 * Copyright (C) 2015-2017 Netronome Systems, Inc.
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
9 * The BSD 2-Clause License:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * Netronome network device driver: Common functions between PF and VF
37 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38 * Jason McMullan <jason.mcmullan@netronome.com>
39 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
40 * Brad Petrus <brad.petrus@netronome.com>
41 * Chris Telfer <chris.telfer@netronome.com>
44 #include <linux/bitfield.h>
45 #include <linux/bpf.h>
46 #include <linux/bpf_trace.h>
47 #include <linux/module.h>
48 #include <linux/kernel.h>
49 #include <linux/init.h>
51 #include <linux/netdevice.h>
52 #include <linux/etherdevice.h>
53 #include <linux/interrupt.h>
55 #include <linux/ipv6.h>
56 #include <linux/page_ref.h>
57 #include <linux/pci.h>
58 #include <linux/pci_regs.h>
59 #include <linux/msi.h>
60 #include <linux/ethtool.h>
61 #include <linux/log2.h>
62 #include <linux/if_vlan.h>
63 #include <linux/random.h>
64 #include <linux/vmalloc.h>
65 #include <linux/ktime.h>
67 #include <net/switchdev.h>
68 #include <net/vxlan.h>
70 #include "nfpcore/nfp_nsp.h"
72 #include "nfp_net_ctrl.h"
74 #include "nfp_net_sriov.h"
78 * nfp_net_get_fw_version() - Read and parse the FW version
79 * @fw_ver: Output fw_version structure to read the version into
80 * @ctrl_bar: Mapped address of the control BAR
82 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
83 void __iomem *ctrl_bar)
87 reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
88 put_unaligned_le32(reg, fw_ver);
91 static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
93 return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
94 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
95 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
99 nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
101 dma_sync_single_for_device(dp->dev, dma_addr,
102 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
106 static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
108 dma_unmap_single_attrs(dp->dev, dma_addr,
109 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
110 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
113 static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
116 dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
117 len, dp->rx_dma_dir);
122 * Firmware reconfig may take a while so we have two versions of it -
123 * synchronous and asynchronous (posted). All synchronous callers are holding
124 * RTNL so we don't have to worry about serializing them.
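/* A typical synchronous caller (holding RTNL, able to sleep) simply calls
 * nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN) and checks the return value,
 * while datapath/atomic contexts post the same update bits through
 * nfp_net_reconfig_post() and let the timer machinery below complete them.
 * (Illustrative usage only; the specific update flag is just an example.)
 */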
126 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
128 nn_writel(nn, NFP_NET_CFG_UPDATE, update);
129 /* ensure update is written before pinging HW */
131 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
134 /* Pass 0 as update to run posted reconfigs. */
135 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
137 update |= nn->reconfig_posted;
138 nn->reconfig_posted = 0;
140 nfp_net_reconfig_start(nn, update);
142 nn->reconfig_timer_active = true;
143 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
146 static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
150 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
153 if (reg & NFP_NET_CFG_UPDATE_ERR) {
154 nn_err(nn, "Reconfig error: 0x%08x\n", reg);
156 } else if (last_check) {
157 nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
164 static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
166 bool timed_out = false;
168 /* Poll update field, waiting for NFP to ack the config */
169 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
171 timed_out = time_is_before_eq_jiffies(deadline);
174 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
177 return timed_out ? -EIO : 0;
180 static void nfp_net_reconfig_timer(unsigned long data)
182 struct nfp_net *nn = (void *)data;
184 spin_lock_bh(&nn->reconfig_lock);
186 nn->reconfig_timer_active = false;
188 /* If sync caller is present it will take over from us */
189 if (nn->reconfig_sync_present)
192 /* Read reconfig status and report errors */
193 nfp_net_reconfig_check_done(nn, true);
195 if (nn->reconfig_posted)
196 nfp_net_reconfig_start_async(nn, 0);
198 spin_unlock_bh(&nn->reconfig_lock);
202 * nfp_net_reconfig_post() - Post async reconfig request
203 * @nn: NFP Net device to reconfigure
204 * @update: The value for the update field in the BAR config
206 * Record FW reconfiguration request. Reconfiguration will be kicked off
207 * whenever reconfiguration machinery is idle. Multiple requests can be merged together.
210 static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
212 spin_lock_bh(&nn->reconfig_lock);
214 /* Sync caller will kick off async reconf when it's done, just post */
215 if (nn->reconfig_sync_present) {
216 nn->reconfig_posted |= update;
220 /* Opportunistically check if the previous command is done */
221 if (!nn->reconfig_timer_active ||
222 nfp_net_reconfig_check_done(nn, false))
223 nfp_net_reconfig_start_async(nn, update);
225 nn->reconfig_posted |= update;
227 spin_unlock_bh(&nn->reconfig_lock);
230 static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
232 bool cancelled_timer = false;
233 u32 pre_posted_requests;
235 spin_lock_bh(&nn->reconfig_lock);
237 nn->reconfig_sync_present = true;
239 if (nn->reconfig_timer_active) {
240 nn->reconfig_timer_active = false;
241 cancelled_timer = true;
243 pre_posted_requests = nn->reconfig_posted;
244 nn->reconfig_posted = 0;
246 spin_unlock_bh(&nn->reconfig_lock);
248 if (cancelled_timer) {
249 del_timer_sync(&nn->reconfig_timer);
250 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
253 /* Run the posted reconfigs which were issued before we started */
254 if (pre_posted_requests) {
255 nfp_net_reconfig_start(nn, pre_posted_requests);
256 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
260 static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
262 nfp_net_reconfig_sync_enter(nn);
264 spin_lock_bh(&nn->reconfig_lock);
265 nn->reconfig_sync_present = false;
266 spin_unlock_bh(&nn->reconfig_lock);
270 * nfp_net_reconfig() - Reconfigure the firmware
271 * @nn: NFP Net device to reconfigure
272 * @update: The value for the update field in the BAR config
274 * Write the update word to the BAR and ping the reconfig queue. Then
275 * poll until the firmware has acknowledged the update by zeroing the update word.
278 * Return: Negative errno on error, 0 on success
280 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
284 nfp_net_reconfig_sync_enter(nn);
286 nfp_net_reconfig_start(nn, update);
287 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
289 spin_lock_bh(&nn->reconfig_lock);
291 if (nn->reconfig_posted)
292 nfp_net_reconfig_start_async(nn, 0);
294 nn->reconfig_sync_present = false;
296 spin_unlock_bh(&nn->reconfig_lock);
302 * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
303 * @nn: NFP Net device to reconfigure
304 * @mbox_cmd: The value for the mailbox command
306 * Helper function for mailbox updates
308 * Return: Negative errno on error, 0 on success
310 static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
314 nn_writeq(nn, NFP_NET_CFG_MBOX_CMD, mbox_cmd);
316 ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
318 nn_err(nn, "Mailbox update error\n");
322 return -nn_readl(nn, NFP_NET_CFG_MBOX_RET);
325 /* Interrupt configuration and handling
329 * nfp_net_irq_unmask() - Unmask automasked interrupt
330 * @nn: NFP Network structure
331 * @entry_nr: MSI-X table entry
333 * Clear the ICR for the IRQ entry.
335 static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
337 nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
342 * nfp_net_irqs_alloc() - allocates MSI-X irqs
343 * @pdev: PCI device structure
344 * @irq_entries: Array to be initialized and used to hold the irq entries
345 * @min_irqs: Minimal acceptable number of interrupts
346 * @wanted_irqs: Target number of interrupts to allocate
348 * Return: Number of irqs obtained or 0 on error.
351 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
352 unsigned int min_irqs, unsigned int wanted_irqs)
357 for (i = 0; i < wanted_irqs; i++)
358 irq_entries[i].entry = i;
360 got_irqs = pci_enable_msix_range(pdev, irq_entries,
361 min_irqs, wanted_irqs);
363 dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
364 min_irqs, wanted_irqs, got_irqs);
368 if (got_irqs < wanted_irqs)
369 dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
370 wanted_irqs, got_irqs);
376 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
377 * @nn: NFP Network structure
378 * @irq_entries: Table of allocated interrupts
379 * @n: Size of @irq_entries (number of entries to grab)
381 * After interrupts are allocated with nfp_net_irqs_alloc() this function
382 * should be called to assign them to a specific netdev (port).
385 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
388 struct nfp_net_dp *dp = &nn->dp;
390 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
391 dp->num_r_vecs = nn->max_r_vecs;
393 memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
395 if (dp->num_rx_rings > dp->num_r_vecs ||
396 dp->num_tx_rings > dp->num_r_vecs)
397 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
398 dp->num_rx_rings, dp->num_tx_rings,
401 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
402 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
403 dp->num_stack_tx_rings = dp->num_tx_rings;
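	/* Worked example (illustrative only): assuming the usual two non-queue
	 * vectors (LSC and EXN), n = 10 MSI-X entries yields max_r_vecs = 8.
	 * A datapath configured for 16 RX and 16 TX rings would trigger the
	 * warning above and have both counts clamped to 8, with all 8 TX
	 * rings assigned to the stack.
	 */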
407 * nfp_net_irqs_disable() - Disable interrupts
408 * @pdev: PCI device structure
410 * Undoes what @nfp_net_irqs_alloc() does.
412 void nfp_net_irqs_disable(struct pci_dev *pdev)
414 pci_disable_msix(pdev);
418 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
420 * @data: Opaque data structure
422 * Return: Indicate if the interrupt has been handled.
424 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
426 struct nfp_net_r_vector *r_vec = data;
428 napi_schedule_irqoff(&r_vec->napi);
430 /* The FW auto-masks any interrupt, either via the MASK bit in
431 * the MSI-X table or via the per entry ICR field. So there
432 * is no need to disable interrupts here.
437 static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
439 struct nfp_net_r_vector *r_vec = data;
441 tasklet_schedule(&r_vec->tasklet);
447 * nfp_net_read_link_status() - Reread link status from control BAR
448 * @nn: NFP Network structure
450 static void nfp_net_read_link_status(struct nfp_net *nn)
456 spin_lock_irqsave(&nn->link_status_lock, flags);
458 sts = nn_readl(nn, NFP_NET_CFG_STS);
459 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
461 if (nn->link_up == link_up)
464 nn->link_up = link_up;
466 set_bit(NFP_PORT_CHANGED, &nn->port->flags);
469 netif_carrier_on(nn->dp.netdev);
470 netdev_info(nn->dp.netdev, "NIC Link is Up\n");
472 netif_carrier_off(nn->dp.netdev);
473 netdev_info(nn->dp.netdev, "NIC Link is Down\n");
476 spin_unlock_irqrestore(&nn->link_status_lock, flags);
480 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
482 * @data: Opaque data structure
484 * Return: Indicate if the interrupt has been handled.
486 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
488 struct nfp_net *nn = data;
489 struct msix_entry *entry;
491 entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
493 nfp_net_read_link_status(nn);
495 nfp_net_irq_unmask(nn, entry->entry);
501 * nfp_net_irq_exn() - Interrupt service routine for exceptions
503 * @data: Opaque data structure
505 * Return: Indicate if the interrupt has been handled.
507 static irqreturn_t nfp_net_irq_exn(int irq, void *data)
509 struct nfp_net *nn = data;
511 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
512 /* XXX TO BE IMPLEMENTED */
517 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
518 * @tx_ring: TX ring structure
519 * @r_vec: IRQ vector servicing this ring
521 * @is_xdp: Is this an XDP TX ring?
524 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
525 struct nfp_net_r_vector *r_vec, unsigned int idx,
528 struct nfp_net *nn = r_vec->nfp_net;
531 tx_ring->r_vec = r_vec;
532 tx_ring->is_xdp = is_xdp;
533 u64_stats_init(&tx_ring->r_vec->tx_sync);
535 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
536 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
540 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
541 * @rx_ring: RX ring structure
542 * @r_vec: IRQ vector servicing this ring
546 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
547 struct nfp_net_r_vector *r_vec, unsigned int idx)
549 struct nfp_net *nn = r_vec->nfp_net;
552 rx_ring->r_vec = r_vec;
553 u64_stats_init(&rx_ring->r_vec->rx_sync);
555 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
556 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
560 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
561 * @nn: NFP Network structure
562 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
563 * @format: printf-style format to construct the interrupt name
564 * @name: Pointer to allocated space for interrupt name
565 * @name_sz: Size of space for interrupt name
566 * @vector_idx: Index of MSI-X vector used for this interrupt
567 * @handler: IRQ handler to register for this interrupt
570 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
571 const char *format, char *name, size_t name_sz,
572 unsigned int vector_idx, irq_handler_t handler)
574 struct msix_entry *entry;
577 entry = &nn->irq_entries[vector_idx];
579 snprintf(name, name_sz, format, nfp_net_name(nn));
580 err = request_irq(entry->vector, handler, 0, name, nn);
582 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
586 nn_writeb(nn, ctrl_offset, entry->entry);
587 nfp_net_irq_unmask(nn, entry->entry);
593 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
594 * @nn: NFP Network structure
595 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
596 * @vector_idx: Index of MSI-X vector used for this interrupt
598 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
599 unsigned int vector_idx)
601 nn_writeb(nn, ctrl_offset, 0xff);
603 free_irq(nn->irq_entries[vector_idx].vector, nn);
608 * One queue controller peripheral queue is used for transmit. The
609 * driver enqueues packets for transmit by advancing the write
610 * pointer. The device indicates that packets have been transmitted by
611 * advancing the read pointer. The driver maintains a local copy of
612 * the read and write pointer in @struct nfp_net_tx_ring. The driver
613 * keeps @wr_p in sync with the queue controller write pointer and can
614 * determine how many packets have been transmitted by comparing its
615 * copy of the read pointer @rd_p with the read pointer maintained by
616 * the queue controller peripheral.
620 * nfp_net_tx_full() - Check if the TX ring is full
621 * @tx_ring: TX ring to check
622 * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
624 * This function checks, based on the *host copy* of the read/write
625 * pointers, whether a given TX ring is full. The real TX queue may have
626 * some newly freed slots since the host copies were last updated.
628 * Return: True if the ring is full.
630 static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
632 return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
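/* Worked example: with cnt = 1024, wr_p = 1030 and rd_p = 10 there are
 * wr_p - rd_p = 1020 descriptors in flight, so a request for dcnt = 5
 * (threshold 1024 - 5 = 1019) reports the ring as full.  The pointers are
 * free-running u32s that are only ever compared by subtraction, so their
 * wrap-around needs no special handling here.
 */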
635 /* Wrappers for deciding when to stop and restart TX queues */
636 static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
638 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
641 static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
643 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
647 * nfp_net_tx_ring_stop() - stop tx ring
648 * @nd_q: netdev queue
649 * @tx_ring: driver tx queue structure
651 * Safely stop TX ring. Remember that while we are running .start_xmit()
652 * someone else may be cleaning the TX ring completions so we need to be
653 * extra careful here.
655 static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
656 struct nfp_net_tx_ring *tx_ring)
658 netif_tx_stop_queue(nd_q);
660 /* We can race with the TX completion out of NAPI so recheck */
662 if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
663 netif_tx_start_queue(nd_q);
667 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
668 * @r_vec: per-ring structure
669 * @txbuf: Pointer to driver soft TX descriptor
670 * @txd: Pointer to HW TX descriptor
671 * @skb: Pointer to SKB
673 * Set up Tx descriptor for LSO; do nothing for non-LSO skbs.
676 static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
677 struct nfp_net_tx_buf *txbuf,
678 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
683 if (!skb_is_gso(skb))
686 if (!skb->encapsulation) {
687 txd->l3_offset = skb_network_offset(skb);
688 txd->l4_offset = skb_transport_offset(skb);
689 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
691 txd->l3_offset = skb_inner_network_offset(skb);
692 txd->l4_offset = skb_inner_transport_offset(skb);
693 hdrlen = skb_inner_transport_header(skb) - skb->data +
694 inner_tcp_hdrlen(skb);
697 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
698 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
700 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
701 txd->lso_hdrlen = hdrlen;
702 txd->mss = cpu_to_le16(mss);
703 txd->flags |= PCIE_DESC_TX_LSO;
705 u64_stats_update_begin(&r_vec->tx_sync);
707 u64_stats_update_end(&r_vec->tx_sync);
711 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
712 * @dp: NFP Net data path struct
713 * @r_vec: per-ring structure
714 * @txbuf: Pointer to driver soft TX descriptor
715 * @txd: Pointer to TX descriptor
716 * @skb: Pointer to SKB
718 * This function sets the TX checksum flags in the TX descriptor based
719 * on the configuration and the protocol of the packet to be transmitted.
721 static void nfp_net_tx_csum(struct nfp_net_dp *dp,
722 struct nfp_net_r_vector *r_vec,
723 struct nfp_net_tx_buf *txbuf,
724 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
726 struct ipv6hdr *ipv6h;
730 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
733 if (skb->ip_summed != CHECKSUM_PARTIAL)
736 txd->flags |= PCIE_DESC_TX_CSUM;
737 if (skb->encapsulation)
738 txd->flags |= PCIE_DESC_TX_ENCAP;
740 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
741 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
743 if (iph->version == 4) {
744 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
745 l4_hdr = iph->protocol;
746 } else if (ipv6h->version == 6) {
747 l4_hdr = ipv6h->nexthdr;
749 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
755 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
758 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
761 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
765 u64_stats_update_begin(&r_vec->tx_sync);
766 if (skb->encapsulation)
767 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
769 r_vec->hw_csum_tx += txbuf->pkt_cnt;
770 u64_stats_update_end(&r_vec->tx_sync);
773 static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
776 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
777 tx_ring->wr_ptr_add = 0;
780 static int nfp_net_prep_port_id(struct sk_buff *skb)
782 struct metadata_dst *md_dst = skb_metadata_dst(skb);
787 if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
790 if (unlikely(skb_cow_head(skb, 8)))
793 data = skb_push(skb, 8);
794 put_unaligned_be32(NFP_NET_META_PORTID, data);
795 put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
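	/* The eight bytes pushed above form the metadata prepend the firmware
	 * parses ahead of the Ethernet header; sketch of the resulting layout
	 * (both words big-endian):
	 *
	 *   skb->data + 0:  NFP_NET_META_PORTID          (metadata type)
	 *   skb->data + 4:  md_dst->u.port_info.port_id  (destination port)
	 *   skb->data + 8:  original Ethernet header
	 *
	 * The number of prepended bytes is reported back to the caller, which
	 * accounts for it as md_bytes in the TX descriptor's offset_eop field.
	 */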
801 * nfp_net_tx() - Main transmit entry point
802 * @skb: SKB to transmit
803 * @netdev: netdev structure
805 * Return: NETDEV_TX_OK on success.
807 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
809 struct nfp_net *nn = netdev_priv(netdev);
810 const struct skb_frag_struct *frag;
811 struct nfp_net_tx_desc *txd, txdg;
812 int f, nr_frags, wr_idx, md_bytes;
813 struct nfp_net_tx_ring *tx_ring;
814 struct nfp_net_r_vector *r_vec;
815 struct nfp_net_tx_buf *txbuf;
816 struct netdev_queue *nd_q;
817 struct nfp_net_dp *dp;
823 qidx = skb_get_queue_mapping(skb);
824 tx_ring = &dp->tx_rings[qidx];
825 r_vec = tx_ring->r_vec;
826 nd_q = netdev_get_tx_queue(dp->netdev, qidx);
828 nr_frags = skb_shinfo(skb)->nr_frags;
830 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
831 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
832 qidx, tx_ring->wr_p, tx_ring->rd_p);
833 netif_tx_stop_queue(nd_q);
834 nfp_net_tx_xmit_more_flush(tx_ring);
835 u64_stats_update_begin(&r_vec->tx_sync);
837 u64_stats_update_end(&r_vec->tx_sync);
838 return NETDEV_TX_BUSY;
841 md_bytes = nfp_net_prep_port_id(skb);
842 if (unlikely(md_bytes < 0)) {
843 nfp_net_tx_xmit_more_flush(tx_ring);
844 dev_kfree_skb_any(skb);
848 /* Start with the head skbuf */
849 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
851 if (dma_mapping_error(dp->dev, dma_addr))
854 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
856 /* Stash the soft descriptor of the head then initialize it */
857 txbuf = &tx_ring->txbufs[wr_idx];
859 txbuf->dma_addr = dma_addr;
862 txbuf->real_len = skb->len;
864 /* Build TX descriptor */
865 txd = &tx_ring->txds[wr_idx];
866 txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
867 txd->dma_len = cpu_to_le16(skb_headlen(skb));
868 nfp_desc_set_dma_addr(txd, dma_addr);
869 txd->data_len = cpu_to_le16(skb->len);
875 /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
876 nfp_net_tx_tso(r_vec, txbuf, txd, skb);
877 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
878 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
879 txd->flags |= PCIE_DESC_TX_VLAN;
880 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
885 /* all descs must match except for in addr, length and eop */
888 for (f = 0; f < nr_frags; f++) {
889 frag = &skb_shinfo(skb)->frags[f];
890 fsize = skb_frag_size(frag);
892 dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
893 fsize, DMA_TO_DEVICE);
894 if (dma_mapping_error(dp->dev, dma_addr))
897 wr_idx = D_IDX(tx_ring, wr_idx + 1);
898 tx_ring->txbufs[wr_idx].skb = skb;
899 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
900 tx_ring->txbufs[wr_idx].fidx = f;
902 txd = &tx_ring->txds[wr_idx];
904 txd->dma_len = cpu_to_le16(fsize);
905 nfp_desc_set_dma_addr(txd, dma_addr);
907 (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
910 u64_stats_update_begin(&r_vec->tx_sync);
912 u64_stats_update_end(&r_vec->tx_sync);
915 netdev_tx_sent_queue(nd_q, txbuf->real_len);
917 skb_tx_timestamp(skb);
919 tx_ring->wr_p += nr_frags + 1;
920 if (nfp_net_tx_ring_should_stop(tx_ring))
921 nfp_net_tx_ring_stop(nd_q, tx_ring);
923 tx_ring->wr_ptr_add += nr_frags + 1;
924 if (!skb->xmit_more || netif_xmit_stopped(nd_q))
925 nfp_net_tx_xmit_more_flush(tx_ring);
931 frag = &skb_shinfo(skb)->frags[f];
932 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
933 skb_frag_size(frag), DMA_TO_DEVICE);
934 tx_ring->txbufs[wr_idx].skb = NULL;
935 tx_ring->txbufs[wr_idx].dma_addr = 0;
936 tx_ring->txbufs[wr_idx].fidx = -2;
939 wr_idx += tx_ring->cnt;
941 dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
942 skb_headlen(skb), DMA_TO_DEVICE);
943 tx_ring->txbufs[wr_idx].skb = NULL;
944 tx_ring->txbufs[wr_idx].dma_addr = 0;
945 tx_ring->txbufs[wr_idx].fidx = -2;
947 nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
948 nfp_net_tx_xmit_more_flush(tx_ring);
949 u64_stats_update_begin(&r_vec->tx_sync);
951 u64_stats_update_end(&r_vec->tx_sync);
952 dev_kfree_skb_any(skb);
957 * nfp_net_tx_complete() - Handle completed TX packets
958 * @tx_ring: TX ring structure
962 static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
964 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
965 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
966 const struct skb_frag_struct *frag;
967 struct netdev_queue *nd_q;
968 u32 done_pkts = 0, done_bytes = 0;
975 if (tx_ring->wr_p == tx_ring->rd_p)
978 /* Work out how many descriptors have been transmitted */
979 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
981 if (qcp_rd_p == tx_ring->qcp_rd_p)
984 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
987 idx = D_IDX(tx_ring, tx_ring->rd_p++);
989 skb = tx_ring->txbufs[idx].skb;
993 nr_frags = skb_shinfo(skb)->nr_frags;
994 fidx = tx_ring->txbufs[idx].fidx;
998 dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
999 skb_headlen(skb), DMA_TO_DEVICE);
1001 done_pkts += tx_ring->txbufs[idx].pkt_cnt;
1002 done_bytes += tx_ring->txbufs[idx].real_len;
1004 /* unmap fragment */
1005 frag = &skb_shinfo(skb)->frags[fidx];
1006 dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
1007 skb_frag_size(frag), DMA_TO_DEVICE);
1010 /* check for last gather fragment */
1011 if (fidx == nr_frags - 1)
1012 dev_consume_skb_any(skb);
1014 tx_ring->txbufs[idx].dma_addr = 0;
1015 tx_ring->txbufs[idx].skb = NULL;
1016 tx_ring->txbufs[idx].fidx = -2;
1019 tx_ring->qcp_rd_p = qcp_rd_p;
1021 u64_stats_update_begin(&r_vec->tx_sync);
1022 r_vec->tx_bytes += done_bytes;
1023 r_vec->tx_pkts += done_pkts;
1024 u64_stats_update_end(&r_vec->tx_sync);
1029 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1030 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
1031 if (nfp_net_tx_ring_should_wake(tx_ring)) {
1032 /* Make sure TX thread will see updated tx_ring->rd_p */
1035 if (unlikely(netif_tx_queue_stopped(nd_q)))
1036 netif_tx_wake_queue(nd_q);
1039 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1040 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1041 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1044 static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
1046 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1047 u32 done_pkts = 0, done_bytes = 0;
1052 /* Work out how many descriptors have been transmitted */
1053 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
1055 if (qcp_rd_p == tx_ring->qcp_rd_p)
1058 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
1060 done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
1061 todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
1063 tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
1067 idx = D_IDX(tx_ring, tx_ring->rd_p);
1070 done_bytes += tx_ring->txbufs[idx].real_len;
1073 u64_stats_update_begin(&r_vec->tx_sync);
1074 r_vec->tx_bytes += done_bytes;
1075 r_vec->tx_pkts += done_pkts;
1076 u64_stats_update_end(&r_vec->tx_sync);
1078 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1079 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1080 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1086 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
1087 * @dp: NFP Net data path struct
1088 * @tx_ring: TX ring structure
1090 * Assumes that the device is stopped, must be idempotent.
1093 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1095 const struct skb_frag_struct *frag;
1096 struct netdev_queue *nd_q;
1098 while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
1099 struct nfp_net_tx_buf *tx_buf;
1100 struct sk_buff *skb;
1103 idx = D_IDX(tx_ring, tx_ring->rd_p);
1104 tx_buf = &tx_ring->txbufs[idx];
1106 skb = tx_ring->txbufs[idx].skb;
1107 nr_frags = skb_shinfo(skb)->nr_frags;
1109 if (tx_buf->fidx == -1) {
1111 dma_unmap_single(dp->dev, tx_buf->dma_addr,
1112 skb_headlen(skb), DMA_TO_DEVICE);
1114 /* unmap fragment */
1115 frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
1116 dma_unmap_page(dp->dev, tx_buf->dma_addr,
1117 skb_frag_size(frag), DMA_TO_DEVICE);
1120 /* check for last gather fragment */
1121 if (tx_buf->fidx == nr_frags - 1)
1122 dev_kfree_skb_any(skb);
1124 tx_buf->dma_addr = 0;
1128 tx_ring->qcp_rd_p++;
1132 memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
1135 tx_ring->qcp_rd_p = 0;
1136 tx_ring->wr_ptr_add = 0;
1138 if (tx_ring->is_xdp || !dp->netdev)
1141 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1142 netdev_tx_reset_queue(nd_q);
1145 static void nfp_net_tx_timeout(struct net_device *netdev)
1147 struct nfp_net *nn = netdev_priv(netdev);
1150 for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
1151 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1153 nn_warn(nn, "TX timeout on ring: %d\n", i);
1155 nn_warn(nn, "TX watchdog timeout\n");
1158 /* Receive processing
1161 nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
1163 unsigned int fl_bufsz;
1165 fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
1166 fl_bufsz += dp->rx_dma_off;
1167 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1168 fl_bufsz += NFP_NET_MAX_PREPEND;
1170 fl_bufsz += dp->rx_offset;
1171 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
1173 fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
1174 fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
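	/* Worked example (illustrative numbers): with a fixed rx_offset, no
	 * extra rx_dma_off and a 1500 byte MTU the buffer must cover
	 *
	 *   NFP_NET_RX_BUF_HEADROOM + rx_offset + ETH_HLEN + 2 * VLAN_HLEN + 1500
	 *
	 * which is then rounded up by SKB_DATA_ALIGN() and grown by the
	 * aligned size of struct skb_shared_info so build_skb() can later be
	 * used on the fragment without copying.
	 */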
1180 nfp_net_free_frag(void *frag, bool xdp)
1183 skb_free_frag(frag);
1185 __free_page(virt_to_page(frag));
1189 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
1190 * @dp: NFP Net data path struct
1191 * @dma_addr: Pointer to storage for DMA address (output param)
1193 * This function will allocate a new page frag and map it for DMA.
1195 * Return: allocated page frag or NULL on failure.
1197 static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1201 if (!dp->xdp_prog) {
1202 frag = netdev_alloc_frag(dp->fl_bufsz);
1206 page = alloc_page(GFP_KERNEL | __GFP_COLD);
1207 frag = page ? page_address(page) : NULL;
1210 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1214 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1215 if (dma_mapping_error(dp->dev, *dma_addr)) {
1216 nfp_net_free_frag(frag, dp->xdp_prog);
1217 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1224 static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1228 if (!dp->xdp_prog) {
1229 frag = napi_alloc_frag(dp->fl_bufsz);
1233 page = alloc_page(GFP_ATOMIC | __GFP_COLD);
1234 frag = page ? page_address(page) : NULL;
1237 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1241 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1242 if (dma_mapping_error(dp->dev, *dma_addr)) {
1243 nfp_net_free_frag(frag, dp->xdp_prog);
1244 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1252 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
1253 * @dp: NFP Net data path struct
1254 * @rx_ring: RX ring structure
1255 * @frag: page fragment buffer
1256 * @dma_addr: DMA address of skb mapping
1258 static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
1259 struct nfp_net_rx_ring *rx_ring,
1260 void *frag, dma_addr_t dma_addr)
1262 unsigned int wr_idx;
1264 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
1266 nfp_net_dma_sync_dev_rx(dp, dma_addr);
1268 /* Stash SKB and DMA address away */
1269 rx_ring->rxbufs[wr_idx].frag = frag;
1270 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1272 /* Fill freelist descriptor */
1273 rx_ring->rxds[wr_idx].fld.reserved = 0;
1274 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1275 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
1276 dma_addr + dp->rx_dma_off);
1279 if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
1280 /* Update write pointer of the freelist queue. Make
1281 * sure all writes are flushed before telling the hardware.
1284 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
1289 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
1290 * @rx_ring: RX ring structure
1292 * Assumes that the device is stopped, must be idempotent.
1294 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1296 unsigned int wr_idx, last_idx;
1298 /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
1299 * kept at cnt - 1 FL bufs.
1301 if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
1304 /* Move the empty entry to the end of the list */
1305 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
1306 last_idx = rx_ring->cnt - 1;
1307 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1308 rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
1309 rx_ring->rxbufs[last_idx].dma_addr = 0;
1310 rx_ring->rxbufs[last_idx].frag = NULL;
1312 memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
1318 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1319 * @dp: NFP Net data path struct
1320 * @rx_ring: RX ring to remove buffers from
1322 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
1323 * entries. After device is disabled nfp_net_rx_ring_reset() must be called
1324 * to restore required ring geometry.
1327 nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
1328 struct nfp_net_rx_ring *rx_ring)
1332 for (i = 0; i < rx_ring->cnt - 1; i++) {
1333 /* NULL frag can only happen when initial filling of the ring
1334 * fails to allocate enough buffers and calls here to free
1335 * already allocated ones.
1337 if (!rx_ring->rxbufs[i].frag)
1340 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
1341 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
1342 rx_ring->rxbufs[i].dma_addr = 0;
1343 rx_ring->rxbufs[i].frag = NULL;
1348 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
1349 * @dp: NFP Net data path struct
1350 * @rx_ring: RX ring to fill with buffers
1353 nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
1354 struct nfp_net_rx_ring *rx_ring)
1356 struct nfp_net_rx_buf *rxbufs;
1359 rxbufs = rx_ring->rxbufs;
1361 for (i = 0; i < rx_ring->cnt - 1; i++) {
1362 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
1363 if (!rxbufs[i].frag) {
1364 nfp_net_rx_ring_bufs_free(dp, rx_ring);
1373 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1374 * @dp: NFP Net data path struct
1375 * @rx_ring: RX ring to fill
1378 nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
1379 struct nfp_net_rx_ring *rx_ring)
1383 for (i = 0; i < rx_ring->cnt - 1; i++)
1384 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
1385 rx_ring->rxbufs[i].dma_addr);
1389 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
1390 * @flags: RX descriptor flags field in CPU byte order
1392 static int nfp_net_rx_csum_has_errors(u16 flags)
1394 u16 csum_all_checked, csum_all_ok;
1396 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
1397 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
1399 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
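/* Each *_CSUM_OK flag sits PCIE_DESC_RX_CSUM_OK_SHIFT bits below its
 * corresponding "checksum checked" flag, so shifting the OK bits up aligns
 * the two masks; the descriptor is error free only if every checksum the
 * hardware actually verified also has its OK bit set.
 */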
1403 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
1404 * @dp: NFP Net data path struct
1405 * @r_vec: per-ring structure
1406 * @rxd: Pointer to RX descriptor
1407 * @meta: Parsed metadata prepend
1408 * @skb: Pointer to SKB
1410 static void nfp_net_rx_csum(struct nfp_net_dp *dp,
1411 struct nfp_net_r_vector *r_vec,
1412 struct nfp_net_rx_desc *rxd,
1413 struct nfp_meta_parsed *meta, struct sk_buff *skb)
1415 skb_checksum_none_assert(skb);
1417 if (!(dp->netdev->features & NETIF_F_RXCSUM))
1420 if (meta->csum_type) {
1421 skb->ip_summed = meta->csum_type;
1422 skb->csum = meta->csum;
1423 u64_stats_update_begin(&r_vec->rx_sync);
1424 r_vec->hw_csum_rx_ok++;
1425 u64_stats_update_end(&r_vec->rx_sync);
1429 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1430 u64_stats_update_begin(&r_vec->rx_sync);
1431 r_vec->hw_csum_rx_error++;
1432 u64_stats_update_end(&r_vec->rx_sync);
1436 /* Assume that the firmware will never report inner CSUM_OK unless outer
1437 * L4 headers were successfully parsed. FW will always report zero UDP
1438 * checksum as CSUM_OK.
1440 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1441 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1442 __skb_incr_checksum_unnecessary(skb);
1443 u64_stats_update_begin(&r_vec->rx_sync);
1444 r_vec->hw_csum_rx_ok++;
1445 u64_stats_update_end(&r_vec->rx_sync);
1448 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1449 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1450 __skb_incr_checksum_unnecessary(skb);
1451 u64_stats_update_begin(&r_vec->rx_sync);
1452 r_vec->hw_csum_rx_inner_ok++;
1453 u64_stats_update_end(&r_vec->rx_sync);
1458 nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
1459 unsigned int type, __be32 *hash)
1461 if (!(netdev->features & NETIF_F_RXHASH))
1465 case NFP_NET_RSS_IPV4:
1466 case NFP_NET_RSS_IPV6:
1467 case NFP_NET_RSS_IPV6_EX:
1468 meta->hash_type = PKT_HASH_TYPE_L3;
1471 meta->hash_type = PKT_HASH_TYPE_L4;
1475 meta->hash = get_unaligned_be32(hash);
1479 nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
1480 void *data, struct nfp_net_rx_desc *rxd)
1482 struct nfp_net_rx_hash *rx_hash = data;
1484 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1487 nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
1492 nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
1493 void *data, int meta_len)
1497 meta_info = get_unaligned_be32(data);
1501 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1502 case NFP_NET_META_HASH:
1503 meta_info >>= NFP_NET_META_FIELD_SIZE;
1504 nfp_net_set_hash(netdev, meta,
1505 meta_info & NFP_NET_META_FIELD_MASK,
1509 case NFP_NET_META_MARK:
1510 meta->mark = get_unaligned_be32(data);
1513 case NFP_NET_META_PORTID:
1514 meta->portid = get_unaligned_be32(data);
1517 case NFP_NET_META_CSUM:
1518 meta->csum_type = CHECKSUM_COMPLETE;
1520 (__force __wsum)__get_unaligned_cpu32(data);
1527 meta_info >>= NFP_NET_META_FIELD_SIZE;
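/* Chained metadata walk, sketched: the leading 32-bit word read above packs
 * up to eight 4-bit field type codes, processed lowest nibble first.  Each
 * loop iteration handles one field, consumes that field's payload from
 * @data, then shifts the next type code into the low nibble (the hash field
 * consumes an additional nibble for the hash type, as its case handles
 * above); the walk ends once the remaining type word is zero.
 */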
1534 nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
1535 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
1536 struct sk_buff *skb)
1538 u64_stats_update_begin(&r_vec->rx_sync);
1540 u64_stats_update_end(&r_vec->rx_sync);
1542 /* skb is built based on the frag; free_skb() would free the frag,
1543 * so to be able to reuse it we need an extra ref.
1545 if (skb && rxbuf && skb->head == rxbuf->frag)
1546 page_ref_inc(virt_to_head_page(rxbuf->frag));
1548 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
1550 dev_kfree_skb_any(skb);
1554 nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
1555 struct nfp_net_tx_ring *tx_ring,
1556 struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
1557 unsigned int pkt_len, bool *completed)
1559 struct nfp_net_tx_buf *txbuf;
1560 struct nfp_net_tx_desc *txd;
1563 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1565 nfp_net_xdp_complete(tx_ring);
1569 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1570 nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
1576 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1578 /* Stash the soft descriptor of the head then initialize it */
1579 txbuf = &tx_ring->txbufs[wr_idx];
1581 nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
1583 txbuf->frag = rxbuf->frag;
1584 txbuf->dma_addr = rxbuf->dma_addr;
1587 txbuf->real_len = pkt_len;
1589 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
1590 pkt_len, DMA_BIDIRECTIONAL);
1592 /* Build TX descriptor */
1593 txd = &tx_ring->txds[wr_idx];
1594 txd->offset_eop = PCIE_DESC_TX_EOP;
1595 txd->dma_len = cpu_to_le16(pkt_len);
1596 nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
1597 txd->data_len = cpu_to_le16(pkt_len);
1601 txd->lso_hdrlen = 0;
1604 tx_ring->wr_ptr_add++;
1608 static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
1609 unsigned int *off, unsigned int *len)
1611 struct xdp_buff xdp;
1615 xdp.data_hard_start = hard_start;
1616 xdp.data = data + *off;
1617 xdp.data_end = data + *off + *len;
1619 orig_data = xdp.data;
1620 ret = bpf_prog_run_xdp(prog, &xdp);
1622 *len -= xdp.data - orig_data;
1623 *off += xdp.data - orig_data;
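	/* The XDP program may have moved xdp.data via bpf_xdp_adjust_head();
	 * the deltas computed above fold that headroom adjustment back into
	 * the caller's packet offset and length before the verdict is acted
	 * upon.
	 */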
1629 * nfp_net_rx() - receive up to @budget packets on @rx_ring
1630 * @rx_ring: RX ring to receive from
1631 * @budget: NAPI budget
1633 * Note, this function is separated out from the napi poll function to
1634 * more cleanly separate packet receive code from other bookkeeping
1635 * functions performed in the napi poll function.
1637 * Return: Number of packets received.
1639 static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1641 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1642 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1643 struct nfp_net_tx_ring *tx_ring;
1644 struct bpf_prog *xdp_prog;
1645 bool xdp_tx_cmpl = false;
1646 unsigned int true_bufsz;
1647 struct sk_buff *skb;
1648 int pkts_polled = 0;
1652 xdp_prog = READ_ONCE(dp->xdp_prog);
1653 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
1654 tx_ring = r_vec->xdp_ring;
1656 while (pkts_polled < budget) {
1657 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
1658 struct nfp_net_rx_buf *rxbuf;
1659 struct nfp_net_rx_desc *rxd;
1660 struct nfp_meta_parsed meta;
1661 struct net_device *netdev;
1662 dma_addr_t new_dma_addr;
1665 idx = D_IDX(rx_ring, rx_ring->rd_p);
1667 rxd = &rx_ring->rxds[idx];
1668 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1671 /* Memory barrier to ensure that we won't do other reads
1672 * before the DD bit.
1676 memset(&meta, 0, sizeof(meta));
1681 rxbuf = &rx_ring->rxbufs[idx];
1683 * <-- [rx_offset] -->
1684 * ---------------------------------------------------------
1685 * | [XX] | metadata | packet | XXXX |
1686 * ---------------------------------------------------------
1687 * <---------------- data_len --------------->
1689 * The rx_offset is fixed for all packets, the meta_len can vary
1690 * on a packet by packet basis. If rx_offset is set to zero
1691 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
1692 * buffer and is immediately followed by the packet (no [XX]).
1694 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1695 data_len = le16_to_cpu(rxd->rxd.data_len);
1696 pkt_len = data_len - meta_len;
1698 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
1699 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1700 pkt_off += meta_len;
1702 pkt_off += dp->rx_offset;
1703 meta_off = pkt_off - meta_len;
1706 u64_stats_update_begin(&r_vec->rx_sync);
1708 r_vec->rx_bytes += pkt_len;
1709 u64_stats_update_end(&r_vec->rx_sync);
1711 if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
1712 (dp->rx_offset && meta_len > dp->rx_offset))) {
1713 nn_dp_warn(dp, "oversized RX packet metadata %u\n",
1715 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1719 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
1722 if (!dp->chained_metadata_format) {
1723 nfp_net_set_hash_desc(dp->netdev, &meta,
1724 rxbuf->frag + meta_off, rxd);
1725 } else if (meta_len) {
1728 end = nfp_net_parse_meta(dp->netdev, &meta,
1729 rxbuf->frag + meta_off,
1731 if (unlikely(end != rxbuf->frag + pkt_off)) {
1732 nn_dp_warn(dp, "invalid RX packet metadata\n");
1733 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
1739 if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
1740 dp->bpf_offload_xdp) && !meta.portid) {
1741 unsigned int dma_off;
1745 hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
1747 act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
1748 &pkt_off, &pkt_len);
1753 dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
1754 if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
1759 trace_xdp_exception(dp->netdev,
1763 bpf_warn_invalid_xdp_action(act);
1766 trace_xdp_exception(dp->netdev, xdp_prog, act);
1769 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
1775 skb = build_skb(rxbuf->frag, true_bufsz);
1776 if (unlikely(!skb)) {
1777 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1780 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
1781 if (unlikely(!new_frag)) {
1782 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
1786 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1788 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1790 if (likely(!meta.portid)) {
1791 netdev = dp->netdev;
1795 nn = netdev_priv(dp->netdev);
1796 netdev = nfp_app_repr_get(nn->app, meta.portid);
1797 if (unlikely(!netdev)) {
1798 nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
1801 nfp_repr_inc_rx_stats(netdev, pkt_len);
1804 skb_reserve(skb, pkt_off);
1805 skb_put(skb, pkt_len);
1807 skb->mark = meta.mark;
1808 skb_set_hash(skb, meta.hash, meta.hash_type);
1810 skb_record_rx_queue(skb, rx_ring->idx);
1811 skb->protocol = eth_type_trans(skb, netdev);
1813 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
1815 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1816 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1817 le16_to_cpu(rxd->rxd.vlan));
1819 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1823 if (tx_ring->wr_ptr_add)
1824 nfp_net_tx_xmit_more_flush(tx_ring);
1825 else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
1827 if (!nfp_net_xdp_complete(tx_ring))
1828 pkts_polled = budget;
1836 * nfp_net_poll() - napi poll function
1837 * @napi: NAPI structure
1838 * @budget: NAPI budget
1840 * Return: number of packets polled.
1842 static int nfp_net_poll(struct napi_struct *napi, int budget)
1844 struct nfp_net_r_vector *r_vec =
1845 container_of(napi, struct nfp_net_r_vector, napi);
1846 unsigned int pkts_polled = 0;
1849 nfp_net_tx_complete(r_vec->tx_ring);
1851 pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
1853 if (pkts_polled < budget)
1854 if (napi_complete_done(napi, pkts_polled))
1855 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
1860 /* Control device data path
1864 nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1865 struct sk_buff *skb, bool old)
1867 unsigned int real_len = skb->len, meta_len = 0;
1868 struct nfp_net_tx_ring *tx_ring;
1869 struct nfp_net_tx_buf *txbuf;
1870 struct nfp_net_tx_desc *txd;
1871 struct nfp_net_dp *dp;
1872 dma_addr_t dma_addr;
1875 dp = &r_vec->nfp_net->dp;
1876 tx_ring = r_vec->tx_ring;
1878 if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
1879 nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
1883 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1884 u64_stats_update_begin(&r_vec->tx_sync);
1886 u64_stats_update_end(&r_vec->tx_sync);
1888 __skb_queue_tail(&r_vec->queue, skb);
1890 __skb_queue_head(&r_vec->queue, skb);
1894 if (nfp_app_ctrl_has_meta(nn->app)) {
1895 if (unlikely(skb_headroom(skb) < 8)) {
1896 nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
1900 put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
1901 put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
1904 /* Start with the head skbuf */
1905 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
1907 if (dma_mapping_error(dp->dev, dma_addr))
1910 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1912 /* Stash the soft descriptor of the head then initialize it */
1913 txbuf = &tx_ring->txbufs[wr_idx];
1915 txbuf->dma_addr = dma_addr;
1918 txbuf->real_len = real_len;
1920 /* Build TX descriptor */
1921 txd = &tx_ring->txds[wr_idx];
1922 txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
1923 txd->dma_len = cpu_to_le16(skb_headlen(skb));
1924 nfp_desc_set_dma_addr(txd, dma_addr);
1925 txd->data_len = cpu_to_le16(skb->len);
1929 txd->lso_hdrlen = 0;
1932 tx_ring->wr_ptr_add++;
1933 nfp_net_tx_xmit_more_flush(tx_ring);
1938 nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
1940 u64_stats_update_begin(&r_vec->tx_sync);
1942 u64_stats_update_end(&r_vec->tx_sync);
1943 dev_kfree_skb_any(skb);
1947 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
1949 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
1952 spin_lock_bh(&r_vec->lock);
1953 ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
1954 spin_unlock_bh(&r_vec->lock);
1959 static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
1961 struct sk_buff *skb;
1963 while ((skb = __skb_dequeue(&r_vec->queue)))
1964 if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
1969 nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
1971 u32 meta_type, meta_tag;
1973 if (!nfp_app_ctrl_has_meta(nn->app))
1979 meta_type = get_unaligned_be32(data);
1980 meta_tag = get_unaligned_be32(data + 4);
1982 return (meta_type == NFP_NET_META_PORTID &&
1983 meta_tag == NFP_META_PORT_ID_CTRL);
1987 nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
1988 struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
1990 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
1991 struct nfp_net_rx_buf *rxbuf;
1992 struct nfp_net_rx_desc *rxd;
1993 dma_addr_t new_dma_addr;
1994 struct sk_buff *skb;
1998 idx = D_IDX(rx_ring, rx_ring->rd_p);
2000 rxd = &rx_ring->rxds[idx];
2001 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
2004 /* Memory barrier to ensure that we won't do other reads
2005 * before the DD bit.
2011 rxbuf = &rx_ring->rxbufs[idx];
2012 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
2013 data_len = le16_to_cpu(rxd->rxd.data_len);
2014 pkt_len = data_len - meta_len;
2016 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
2017 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
2018 pkt_off += meta_len;
2020 pkt_off += dp->rx_offset;
2021 meta_off = pkt_off - meta_len;
2024 u64_stats_update_begin(&r_vec->rx_sync);
2026 r_vec->rx_bytes += pkt_len;
2027 u64_stats_update_end(&r_vec->rx_sync);
2029 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
2031 if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
2032 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
2034 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
2038 skb = build_skb(rxbuf->frag, dp->fl_bufsz);
2039 if (unlikely(!skb)) {
2040 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
2043 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
2044 if (unlikely(!new_frag)) {
2045 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
2049 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
2051 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
2053 skb_reserve(skb, pkt_off);
2054 skb_put(skb, pkt_len);
2056 nfp_app_ctrl_rx(nn->app, skb);
2061 static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
2063 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
2064 struct nfp_net *nn = r_vec->nfp_net;
2065 struct nfp_net_dp *dp = &nn->dp;
2066 unsigned int budget = 512;
2068 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
2074 static void nfp_ctrl_poll(unsigned long arg)
2076 struct nfp_net_r_vector *r_vec = (void *)arg;
2078 spin_lock_bh(&r_vec->lock);
2079 nfp_net_tx_complete(r_vec->tx_ring);
2080 __nfp_ctrl_tx_queued(r_vec);
2081 spin_unlock_bh(&r_vec->lock);
2083 if (nfp_ctrl_rx(r_vec)) {
2084 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
2086 tasklet_schedule(&r_vec->tasklet);
2087 nn_dp_warn(&r_vec->nfp_net->dp,
2088 "control message budget exceeded!\n");
2092 /* Setup and Configuration
2096 * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
2097 * @nn: NFP Network structure
2099 static void nfp_net_vecs_init(struct nfp_net *nn)
2101 struct nfp_net_r_vector *r_vec;
2104 nn->lsc_handler = nfp_net_irq_lsc;
2105 nn->exn_handler = nfp_net_irq_exn;
2107 for (r = 0; r < nn->max_r_vecs; r++) {
2108 struct msix_entry *entry;
2110 entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
2112 r_vec = &nn->r_vecs[r];
2113 r_vec->nfp_net = nn;
2114 r_vec->irq_entry = entry->entry;
2115 r_vec->irq_vector = entry->vector;
2117 if (nn->dp.netdev) {
2118 r_vec->handler = nfp_net_irq_rxtx;
2120 r_vec->handler = nfp_ctrl_irq_rxtx;
2122 __skb_queue_head_init(&r_vec->queue);
2123 spin_lock_init(&r_vec->lock);
2124 tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
2125 (unsigned long)r_vec);
2126 tasklet_disable(&r_vec->tasklet);
2129 cpumask_set_cpu(r, &r_vec->affinity_mask);
2134 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
2135 * @tx_ring: TX ring to free
2137 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
2139 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
2140 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2142 kfree(tx_ring->txbufs);
2145 dma_free_coherent(dp->dev, tx_ring->size,
2146 tx_ring->txds, tx_ring->dma);
2149 tx_ring->txbufs = NULL;
2150 tx_ring->txds = NULL;
2156 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
2157 * @dp: NFP Net data path struct
2158 * @tx_ring: TX Ring structure to allocate
2160 * Return: 0 on success, negative errno otherwise.
2163 nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
2165 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
2168 tx_ring->cnt = dp->txd_cnt;
2170 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
2171 tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
2173 GFP_KERNEL | __GFP_NOWARN);
2174 if (!tx_ring->txds) {
2175 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
2180 sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
2181 tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
2182 if (!tx_ring->txbufs)
2185 if (!tx_ring->is_xdp && dp->netdev)
2186 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
2192 nfp_net_tx_ring_free(tx_ring);
2197 nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
2198 struct nfp_net_tx_ring *tx_ring)
2202 if (!tx_ring->is_xdp)
2205 for (i = 0; i < tx_ring->cnt; i++) {
2206 if (!tx_ring->txbufs[i].frag)
2209 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
2210 __free_page(virt_to_page(tx_ring->txbufs[i].frag));
2215 nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
2216 struct nfp_net_tx_ring *tx_ring)
2218 struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
2221 if (!tx_ring->is_xdp)
2224 for (i = 0; i < tx_ring->cnt; i++) {
2225 txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
2226 if (!txbufs[i].frag) {
2227 nfp_net_tx_ring_bufs_free(dp, tx_ring);
2235 static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
2239 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
2244 for (r = 0; r < dp->num_tx_rings; r++) {
2247 if (r >= dp->num_stack_tx_rings)
2248 bias = dp->num_stack_tx_rings;
2250 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
2253 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
2256 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
2264 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
2266 nfp_net_tx_ring_free(&dp->tx_rings[r]);
2268 kfree(dp->tx_rings);
2272 static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
2276 for (r = 0; r < dp->num_tx_rings; r++) {
2277 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
2278 nfp_net_tx_ring_free(&dp->tx_rings[r]);
2281 kfree(dp->tx_rings);
2285 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
2286 * @rx_ring: RX ring to free
2288 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
2290 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
2291 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2293 kfree(rx_ring->rxbufs);
2296 dma_free_coherent(dp->dev, rx_ring->size,
2297 rx_ring->rxds, rx_ring->dma);
2300 rx_ring->rxbufs = NULL;
2301 rx_ring->rxds = NULL;
2307 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
2308 * @dp: NFP Net data path struct
2309 * @rx_ring: RX ring to allocate
2311 * Return: 0 on success, negative errno otherwise.
2314 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
2318 rx_ring->cnt = dp->rxd_cnt;
2319 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
2320 rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
2322 GFP_KERNEL | __GFP_NOWARN);
2323 if (!rx_ring->rxds) {
2324 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
2329 sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
2330 rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
2331 if (!rx_ring->rxbufs)
2337 nfp_net_rx_ring_free(rx_ring);
2341 static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
2345 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
2350 for (r = 0; r < dp->num_rx_rings; r++) {
2351 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
2353 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
2356 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
2364 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2366 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2368 kfree(dp->rx_rings);
2372 static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
2376 for (r = 0; r < dp->num_rx_rings; r++) {
2377 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2378 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2381 kfree(dp->rx_rings);
2385 nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
2386 struct nfp_net_r_vector *r_vec, int idx)
2388 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
2389 r_vec->tx_ring =
2390 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
2392 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
2393 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
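/* Illustrative mapping (example counts, not from the original source): with
 * 4 RX rings, 4 stack TX rings and 8 TX rings total (XDP enabled), vector 0
 * is assigned rx_ring 0, tx_ring 0 and xdp_ring tx_rings[4], vector 1 gets
 * rx_ring 1, tx_ring 1 and xdp_ring tx_rings[5], and so on; indices past a
 * given ring count leave that pointer NULL.
 */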
2397 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
2404 netif_napi_add(nn->dp.netdev, &r_vec->napi,
2405 nfp_net_poll, NAPI_POLL_WEIGHT);
2407 tasklet_enable(&r_vec->tasklet);
2409 snprintf(r_vec->name, sizeof(r_vec->name),
2410 "%s-rxtx-%d", nfp_net_name(nn), idx);
2411 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
2415 netif_napi_del(&r_vec->napi);
2417 tasklet_disable(&r_vec->tasklet);
2419 nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
2422 disable_irq(r_vec->irq_vector);
2424 irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
2426 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
2433 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
2435 irq_set_affinity_hint(r_vec->irq_vector, NULL);
2437 netif_napi_del(&r_vec->napi);
2439 tasklet_disable(&r_vec->tasklet);
2441 free_irq(r_vec->irq_vector, r_vec);
2445 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
2446 * @nn: NFP Net device to reconfigure
2448 void nfp_net_rss_write_itbl(struct nfp_net *nn)
2452 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
2453 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
2454 get_unaligned_le32(nn->rss_itbl + i));
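/* Access-pattern sketch (assuming the table size is a multiple of 4): the
 * byte-sized indirection entries are pushed to the BAR four at a time as
 * little-endian 32-bit words, i.e. rss_itbl[0..3] land in the word at
 * NFP_NET_CFG_RSS_ITBL + 0, rss_itbl[4..7] at + 4, and so on.
 */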
2458 * nfp_net_rss_write_key() - Write RSS hash key to device
2459 * @nn: NFP Net device to reconfigure
2461 void nfp_net_rss_write_key(struct nfp_net *nn)
2465 for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
2466 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
2467 get_unaligned_le32(nn->rss_key + i));
2471 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
2472 * @nn: NFP Net device to reconfigure
2474 void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
2480 /* Compute factor used to convert coalesce '_usecs' parameters to
2481 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
2482 * tick.
2483 */
2484 factor = nn->me_freq_mhz / 16;
2486 /* copy RX interrupt coalesce parameters */
2487 value = (nn->rx_coalesce_max_frames << 16) |
2488 (factor * nn->rx_coalesce_usecs);
2489 for (i = 0; i < nn->dp.num_rx_rings; i++)
2490 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
2492 /* copy TX interrupt coalesce parameters */
2493 value = (nn->tx_coalesce_max_frames << 16) |
2494 (factor * nn->tx_coalesce_usecs);
2495 for (i = 0; i < nn->dp.num_tx_rings; i++)
2496 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
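/* Worked example (hypothetical ME clock, not from the original source):
 * with me_freq_mhz = 1200 the factor is 1200 / 16 = 75 ticks per usec, so
 * rx_coalesce_usecs = 50 and rx_coalesce_max_frames = 64 are written as
 * (64 << 16) | (75 * 50) = (64 << 16) | 3750 to each RX ring's IRQ_MOD
 * register; the TX parameters are packed the same way.
 */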
2500 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
2501 * @nn: NFP Net device to reconfigure
2502 * @addr: MAC address to write
2504 * Writes the MAC address from the netdev to the device control BAR. Does not
2505 * perform the required reconfig. We do a bit of byte swapping dance because
2506 * firmware is LE.
2507 */
2508 static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
2510 nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
2511 nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
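/* Byte-layout example (illustrative address, not from the original source):
 * for 00:15:4d:12:34:56 the 32-bit write stores 0x00154d12 at MACADDR + 0
 * and the 16-bit write stores 0x3456 at MACADDR + 6, i.e. the address bytes
 * are read big-endian from the netdev and laid out in the control BAR words.
 */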
2514 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
2516 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
2517 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
2518 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
2520 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
2521 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
2522 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
2526 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
2527 * @nn: NFP Net device to reconfigure
2529 * Warning: must be fully idempotent.
2531 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
2533 u32 new_ctrl, update;
2537 new_ctrl = nn->dp.ctrl;
2538 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
2539 update = NFP_NET_CFG_UPDATE_GEN;
2540 update |= NFP_NET_CFG_UPDATE_MSIX;
2541 update |= NFP_NET_CFG_UPDATE_RING;
2543 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2544 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
2546 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
2547 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
2549 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2550 err = nfp_net_reconfig(nn, update);
2552 nn_err(nn, "Could not disable device: %d\n", err);
2554 for (r = 0; r < nn->dp.num_rx_rings; r++)
2555 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
2556 for (r = 0; r < nn->dp.num_tx_rings; r++)
2557 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
2558 for (r = 0; r < nn->dp.num_r_vecs; r++)
2559 nfp_net_vec_clear_ring_data(nn, r);
2561 nn->dp.ctrl = new_ctrl;
2565 nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
2566 struct nfp_net_rx_ring *rx_ring, unsigned int idx)
2568 /* Write the DMA address, size and MSI-X info to the device */
2569 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
2570 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
2571 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
2575 nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
2576 struct nfp_net_tx_ring *tx_ring, unsigned int idx)
2578 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
2579 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
2580 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
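/* Size encoding note (worked example, not from the original source): the
 * ring size registers take the log2 of the descriptor count, so a ring of
 * 4096 descriptors is written as ilog2(4096) = 12; this presumes the
 * configured ring sizes are powers of two.
 */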
2584 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
2585 * @nn: NFP Net device to reconfigure
2587 static int nfp_net_set_config_and_enable(struct nfp_net *nn)
2589 u32 bufsz, new_ctrl, update = 0;
2593 new_ctrl = nn->dp.ctrl;
2595 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
2596 nfp_net_rss_write_key(nn);
2597 nfp_net_rss_write_itbl(nn);
2598 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
2599 update |= NFP_NET_CFG_UPDATE_RSS;
2602 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
2603 nfp_net_coalesce_write_cfg(nn);
2604 update |= NFP_NET_CFG_UPDATE_IRQMOD;
2607 for (r = 0; r < nn->dp.num_tx_rings; r++)
2608 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
2609 for (r = 0; r < nn->dp.num_rx_rings; r++)
2610 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
2612 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
2613 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
2615 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
2616 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
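/* Worked example (not from the original source): with 4 TX rings the enable
 * mask is ((u64)1 << 4) - 1 = 0xf; the 64-ring case is special-cased to the
 * all-ones constant because shifting a 64-bit value by 64 bits is undefined
 * behaviour in C.
 */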
2619 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
2621 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
2623 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
2624 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
2627 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
2628 update |= NFP_NET_CFG_UPDATE_GEN;
2629 update |= NFP_NET_CFG_UPDATE_MSIX;
2630 update |= NFP_NET_CFG_UPDATE_RING;
2631 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2632 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
2634 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2635 err = nfp_net_reconfig(nn, update);
2637 nfp_net_clear_config_and_disable(nn);
2641 nn->dp.ctrl = new_ctrl;
2643 for (r = 0; r < nn->dp.num_rx_rings; r++)
2644 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
2646 /* Since reconfiguration requests while NFP is down are ignored we
2647 * have to wipe the entire VXLAN configuration and reinitialize it.
2649 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
2650 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
2651 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
2652 udp_tunnel_get_rx_info(nn->dp.netdev);
2659 * nfp_net_close_stack() - Quiesce the stack (part of close)
2660 * @nn: NFP Net device to reconfigure
2662 static void nfp_net_close_stack(struct nfp_net *nn)
2666 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2667 netif_carrier_off(nn->dp.netdev);
2668 nn->link_up = false;
2670 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2671 disable_irq(nn->r_vecs[r].irq_vector);
2672 napi_disable(&nn->r_vecs[r].napi);
2675 netif_tx_disable(nn->dp.netdev);
2679 * nfp_net_close_free_all() - Free all runtime resources
2680 * @nn: NFP Net device to reconfigure
2682 static void nfp_net_close_free_all(struct nfp_net *nn)
2686 nfp_net_tx_rings_free(&nn->dp);
2687 nfp_net_rx_rings_free(&nn->dp);
2689 for (r = 0; r < nn->dp.num_r_vecs; r++)
2690 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2692 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2693 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2697 * nfp_net_netdev_close() - Called when the device is downed
2698 * @netdev: netdev structure
2700 static int nfp_net_netdev_close(struct net_device *netdev)
2702 struct nfp_net *nn = netdev_priv(netdev);
2704 /* Step 1: Disable RX and TX rings from the Linux kernel perspective
2706 nfp_net_close_stack(nn);
2710 nfp_net_clear_config_and_disable(nn);
2711 nfp_port_configure(netdev, false);
2713 /* Step 3: Free resources
2715 nfp_net_close_free_all(nn);
2717 nn_dbg(nn, "%s down", netdev->name);
2721 void nfp_ctrl_close(struct nfp_net *nn)
2727 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2728 disable_irq(nn->r_vecs[r].irq_vector);
2729 tasklet_disable(&nn->r_vecs[r].tasklet);
2732 nfp_net_clear_config_and_disable(nn);
2734 nfp_net_close_free_all(nn);
2740 * nfp_net_open_stack() - Start the device from stack's perspective
2741 * @nn: NFP Net device to reconfigure
2743 static void nfp_net_open_stack(struct nfp_net *nn)
2747 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2748 napi_enable(&nn->r_vecs[r].napi);
2749 enable_irq(nn->r_vecs[r].irq_vector);
2752 netif_tx_wake_all_queues(nn->dp.netdev);
2754 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2755 nfp_net_read_link_status(nn);
2758 static int nfp_net_open_alloc_all(struct nfp_net *nn)
2762 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
2763 nn->exn_name, sizeof(nn->exn_name),
2764 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
2767 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
2768 nn->lsc_name, sizeof(nn->lsc_name),
2769 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
2772 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2774 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2775 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2777 goto err_cleanup_vec_p;
2780 err = nfp_net_rx_rings_prepare(nn, &nn->dp);
2782 goto err_cleanup_vec;
2784 err = nfp_net_tx_rings_prepare(nn, &nn->dp);
2786 goto err_free_rx_rings;
2788 for (r = 0; r < nn->max_r_vecs; r++)
2789 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2794 nfp_net_rx_rings_free(&nn->dp);
2796 r = nn->dp.num_r_vecs;
2797 err_cleanup_vec_p:
2798 while (r--)
2799 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2800 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2802 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2806 static int nfp_net_netdev_open(struct net_device *netdev)
2808 struct nfp_net *nn = netdev_priv(netdev);
2811 /* Step 1: Allocate resources for rings and the like
2812 * - Request interrupts
2813 * - Allocate RX and TX ring resources
2814 * - Setup initial RSS table
2816 err = nfp_net_open_alloc_all(nn);
2820 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
2824 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
2828 /* Step 2: Configure the NFP
2829 * - Ifup the physical interface if it exists
2830 * - Enable rings from 0 to tx_rings/rx_rings - 1.
2831 * - Write MAC address (in case it changed)
2833 * - Set the Freelist buffer size
2836 err = nfp_port_configure(netdev, true);
2840 err = nfp_net_set_config_and_enable(nn);
2842 goto err_port_disable;
2844 /* Step 3: Enable for kernel
2845 * - put some freelist descriptors on each RX ring
2846 * - enable NAPI on each ring
2847 * - enable all TX queues
2850 nfp_net_open_stack(nn);
2855 nfp_port_configure(netdev, false);
2857 nfp_net_close_free_all(nn);
2861 int nfp_ctrl_open(struct nfp_net *nn)
2865 /* ring dumping depends on vNICs being opened/closed under rtnl */
2868 err = nfp_net_open_alloc_all(nn);
2872 err = nfp_net_set_config_and_enable(nn);
2876 for (r = 0; r < nn->dp.num_r_vecs; r++)
2877 enable_irq(nn->r_vecs[r].irq_vector);
2884 nfp_net_close_free_all(nn);
2890 static void nfp_net_set_rx_mode(struct net_device *netdev)
2892 struct nfp_net *nn = netdev_priv(netdev);
2895 new_ctrl = nn->dp.ctrl;
2897 if (netdev->flags & IFF_PROMISC) {
2898 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
2899 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
2901 nn_warn(nn, "FW does not support promiscuous mode\n");
2903 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2906 if (new_ctrl == nn->dp.ctrl)
2907 return;
2909 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2910 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
2912 nn->dp.ctrl = new_ctrl;
2915 static void nfp_net_rss_init_itbl(struct nfp_net *nn)
2919 for (i = 0; i < sizeof(nn->rss_itbl); i++)
2920 nn->rss_itbl[i] =
2921 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
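/* Example distribution (illustrative, not from the original source): with
 * 4 RX rings ethtool_rxfh_indir_default() produces the repeating pattern
 * 0, 1, 2, 3, 0, 1, ... across the table, spreading RSS buckets evenly over
 * the enabled rings.
 */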
2924 static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
2926 struct nfp_net_dp new_dp = *dp;
2931 nn->dp.netdev->mtu = new_dp.mtu;
2933 if (!netif_is_rxfh_configured(nn->dp.netdev))
2934 nfp_net_rss_init_itbl(nn);
2937 static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
2942 nfp_net_dp_swap(nn, dp);
2944 for (r = 0; r < nn->max_r_vecs; r++)
2945 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2947 err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
2951 if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
2952 err = netif_set_real_num_tx_queues(nn->dp.netdev,
2953 nn->dp.num_stack_tx_rings);
2958 return nfp_net_set_config_and_enable(nn);
2961 struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
2963 struct nfp_net_dp *new;
2965 new = kmalloc(sizeof(*new), GFP_KERNEL);
2971 /* Clear things which need to be recomputed */
2973 new->tx_rings = NULL;
2974 new->rx_rings = NULL;
2975 new->num_r_vecs = 0;
2976 new->num_stack_tx_rings = 0;
2982 nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
2983 struct netlink_ext_ack *extack)
2985 /* XDP-enabled tests */
2988 if (dp->fl_bufsz > PAGE_SIZE) {
2989 NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
2992 if (dp->num_tx_rings > nn->max_tx_rings) {
2993 NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
3000 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
3001 struct netlink_ext_ack *extack)
3005 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
3007 dp->num_stack_tx_rings = dp->num_tx_rings;
3008 if (dp->xdp_prog)
3009 dp->num_stack_tx_rings -= dp->num_rx_rings;
3011 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
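/* Example (illustrative counts, not from the original source): a request
 * for 4 RX and 8 TX rings with an XDP program attached leaves 8 - 4 = 4
 * stack TX rings (one TX ring per RX ring is reserved for XDP_TX), giving
 * max(4, 4) = 4 ring vectors.
 */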
3013 err = nfp_net_check_config(nn, dp, extack);
3017 if (!netif_running(dp->netdev)) {
3018 nfp_net_dp_swap(nn, dp);
3023 /* Prepare new rings */
3024 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
3025 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
3028 goto err_cleanup_vecs;
3032 err = nfp_net_rx_rings_prepare(nn, dp);
3034 goto err_cleanup_vecs;
3036 err = nfp_net_tx_rings_prepare(nn, dp);
3040 /* Stop device, swap in new rings, try to start the firmware */
3041 nfp_net_close_stack(nn);
3042 nfp_net_clear_config_and_disable(nn);
3044 err = nfp_net_dp_swap_enable(nn, dp);
3048 nfp_net_clear_config_and_disable(nn);
3050 /* Try with old configuration and old rings */
3051 err2 = nfp_net_dp_swap_enable(nn, dp);
3053 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
3056 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
3057 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3059 nfp_net_rx_rings_free(dp);
3060 nfp_net_tx_rings_free(dp);
3062 nfp_net_open_stack(nn);
3069 nfp_net_rx_rings_free(dp);
3071 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
3072 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3077 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
3079 struct nfp_net *nn = netdev_priv(netdev);
3080 struct nfp_net_dp *dp;
3082 dp = nfp_net_clone_dp(nn);
3088 return nfp_net_ring_reconfig(nn, dp, NULL);
3092 nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3094 struct nfp_net *nn = netdev_priv(netdev);
3096 /* Priority tagged packets with vlan id 0 are processed by the
3097 * NFP as untagged packets
3102 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
3103 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
3105 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
3109 nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3111 struct nfp_net *nn = netdev_priv(netdev);
3113 /* Priority tagged packets with vlan id 0 are processed by the
3114 * NFP as untagged packets
3119 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_VID, vid);
3120 nn_writew(nn, NFP_NET_CFG_VLAN_FILTER_PROTO, ETH_P_8021Q);
3122 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
3125 static void nfp_net_stat64(struct net_device *netdev,
3126 struct rtnl_link_stats64 *stats)
3128 struct nfp_net *nn = netdev_priv(netdev);
3131 for (r = 0; r < nn->dp.num_r_vecs; r++) {
3132 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
3137 start = u64_stats_fetch_begin(&r_vec->rx_sync);
3138 data[0] = r_vec->rx_pkts;
3139 data[1] = r_vec->rx_bytes;
3140 data[2] = r_vec->rx_drops;
3141 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
3142 stats->rx_packets += data[0];
3143 stats->rx_bytes += data[1];
3144 stats->rx_dropped += data[2];
3147 start = u64_stats_fetch_begin(&r_vec->tx_sync);
3148 data[0] = r_vec->tx_pkts;
3149 data[1] = r_vec->tx_bytes;
3150 data[2] = r_vec->tx_errors;
3151 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
3152 stats->tx_packets += data[0];
3153 stats->tx_bytes += data[1];
3154 stats->tx_errors += data[2];
3158 static int nfp_net_set_features(struct net_device *netdev,
3159 netdev_features_t features)
3161 netdev_features_t changed = netdev->features ^ features;
3162 struct nfp_net *nn = netdev_priv(netdev);
3166 /* Assume this is not called with features we have not advertised */
3168 new_ctrl = nn->dp.ctrl;
3170 if (changed & NETIF_F_RXCSUM) {
3171 if (features & NETIF_F_RXCSUM)
3172 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3174 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
3177 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3178 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
3179 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3181 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
3184 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
3185 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
3186 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3187 NFP_NET_CFG_CTRL_LSO;
3189 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
3192 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
3193 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3194 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3196 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
3199 if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
3200 if (features & NETIF_F_HW_VLAN_CTAG_TX)
3201 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3203 new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
3206 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
3207 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3208 new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
3210 new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
3213 if (changed & NETIF_F_SG) {
3214 if (features & NETIF_F_SG)
3215 new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
3217 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
3220 if (changed & NETIF_F_HW_TC && nfp_app_tc_busy(nn->app, nn)) {
3221 nn_err(nn, "Cannot disable HW TC offload while in use\n");
3225 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
3226 netdev->features, features, changed);
3228 if (new_ctrl == nn->dp.ctrl)
3229 return 0;
3231 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
3232 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
3233 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
3237 nn->dp.ctrl = new_ctrl;
3242 static netdev_features_t
3243 nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
3244 netdev_features_t features)
3248 /* We can't do TSO over double tagged packets (802.1AD) */
3249 features &= vlan_features_check(skb, features);
3251 if (!skb->encapsulation)
3252 return features;
3254 /* Ensure that inner L4 header offset fits into TX descriptor field */
3255 if (skb_is_gso(skb)) {
3258 hdrlen = skb_inner_transport_header(skb) - skb->data +
3259 inner_tcp_hdrlen(skb);
3261 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
3262 features &= ~NETIF_F_GSO_MASK;
3265 /* VXLAN/GRE check */
3266 switch (vlan_get_protocol(skb)) {
3267 case htons(ETH_P_IP):
3268 l4_hdr = ip_hdr(skb)->protocol;
3270 case htons(ETH_P_IPV6):
3271 l4_hdr = ipv6_hdr(skb)->nexthdr;
3274 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3277 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
3278 skb->inner_protocol != htons(ETH_P_TEB) ||
3279 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
3280 (l4_hdr == IPPROTO_UDP &&
3281 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
3282 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
3283 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
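/* Summary of the checks above: checksum/GSO offload is only retained for
 * Ethernet-encapsulated inner frames (ETH_P_TEB) carried over GRE, or over
 * UDP when the inner MAC header sits exactly one udphdr plus one vxlanhdr
 * past the outer transport header (i.e. VXLAN); everything else falls back
 * to software.
 */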
3289 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
3290 * @nn: NFP Net device to reconfigure
3291 * @idx: Index into the port table where new port should be written
3292 * @port: UDP port to configure (pass zero to remove VXLAN port)
3294 static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
3298 nn->vxlan_ports[idx] = port;
3300 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
3301 return;
3303 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
3304 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
3305 nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
3306 be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
3307 be16_to_cpu(nn->vxlan_ports[i]));
3309 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
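/* Packing example (illustrative port numbers, not from the original
 * source): ports are written two per 32-bit register, entry i in the low 16
 * bits and entry i + 1 in the high 16 bits, so 4789 in slot 0 and 4790 in
 * slot 1 become (4790 << 16) | 4789 at NFP_NET_CFG_VXLAN_PORT + 0.
 */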
3313 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
3314 * @nn: NFP Network structure
3315 * @port: UDP port to look for
3317 * Return: if the port is already in the table -- its position;
3318 * if the port is not in the table -- free position to use;
3319 * if the table is full -- -ENOSPC.
3321 static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
3323 int i, free_idx = -ENOSPC;
3325 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
3326 if (nn->vxlan_ports[i] == port)
3328 if (!nn->vxlan_usecnt[i])
3335 static void nfp_net_add_vxlan_port(struct net_device *netdev,
3336 struct udp_tunnel_info *ti)
3338 struct nfp_net *nn = netdev_priv(netdev);
3341 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3342 return;
3344 idx = nfp_net_find_vxlan_idx(nn, ti->port);
3348 if (!nn->vxlan_usecnt[idx]++)
3349 nfp_net_set_vxlan_port(nn, idx, ti->port);
3352 static void nfp_net_del_vxlan_port(struct net_device *netdev,
3353 struct udp_tunnel_info *ti)
3355 struct nfp_net *nn = netdev_priv(netdev);
3358 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3359 return;
3361 idx = nfp_net_find_vxlan_idx(nn, ti->port);
3362 if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
3363 return;
3365 if (!--nn->vxlan_usecnt[idx])
3366 nfp_net_set_vxlan_port(nn, idx, 0);
3370 nfp_net_xdp_setup_drv(struct nfp_net *nn, struct bpf_prog *prog,
3371 struct netlink_ext_ack *extack)
3373 struct nfp_net_dp *dp;
3375 if (!prog == !nn->dp.xdp_prog) {
3376 WRITE_ONCE(nn->dp.xdp_prog, prog);
3380 dp = nfp_net_clone_dp(nn);
3384 dp->xdp_prog = prog;
3385 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
3386 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
3387 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
3389 /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
3390 return nfp_net_ring_reconfig(nn, dp, extack);
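/* Summary of the data path changes above: enabling XDP reserves one extra
 * TX ring per RX ring for XDP_TX, maps RX buffers DMA_BIDIRECTIONAL so
 * frames can be transmitted straight out of the RX buffer, and shifts the
 * packet start so XDP_PACKET_HEADROOM bytes are free in front of the frame.
 */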
3394 nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags,
3395 struct netlink_ext_ack *extack)
3397 struct bpf_prog *drv_prog, *offload_prog;
3400 if (nn->xdp_prog && (flags ^ nn->xdp_flags) & XDP_FLAGS_MODES)
3401 return -EBUSY;
3403 /* Load both when no flags set to allow easy activation of driver path
3404 * when program is replaced by one which can't be offloaded.
3406 drv_prog = flags & XDP_FLAGS_HW_MODE ? NULL : prog;
3407 offload_prog = flags & XDP_FLAGS_DRV_MODE ? NULL : prog;
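/* Resulting placement (restating the two assignments above):
 *   no mode flag       -> program loaded in the driver and offered for offload
 *   XDP_FLAGS_DRV_MODE -> program loaded in the driver only
 *   XDP_FLAGS_HW_MODE  -> program offered for hardware offload only
 */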
3409 err = nfp_net_xdp_setup_drv(nn, drv_prog, extack);
3413 err = nfp_app_xdp_offload(nn->app, nn, offload_prog);
3414 if (err && flags & XDP_FLAGS_HW_MODE)
3418 bpf_prog_put(nn->xdp_prog);
3419 nn->xdp_prog = prog;
3420 nn->xdp_flags = flags;
3425 static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
3427 struct nfp_net *nn = netdev_priv(netdev);
3429 switch (xdp->command) {
3430 case XDP_SETUP_PROG:
3431 case XDP_SETUP_PROG_HW:
3432 return nfp_net_xdp_setup(nn, xdp->prog, xdp->flags,
3434 case XDP_QUERY_PROG:
3435 xdp->prog_attached = !!nn->xdp_prog;
3436 if (nn->dp.bpf_offload_xdp)
3437 xdp->prog_attached = XDP_ATTACHED_HW;
3438 xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
3445 static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
3447 struct nfp_net *nn = netdev_priv(netdev);
3448 struct sockaddr *saddr = addr;
3451 err = eth_prepare_mac_addr_change(netdev, addr);
3455 nfp_net_write_mac_addr(nn, saddr->sa_data);
3457 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
3461 eth_commit_mac_addr_change(netdev, addr);
3466 const struct net_device_ops nfp_net_netdev_ops = {
3467 .ndo_open = nfp_net_netdev_open,
3468 .ndo_stop = nfp_net_netdev_close,
3469 .ndo_start_xmit = nfp_net_tx,
3470 .ndo_get_stats64 = nfp_net_stat64,
3471 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
3472 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
3473 .ndo_set_vf_mac = nfp_app_set_vf_mac,
3474 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
3475 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
3476 .ndo_get_vf_config = nfp_app_get_vf_config,
3477 .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
3478 .ndo_setup_tc = nfp_port_setup_tc,
3479 .ndo_tx_timeout = nfp_net_tx_timeout,
3480 .ndo_set_rx_mode = nfp_net_set_rx_mode,
3481 .ndo_change_mtu = nfp_net_change_mtu,
3482 .ndo_set_mac_address = nfp_net_set_mac_address,
3483 .ndo_set_features = nfp_net_set_features,
3484 .ndo_features_check = nfp_net_features_check,
3485 .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
3486 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
3487 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
3488 .ndo_xdp = nfp_net_xdp,
3492 * nfp_net_info() - Print general info about the NIC
3493 * @nn: NFP Net device to reconfigure
3495 void nfp_net_info(struct nfp_net *nn)
3497 nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
3498 nn->dp.is_vf ? "VF " : "",
3499 nn->dp.num_tx_rings, nn->max_tx_rings,
3500 nn->dp.num_rx_rings, nn->max_rx_rings);
3501 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
3502 nn->fw_ver.resv, nn->fw_ver.class,
3503 nn->fw_ver.major, nn->fw_ver.minor,
3505 nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
3507 nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
3508 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
3509 nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
3510 nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
3511 nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
3512 nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
3513 nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
3514 nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
3515 nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
3516 nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
3517 nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
3518 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
3519 nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
3520 nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
3521 nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
3522 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
3523 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
3524 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
3525 nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
3526 nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
3527 "RXCSUM_COMPLETE " : "",
3528 nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
3529 nfp_app_extra_cap(nn->app, nn));
3533 * nfp_net_alloc() - Allocate netdev and related structure
3535 * @needs_netdev: Whether to allocate a netdev for this vNIC
3536 * @max_tx_rings: Maximum number of TX rings supported by device
3537 * @max_rx_rings: Maximum number of RX rings supported by device
3539 * This function allocates a netdev device and fills in the initial
3540 * part of the @struct nfp_net structure. In case of control device
3541 * nfp_net structure is allocated without the netdev.
3543 * Return: NFP Net device structure, or ERR_PTR on error.
3545 struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
3546 unsigned int max_tx_rings,
3547 unsigned int max_rx_rings)
3552 struct net_device *netdev;
3554 netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
3555 max_tx_rings, max_rx_rings);
3556 if (!netdev)
3557 return ERR_PTR(-ENOMEM);
3559 SET_NETDEV_DEV(netdev, &pdev->dev);
3560 nn = netdev_priv(netdev);
3561 nn->dp.netdev = netdev;
3563 nn = vzalloc(sizeof(*nn));
3564 if (!nn)
3565 return ERR_PTR(-ENOMEM);
3568 nn->dp.dev = &pdev->dev;
3571 nn->max_tx_rings = max_tx_rings;
3572 nn->max_rx_rings = max_rx_rings;
3574 nn->dp.num_tx_rings = min_t(unsigned int,
3575 max_tx_rings, num_online_cpus());
3576 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
3577 netif_get_num_default_rss_queues());
3579 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
3580 nn->dp.num_r_vecs = min_t(unsigned int,
3581 nn->dp.num_r_vecs, num_online_cpus());
3583 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
3584 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
3586 spin_lock_init(&nn->reconfig_lock);
3587 spin_lock_init(&nn->link_status_lock);
3589 setup_timer(&nn->reconfig_timer,
3590 nfp_net_reconfig_timer, (unsigned long)nn);
3596 * nfp_net_free() - Undo what @nfp_net_alloc() did
3597 * @nn: NFP Net device to reconfigure
3599 void nfp_net_free(struct nfp_net *nn)
3601 WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
3602 if (nn->xdp_prog)
3603 bpf_prog_put(nn->xdp_prog);
3605 if (nn->dp.netdev)
3606 free_netdev(nn->dp.netdev);
3612 * nfp_net_rss_key_sz() - Get current size of the RSS key
3613 * @nn: NFP Net device instance
3615 * Return: size of the RSS key for currently selected hash function.
3617 unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
3619 switch (nn->rss_hfunc) {
3620 case ETH_RSS_HASH_TOP:
3621 return NFP_NET_CFG_RSS_KEY_SZ;
3622 case ETH_RSS_HASH_XOR:
3624 case ETH_RSS_HASH_CRC32:
3628 nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
3633 * nfp_net_rss_init() - Set the initial RSS parameters
3634 * @nn: NFP Net device to reconfigure
3636 static void nfp_net_rss_init(struct nfp_net *nn)
3638 unsigned long func_bit, rss_cap_hfunc;
3641 /* Read the RSS function capability and select first supported func */
3642 reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
3643 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
3644 if (!rss_cap_hfunc)
3645 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
3646 NFP_NET_CFG_RSS_TOEPLITZ);
3648 func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
3649 if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
3650 dev_warn(nn->dp.dev,
3651 "Bad RSS config, defaulting to Toeplitz hash\n");
3652 func_bit = ETH_RSS_HASH_TOP_BIT;
3654 nn->rss_hfunc = 1 << func_bit;
3656 netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
3658 nfp_net_rss_init_itbl(nn);
3660 /* Enable IPv4/IPv6 TCP by default */
3661 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
3662 NFP_NET_CFG_RSS_IPV6_TCP |
3663 FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
3664 NFP_NET_CFG_RSS_MASK;
3668 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
3669 * @nn: NFP Net device to reconfigure
3671 static void nfp_net_irqmod_init(struct nfp_net *nn)
3673 nn->rx_coalesce_usecs = 50;
3674 nn->rx_coalesce_max_frames = 64;
3675 nn->tx_coalesce_usecs = 50;
3676 nn->tx_coalesce_max_frames = 64;
3679 static void nfp_net_netdev_init(struct nfp_net *nn)
3681 struct net_device *netdev = nn->dp.netdev;
3683 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
3685 netdev->mtu = nn->dp.mtu;
3687 /* Advertise/enable offloads based on capabilities
3689 * Note: netdev->features show the currently enabled features
3690 * and netdev->hw_features advertises which features are
3691 * supported. By default we enable most features.
3693 if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
3694 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3696 netdev->hw_features = NETIF_F_HIGHDMA;
3697 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
3698 netdev->hw_features |= NETIF_F_RXCSUM;
3699 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3701 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
3702 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3703 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3705 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
3706 netdev->hw_features |= NETIF_F_SG;
3707 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
3709 if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
3710 nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3711 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3712 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3713 NFP_NET_CFG_CTRL_LSO;
3715 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
3716 netdev->hw_features |= NETIF_F_RXHASH;
3717 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
3718 nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
3719 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3720 netdev->hw_features |= NETIF_F_GSO_GRE |
3721 NETIF_F_GSO_UDP_TUNNEL;
3722 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
3724 netdev->hw_enc_features = netdev->hw_features;
3727 netdev->vlan_features = netdev->hw_features;
3729 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
3730 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3731 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3733 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
3734 if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3735 nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
3737 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
3738 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3741 if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
3742 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3743 nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
3746 netdev->features = netdev->hw_features;
3748 if (nfp_app_has_tc(nn->app))
3749 netdev->hw_features |= NETIF_F_HW_TC;
3751 /* Advertise but disable TSO by default. */
3752 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3753 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
3755 /* Finalise the netdev setup */
3756 netdev->netdev_ops = &nfp_net_netdev_ops;
3757 netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
3759 SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
3761 /* MTU range: 68 - hw-specific max */
3762 netdev->min_mtu = ETH_MIN_MTU;
3763 netdev->max_mtu = nn->max_mtu;
3765 netif_carrier_off(netdev);
3767 nfp_net_set_ethtool_ops(netdev);
3771 * nfp_net_init() - Initialise/finalise the nfp_net structure
3772 * @nn: NFP Net device structure
3774 * Return: 0 on success or negative errno on error.
3776 int nfp_net_init(struct nfp_net *nn)
3780 nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
3782 /* Get some of the read-only fields from the BAR */
3783 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
3784 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
3786 /* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
3787 * we allow use of non-chained metadata if RSS(v1) is the only
3788 * advertised capability requiring metadata.
3790 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
3791 !nn->dp.netdev ||
3792 !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
3793 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
3794 /* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
3795 * it has the same meaning as RSSv2.
3797 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
3798 nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
3800 /* Determine RX packet/metadata boundary offset */
3801 if (nn->fw_ver.major >= 2) {
3804 reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
3805 if (reg > NFP_NET_MAX_PREPEND) {
3806 nn_err(nn, "Invalid rx offset: %d\n", reg);
3807 return -EINVAL;
3809 nn->dp.rx_offset = reg;
3811 nn->dp.rx_offset = NFP_NET_RX_OFFSET;
3814 /* Set default MTU and Freelist buffer size */
3815 if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
3816 nn->dp.mtu = nn->max_mtu;
3818 nn->dp.mtu = NFP_NET_DEFAULT_MTU;
3819 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
3821 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
3822 nfp_net_rss_init(nn);
3823 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
3824 NFP_NET_CFG_CTRL_RSS;
3827 /* Allow L2 Broadcast and Multicast through by default, if supported */
3828 if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
3829 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
3830 if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
3831 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;
3833 /* Allow IRQ moderation, if supported */
3834 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
3835 nfp_net_irqmod_init(nn);
3836 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
3840 nfp_net_netdev_init(nn);
3842 /* Stash the re-configuration queue away. First odd queue in TX Bar */
3843 nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
3845 /* Make sure the FW knows the netdev is supposed to be disabled here */
3846 nn_writel(nn, NFP_NET_CFG_CTRL, 0);
3847 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
3848 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
3849 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
3850 NFP_NET_CFG_UPDATE_GEN);
3854 nfp_net_vecs_init(nn);
3858 return register_netdev(nn->dp.netdev);
3862 * nfp_net_clean() - Undo what nfp_net_init() did.
3863 * @nn: NFP Net device structure
3865 void nfp_net_clean(struct nfp_net *nn)
3870 unregister_netdev(nn->dp.netdev);
3871 nfp_net_reconfig_wait_posted(nn);