// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"
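/* AF_XDP zero-copy (ZC) support for the ice driver: registering and
 * unregistering xsk_buff_pools on a queue pair, the zero-copy Rx and Tx
 * ring processing, and the ndo_xsk_wakeup handler.
 */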
/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
        memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
               sizeof(vsi->rx_rings[q_idx]->rx_stats));
        memset(&vsi->tx_rings[q_idx]->stats, 0,
               sizeof(vsi->tx_rings[q_idx]->stats));
        if (ice_is_xdp_ena_vsi(vsi))
                memset(&vsi->xdp_rings[q_idx]->stats, 0,
                       sizeof(vsi->xdp_rings[q_idx]->stats));
}
/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
        ice_clean_tx_ring(vsi->tx_rings[q_idx]);
        if (ice_is_xdp_ena_vsi(vsi)) {
                synchronize_rcu();
                ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
        }
        ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
                     bool enable)
{
        if (!vsi->netdev || !q_vector)
                return;

        if (enable)
                napi_enable(&q_vector->napi);
        else
                napi_disable(&q_vector->napi);
}
/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
                 struct ice_q_vector *q_vector)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        int base = vsi->base_vector;
        u16 reg;
        u32 val;

        /* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so only
         * QINT_RQCTL needs to be handled here
         */
        reg = rx_ring->reg_idx;
        val = rd32(hw, QINT_RQCTL(reg));
        val &= ~QINT_RQCTL_CAUSE_ENA_M;
        wr32(hw, QINT_RQCTL(reg), val);

        if (q_vector) {
                u16 v_idx = q_vector->v_idx;

                wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
                ice_flush(hw);
                synchronize_irq(pf->msix_entries[v_idx + base].vector);
        }
}
/**
 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
        u16 reg_idx = q_vector->reg_idx;
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        struct ice_ring *ring;

        ice_cfg_itr(hw, q_vector);

        wr32(hw, GLINT_RATE(reg_idx),
             ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));

        ice_for_each_ring(ring, q_vector->tx)
                ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
                                      q_vector->tx.itr_idx);

        ice_for_each_ring(ring, q_vector->rx)
                ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
                                      q_vector->rx.itr_idx);

        ice_flush(hw);
}
/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;

        ice_irq_dynamic_ena(hw, vsi, q_vector);

        ice_flush(hw);
}
/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
        struct ice_txq_meta txq_meta = { };
        struct ice_ring *tx_ring, *rx_ring;
        struct ice_q_vector *q_vector;
        int timeout = 50;
        int err;

        if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
                return -EINVAL;

        tx_ring = vsi->tx_rings[q_idx];
        rx_ring = vsi->rx_rings[q_idx];
        q_vector = rx_ring->q_vector;

        while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) {
                timeout--;
                if (!timeout)
                        return -EBUSY;
                usleep_range(1000, 2000);
        }
        netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

        ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
        err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
        if (err)
                return err;
        if (ice_is_xdp_ena_vsi(vsi)) {
                struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

                memset(&txq_meta, 0, sizeof(txq_meta));
                ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
                err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
                                           &txq_meta);
                if (err)
                        return err;
        }
        ice_qvec_dis_irq(vsi, rx_ring, q_vector);

        err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
        if (err)
                return err;

        ice_qvec_toggle_napi(vsi, q_vector, false);
        ice_qp_clean_rings(vsi, q_idx);
        ice_qp_reset_stats(vsi, q_idx);

        return 0;
}
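/*
 * Note: ice_qp_dis() above and ice_qp_ena() below are used as a pair around
 * ice_xsk_pool_enable()/ice_xsk_pool_disable(). The disable path leaves
 * __ICE_CFG_BUSY set so the queue pair stays quiesced until ice_qp_ena()
 * clears the bit again once the rings have been reconfigured.
 */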
/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
        struct ice_aqc_add_tx_qgrp *qg_buf;
        struct ice_ring *tx_ring, *rx_ring;
        struct ice_q_vector *q_vector;
        u16 size;
        int err;

        if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
                return -EINVAL;

        size = struct_size(qg_buf, txqs, 1);
        qg_buf = kzalloc(size, GFP_KERNEL);
        if (!qg_buf)
                return -ENOMEM;

        qg_buf->num_txqs = 1;

        tx_ring = vsi->tx_rings[q_idx];
        rx_ring = vsi->rx_rings[q_idx];
        q_vector = rx_ring->q_vector;

        err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
        if (err)
                goto free_buf;

        if (ice_is_xdp_ena_vsi(vsi)) {
                struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

                memset(qg_buf, 0, size);
                qg_buf->num_txqs = 1;
                err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
                if (err)
                        goto free_buf;
                ice_set_ring_xdp(xdp_ring);
                xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
        }

        err = ice_setup_rx_ctx(rx_ring);
        if (err)
                goto free_buf;

        ice_qvec_cfg_msix(vsi, q_vector);

        err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
        if (err)
                goto free_buf;

        clear_bit(__ICE_CFG_BUSY, vsi->state);
        ice_qvec_toggle_napi(vsi, q_vector, true);
        ice_qvec_ena_irq(vsi, q_vector);

        netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
        kfree(qg_buf);
        return err;
}
/**
 * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
 * @vsi: VSI to allocate the buffer pool on
 *
 * Returns 0 on success, negative on error
 */
static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
{
        if (vsi->xsk_pools)
                return 0;

        vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
                                 GFP_KERNEL);
        if (!vsi->xsk_pools) {
                vsi->num_xsk_pools = 0;
                return -ENOMEM;
        }

        return 0;
}
/**
 * ice_xsk_remove_pool - Remove a buffer pool for a certain ring/qid
 * @vsi: VSI from which the buffer pool will be removed
 * @qid: Ring/qid associated with the buffer pool
 */
static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
{
        vsi->xsk_pools[qid] = NULL;
        vsi->num_xsk_pools_used--;

        if (vsi->num_xsk_pools_used == 0) {
                kfree(vsi->xsk_pools);
                vsi->xsk_pools = NULL;
                vsi->num_xsk_pools = 0;
        }
}
/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
        if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
            !vsi->xsk_pools[qid])
                return -EINVAL;

        xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
        ice_xsk_remove_pool(vsi, qid);

        return 0;
}
/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
        int err;

        if (vsi->type != ICE_VSI_PF)
                return -EINVAL;

        if (!vsi->num_xsk_pools)
                vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
        if (qid >= vsi->num_xsk_pools)
                return -EINVAL;

        err = ice_xsk_alloc_pools(vsi);
        if (err)
                return err;

        if (vsi->xsk_pools && vsi->xsk_pools[qid])
                return -EBUSY;

        vsi->xsk_pools[qid] = pool;
        vsi->num_xsk_pools_used++;

        err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
                               ICE_RX_DMA_ATTR);
        if (err)
                return err;

        return 0;
}
/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
        bool if_running, pool_present = !!pool;
        int ret = 0, pool_failure = 0;

        if (qid >= vsi->num_rxq || qid >= vsi->num_txq) {
                netdev_err(vsi->netdev, "Please use queue id in scope of combined queues count\n");
                pool_failure = -EINVAL;
                goto failure;
        }

        if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
            !is_power_of_2(vsi->tx_rings[qid]->count)) {
                netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
                pool_failure = -EINVAL;
                goto failure;
        }

        if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

        if (if_running) {
                ret = ice_qp_dis(vsi, qid);
                if (ret) {
                        netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
                        goto xsk_pool_if_up;
                }
        }

        pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
                                      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
        if (if_running) {
                ret = ice_qp_ena(vsi, qid);
                if (!ret && pool_present)
                        napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
                else if (ret)
                        netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
        }

failure:
        if (pool_failure) {
                netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
                           pool_present ? "en" : "dis", pool_failure);
                return pool_failure;
        }

        return ret;
}
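/*
 * Typical call site (illustrative sketch, not part of this file): the
 * driver's ndo_bpf handler dispatches pool attach/detach requests here,
 * roughly:
 *
 *	case XDP_SETUP_XSK_POOL:
 *		return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
 *					  xdp->xsk.queue_id);
 */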
/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Returns false if all allocations were successful, true if any fail.
 */
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
        union ice_32b_rx_flex_desc *rx_desc;
        u16 ntu = rx_ring->next_to_use;
        struct ice_rx_buf *rx_buf;
        bool ret = false;
        dma_addr_t dma;

        if (!count)
                return false;

        rx_desc = ICE_RX_DESC(rx_ring, ntu);
        rx_buf = &rx_ring->rx_buf[ntu];

        do {
                rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
                if (!rx_buf->xdp) {
                        ret = true;
                        break;
                }

                dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
                rx_desc->read.pkt_addr = cpu_to_le64(dma);
                rx_desc->wb.status_error0 = 0;

                rx_desc++;
                rx_buf++;
                ntu++;

                if (unlikely(ntu == rx_ring->count)) {
                        rx_desc = ICE_RX_DESC(rx_ring, 0);
                        rx_buf = rx_ring->rx_buf;
                        ntu = 0;
                }
        } while (--count);

        if (rx_ring->next_to_use != ntu) {
                /* clear the status bits for the next_to_use descriptor */
                rx_desc->wb.status_error0 = 0;
                ice_release_rx_desc(rx_ring, ntu);
        }

        return ret;
}
/**
 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
 * @rx_ring: Rx ring
 */
static void ice_bump_ntc(struct ice_ring *rx_ring)
{
        int ntc = rx_ring->next_to_clean + 1;

        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;
        prefetch(ICE_RX_DESC(rx_ring, ntc));
}
/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @rx_buf: zero-copy Rx buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
        unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
        unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
        unsigned int datasize_hard = rx_buf->xdp->data_end -
                                     rx_buf->xdp->data_hard_start;
        struct sk_buff *skb;

        skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
        memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
        if (metasize)
                skb_metadata_set(skb, metasize);

        xsk_buff_free(rx_buf->xdp);
        rx_buf->xdp = NULL;
        return skb;
}
/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
{
        int err, result = ICE_XDP_PASS;
        struct bpf_prog *xdp_prog;
        struct ice_ring *xdp_ring;
        u32 act;

        rcu_read_lock();
        xdp_prog = READ_ONCE(rx_ring->xdp_prog);
        if (!xdp_prog) {
                rcu_read_unlock();
                return ICE_XDP_PASS;
        }

        act = bpf_prog_run_xdp(xdp_prog, xdp);

        if (likely(act == XDP_REDIRECT)) {
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
                if (err)
                        goto out_failure;
                rcu_read_unlock();
                return ICE_XDP_REDIR;
        }

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
                result = ice_xmit_xdp_buff(xdp, xdp_ring);
                if (result == ICE_XDP_CONSUMED)
                        goto out_failure;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
out_failure:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
                result = ICE_XDP_CONSUMED;
                break;
        }

        rcu_read_unlock();
        return result;
}
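/*
 * The verdict returned above is a bitmask: ice_clean_rx_irq_zc() ORs the
 * ICE_XDP_TX/ICE_XDP_REDIR bits into xdp_xmit and flushes them once per
 * NAPI poll via ice_finalize_xdp_rx().
 */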
/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
        unsigned int xdp_xmit = 0;
        bool failure = false;

        while (likely(total_rx_packets < (unsigned int)budget)) {
                union ice_32b_rx_flex_desc *rx_desc;
                unsigned int size, xdp_res = 0;
                struct ice_rx_buf *rx_buf;
                struct sk_buff *skb;
                u16 stat_err_bits;
                u16 vlan_tag = 0;
                u8 rx_ptype;

                if (cleaned_count >= ICE_RX_BUF_WRITE) {
                        failure |= ice_alloc_rx_bufs_zc(rx_ring,
                                                        cleaned_count);
                        cleaned_count = 0;
                }

                rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
                if (!ice_test_staterr(rx_desc, stat_err_bits))
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we have
                 * verified the descriptor has been written back.
                 */
                dma_rmb();

                size = le16_to_cpu(rx_desc->wb.pkt_len) &
                                   ICE_RX_FLX_DESC_PKT_LEN_M;
                if (!size)
                        break;

                rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
                rx_buf->xdp->data_end = rx_buf->xdp->data + size;
                xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);

                xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
                if (xdp_res) {
                        if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
                                xdp_xmit |= xdp_res;
                        else
                                xsk_buff_free(rx_buf->xdp);

                        rx_buf->xdp = NULL;
                        total_rx_bytes += size;
                        total_rx_packets++;
                        cleaned_count++;

                        ice_bump_ntc(rx_ring);
                        continue;
                }

                /* XDP_PASS path */
                skb = ice_construct_skb_zc(rx_ring, rx_buf);
                if (!skb) {
                        rx_ring->rx_stats.alloc_buf_failed++;
                        break;
                }

                cleaned_count++;
                ice_bump_ntc(rx_ring);

                if (eth_skb_pad(skb)) {
                        skb = NULL;
                        continue;
                }

                total_rx_bytes += skb->len;
                total_rx_packets++;

                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
                if (ice_test_staterr(rx_desc, stat_err_bits))
                        vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

                rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
                                       ICE_RX_FLEX_DESC_PTYPE_M;

                ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
                ice_receive_skb(rx_ring, skb, vlan_tag);
        }

        ice_finalize_xdp_rx(rx_ring, xdp_xmit);
        ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

        if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
                if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
                        xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
                else
                        xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

                return (int)total_rx_packets;
        }

        return failure ? budget : (int)total_rx_packets;
}
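/*
 * need_wakeup handshake: if the AF_XDP socket was bound with
 * XDP_USE_NEED_WAKEUP, the Rx path above raises the need_wakeup flag when
 * the ring ran dry or buffer allocation failed, telling user space to kick
 * the driver (e.g. via poll()); otherwise the flag is cleared.
 */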
/**
 * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: max number of frames to xmit
 *
 * Returns true if cleanup/transmission is done.
 */
static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
{
        struct ice_tx_desc *tx_desc = NULL;
        bool work_done = true;
        struct xdp_desc desc;
        dma_addr_t dma;

        while (likely(budget-- > 0)) {
                struct ice_tx_buf *tx_buf;

                if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
                        xdp_ring->tx_stats.tx_busy++;
                        work_done = false;
                        break;
                }

                tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];

                if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
                        break;

                dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
                xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
                                                 desc.len);

                tx_buf->bytecount = desc.len;

                tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
                tx_desc->buf_addr = cpu_to_le64(dma);
                tx_desc->cmd_type_offset_bsz =
                        ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);

                xdp_ring->next_to_use++;
                if (xdp_ring->next_to_use == xdp_ring->count)
                        xdp_ring->next_to_use = 0;
        }

        if (tx_desc) {
                ice_xdp_ring_update_tail(xdp_ring);
                xsk_tx_release(xdp_ring->xsk_pool);
        }

        return budget > 0 && work_done;
}
/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */
static void
ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
        xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
        dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
                         dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
        dma_unmap_len_set(tx_buf, len, 0);
}
/**
 * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 */
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
{
        int total_packets = 0, total_bytes = 0;
        s16 ntc = xdp_ring->next_to_clean;
        struct ice_tx_desc *tx_desc;
        struct ice_tx_buf *tx_buf;
        u32 xsk_frames = 0;
        bool xmit_done;

        tx_desc = ICE_TX_DESC(xdp_ring, ntc);
        tx_buf = &xdp_ring->tx_buf[ntc];
        ntc -= xdp_ring->count;

        do {
                if (!(tx_desc->cmd_type_offset_bsz &
                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
                        break;

                total_bytes += tx_buf->bytecount;
                total_packets++;

                if (tx_buf->raw_buf) {
                        ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
                        tx_buf->raw_buf = NULL;
                } else {
                        xsk_frames++;
                }

                tx_desc->cmd_type_offset_bsz = 0;
                tx_buf++;
                tx_desc++;
                ntc++;

                if (unlikely(!ntc)) {
                        ntc -= xdp_ring->count;
                        tx_buf = xdp_ring->tx_buf;
                        tx_desc = ICE_TX_DESC(xdp_ring, 0);
                }

                prefetch(tx_desc);

        } while (likely(--budget));

        ntc += xdp_ring->count;
        xdp_ring->next_to_clean = ntc;

        if (xsk_frames)
                xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

        if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
                xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

        ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
        xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);

        return budget > 0 && xmit_done;
}
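/*
 * Tx kicks from user space: when the need_wakeup flag set above is seen by
 * the application, it issues a sendto()/poll() on the AF_XDP socket, which
 * reaches the driver through ndo_xsk_wakeup, i.e. ice_xsk_wakeup() below.
 */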
/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
               u32 __always_unused flags)
{
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_q_vector *q_vector;
        struct ice_vsi *vsi = np->vsi;
        struct ice_ring *ring;

        if (test_bit(__ICE_DOWN, vsi->state))
                return -ENETDOWN;

        if (!ice_is_xdp_ena_vsi(vsi))
                return -ENXIO;

        if (queue_id >= vsi->num_txq)
                return -ENXIO;

        if (!vsi->xdp_rings[queue_id]->xsk_pool)
                return -ENXIO;

        ring = vsi->xdp_rings[queue_id];

        /* The idea here is that if NAPI is running, mark a miss, so
         * it will run again. If not, trigger an interrupt and
         * schedule the NAPI from interrupt context. If NAPI would be
         * scheduled here, the interrupt affinity would not be
         * honored.
         */
        q_vector = ring->q_vector;
        if (!napi_if_scheduled_mark_missed(&q_vector->napi))
                ice_trigger_sw_intr(&vsi->back->hw, q_vector);

        return 0;
}
/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
        int i;

        if (!vsi->xsk_pools)
                return false;

        for (i = 0; i < vsi->num_xsk_pools; i++) {
                if (vsi->xsk_pools[i])
                        return true;
        }

        return false;
}
/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
{
        u16 i;

        for (i = 0; i < rx_ring->count; i++) {
                struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

                if (!rx_buf->xdp)
                        continue;

                rx_buf->xdp = NULL;
        }
}
/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
{
        u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
        u32 xsk_frames = 0;

        while (ntc != ntu) {
                struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

                if (tx_buf->raw_buf)
                        ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
                else
                        xsk_frames++;

                tx_buf->raw_buf = NULL;

                ntc++;
                if (ntc >= xdp_ring->count)
                        ntc = 0;
        }

        if (xsk_frames)
                xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}