// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(adapter->netdev, qid);
}

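/* Enable an AF_XDP buffer pool on queue pair @qid: DMA-map the pool for the
 * device, mark the queue as zero-copy capable and, if the interface is
 * running, bounce the ring pair so it comes back up attached to the pool.
 */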
static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
				 struct xsk_buff_pool *pool,
				 u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		if (err) {
			clear_bit(qid, adapter->af_xdp_zc_qps);
			xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}

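/* Disable zero-copy on queue pair @qid, quiescing the ring pair before the
 * pool's DMA mappings are torn down.
 */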
static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	struct xsk_buff_pool *pool;
	bool if_running;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	clear_bit(qid, adapter->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

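/* XDP_SETUP_XSK_POOL handler: a non-NULL @pool enables zero-copy on @qid,
 * a NULL @pool disables it.
 */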
int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
			 struct xsk_buff_pool *pool,
			 u16 qid)
{
	return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
		ixgbe_xsk_pool_disable(adapter, qid);
}

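/* Illustration only, not driver code: this setup path is typically reached
 * when userspace binds an AF_XDP socket to one of this device's queues,
 * e.g. with libbpf's xsk helpers (assuming that API):
 *
 *	xsk_socket__create(&xsk, "eth0", qid, umem, &rx, &tx, &cfg);
 *
 * The bind then funnels an XDP_SETUP_XSK_POOL command into ndo_bpf.
 */

/* Run the XDP program on a received buffer and map its verdict onto the
 * driver's IXGBE_XDP_{PASS,CONSUMED,TX,REDIR} result codes. XDP_REDIRECT
 * is checked first, as it is the expected common case for AF_XDP.
 */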
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 act;

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return IXGBE_XDP_REDIR;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_failure;
		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
		if (result == IXGBE_XDP_CONSUMED)
			goto out_failure;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
	return result;
}

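/* Post up to @count buffers from the XSK pool onto the RX ring. Returns
 * true if all @count buffers were allocated and posted, false if the pool
 * ran dry; the caller accounts the latter as an allocation failure.
 */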
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	dma_addr_t dma;
	bool ok = true;

	/* nothing to do */
	if (!count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

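/* XDP_PASS for zero-copy: the frame is copied out of the UMEM into a
 * freshly allocated skb so that the XSK buffer can be returned to the pool
 * immediately.
 */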
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      struct ixgbe_rx_buffer *bi)
{
	unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
	unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       bi->xdp->data_end - bi->xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(bi->xdp);
	bi->xdp = NULL;
	return skb;
}

static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

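/* Zero-copy RX poll loop, called from NAPI context. Returns the number of
 * packets processed; when the socket does not use the need_wakeup flag, an
 * allocation failure returns the full @budget instead so that NAPI keeps
 * polling.
 */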
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			ixgbe_inc_ntc(rx_ring);
			next_bi =
				&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->discard = true;
			continue;
		}

		if (unlikely(bi->discard)) {
			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			bi->discard = false;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		bi->xdp->data_end = bi->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(bi->xdp);

			bi->xdp = NULL;
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

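/* Free any XSK buffers still held by the RX ring; called when the ring is
 * torn down.
 */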
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct ixgbe_rx_buffer *bi;
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}

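/* Pull up to @budget descriptors from the pool's TX ring and post them on
 * the XDP ring. The tail bump and xsk_tx_release() are deferred until after
 * the loop so the doorbell is written once per batch.
 */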
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 cmd_type;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
			work_done = false;
			break;
		}

		if (!netif_carrier_ok(xdp_ring->netdev))
			break;

		if (!xsk_tx_peek_desc(pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_bi->xdpf = NULL;
		tx_bi->gso_segs = 1;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= desc.len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(pool);
	}

	return !!budget && work_done;
}

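/* Completion for a frame that entered the XDP ring via XDP_TX (an
 * xdp_frame) rather than from the XSK pool: return the frame and drop its
 * DMA mapping.
 */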
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

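/* TX completion for a zero-copy ring, driven by the DD (descriptor done)
 * bit. XDP_TX frames are unmapped and returned individually; frames sent
 * from the XSK pool are only counted here, then completed back to the pool
 * in a single xsk_tx_completed() call.
 */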
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	unsigned int total_packets = 0, total_bytes = 0;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	tx_bi = &tx_ring->tx_buffer_info[ntc];
	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

	while (ntc != ntu) {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		ntc++;
		if (unlikely(ntc == tx_ring->count)) {
			ntc = 0;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);
	}

	tx_ring->next_to_clean = ntc;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);

	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);

	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}

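/* ndo_xsk_wakeup implementation. If NAPI is not already scheduled for the
 * queue's vector, trigger a software interrupt via EICS so the rings get
 * serviced.
 */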
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	ring = adapter->xdp_ring[qid];

	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
		return -ENETDOWN;

	if (!ring->xsk_pool)
		return -ENXIO;

	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

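/* Free all outstanding TX entries between next_to_clean and next_to_use at
 * ring teardown, completing any XSK-pool frames back to the pool.
 */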
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);
}