// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

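/* Example (illustrative sketch, not part of this file): with
 * XDP_USE_NEED_WAKEUP set, user space only pays for a syscall when the
 * kernel has flagged the ring, e.g. for the Tx path:
 *
 *	if (ring->flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * where "ring" points at the mmap'ed producer/consumer page of the ring.
 */
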
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

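/* Example (illustrative sketch): a zero-copy capable driver would
 * typically look up the pool registered above when (re)configuring a
 * queue, along these lines:
 *
 *	struct xsk_buff_pool *pool;
 *
 *	pool = xsk_get_pool_from_qid(netdev, queue_id);
 *	if (!pool)
 *		return -EINVAL;	// no AF_XDP socket bound to this queue
 */
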
void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

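/* In unaligned mode the chunk offset travels in the upper bits of the
 * handle. A consumer could split it back out like this (sketch, using
 * the uapi mask/shift from <linux/if_xdp.h>):
 *
 *	u64 addr = handle & XSK_UNALIGNED_BUF_ADDR_MASK;
 *	u64 offset = handle >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
 */
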
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

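/* Example (illustrative sketch): a driver's zero-copy Tx path pairs
 * xsk_tx_peek_desc() with xsk_tx_release() and, once the hardware has
 * completed the sends, xsk_tx_completed():
 *
 *	while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
 *		dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *		// post dma/desc.len to the hardware Tx ring here
 *	}
 *	xsk_tx_release(pool);
 *	...
 *	xsk_tx_completed(pool, done);	// from the Tx IRQ/clean path
 */
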
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}

static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;
	u32 hr, tr;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
	tr = xs->dev->needed_tailroom;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_reserve(skb, hr);
		skb_put(skb, len);

		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			kfree_skb(skb);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

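/* User space drives this path with an empty send; only MSG_DONTWAIT is
 * supported (sketch):
 *
 *	sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 */
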
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

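/* Example (illustrative sketch): a user-space event loop typically
 * combines poll() with the need_wakeup flags above:
 *
 *	struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN | POLLOUT };
 *
 *	poll(&pfd, 1, -1);
 *	if (pfd.revents & POLLIN)
 *		...	// consume the Rx ring
 */
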
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

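/* Ring sizes must be a power of two and are set before bind(), e.g.
 * from user space (sketch):
 *
 *	int entries = 2048;
 *
 *	setsockopt(xsk_fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 */
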
static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	int bound_dev_if;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

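/* Example (illustrative sketch): the matching user-space bind() call:
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = if_nametoindex("eth0"),
 *		.sxdp_queue_id = 0,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *
 *	bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */
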
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

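/* Example (illustrative sketch): registering a UMEM and its fill ring
 * from user space; buf must be page aligned, e.g. obtained via mmap().
 * NUM_FRAMES and FRAME_SIZE stand in for the application's chosen UMEM
 * geometry:
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(unsigned long)buf,
 *		.len = NUM_FRAMES * FRAME_SIZE,
 *		.chunk_size = FRAME_SIZE,	// e.g. 2048 or 4096
 *		.headroom = 0,
 *	};
 *	int entries = 2048;
 *
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries,
 *		   sizeof(entries));
 */
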
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

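/* Example (illustrative sketch): user space discovers the ring layout
 * with XDP_MMAP_OFFSETS and then maps each ring at its fixed pgoff:
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *
 *	getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	rx_map = mmap(NULL, off.rx.desc + entries * sizeof(struct xdp_desc),
 *		      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		      xsk_fd, XDP_PGOFF_RX_RING);
 */
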
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);