// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and user-space
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"

#define TX_BATCH_SIZE 16

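/* Rough user-space setup sequence for an AF_XDP socket (a sketch based on
 * the socket options and mmap offsets handled in this file, not a complete
 * example; the structures are defined in <linux/if_xdp.h>):
 *
 *   fd = socket(AF_XDP, SOCK_RAW, 0);
 *   setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *   setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *   setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries, sizeof(entries));
 *   setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *   getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *   mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, XDP_PGOFF_RX_RING);
 *   bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */
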
static struct xdp_sock *xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;
}

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

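/* Copy-based receive: reserve a chunk from the fill ring, copy the frame
 * into the umem and publish a descriptor on the RX ring.
 */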
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	void *buffer;
	u64 addr;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data, len);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

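/* Zero-copy receive: the frame already resides in the umem, so only a
 * descriptor referencing it is published on the RX ring.
 */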
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err) {
		xdp_return_buff(xdp);
		xs->rx_dropped++;
	}

	return err;
}

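/* Entry point from the XDP redirect path. Verifies that the packet arrived
 * on the bound device and queue, then dispatches to the zero-copy or copy
 * receive path.
 */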
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}

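/* Receive path for generic (skb-based) XDP. Same copy scheme as __xsk_rcv,
 * but the RX ring is flushed immediately since there is no later flush point.
 */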
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len = xdp->data_end - xdp->data;
	void *buffer;
	u64 addr;
	int err;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data, len);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xsk_flush(xs);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

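/* Called by zero-copy capable drivers to fetch the next TX descriptor to
 * transmit. A completion entry is reserved lazily and the DMA address and
 * length of the frame are handed back to the driver.
 */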
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
{
	struct xdp_desc desc;
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, &desc))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
			goto out;

		*dma = xdp_umem_get_dma(umem, desc.addr);
		*len = desc.len;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}

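/* skb destructor for copy-mode TX: when the skb has been consumed, post the
 * originating umem address on the completion ring.
 */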
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

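/* Copy-mode TX: drain up to TX_BATCH_SIZE descriptors from the TX ring,
 * copy each frame into a freshly allocated skb and send it on the bound
 * queue with dev_direct_xmit().
 */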
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_peek_desc(xs->tx, &desc)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}

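/* datagram_poll() provides the generic socket events; additionally report
 * readable when the RX ring holds descriptors and writable while the TX
 * ring has room.
 */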
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

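/* Allocate one descriptor ring. umem_queue selects the fill/completion ring
 * layout instead of the RX/TX layout. The entry count must be a power of two.
 */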
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	if (xs->dev) {
		struct net_device *dev = xs->dev;

		/* Wait for driver to stop using the xdp socket. */
		xdp_del_sk_umem(xs->umem, xs);
		xs->dev = NULL;
		synchronize_net();
		dev_put(dev);
	}

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

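/* Bind the socket to a device and queue id. With XDP_SHARED_UMEM the umem of
 * another AF_XDP socket (same device and queue) is reused; otherwise the
 * socket's own umem is assigned to the device.
 */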
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->dev) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if ((xs->rx && qid >= dev->real_num_rx_queues) ||
	    (xs->tx && qid >= dev->real_num_tx_queues)) {
		err = -EINVAL;
		goto out_unlock;
	}

	flags = sxdp->sxdp_flags;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, &xs->umem->props);
		xskq_set_umem(xs->umem->cq, &xs->umem->props);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, &xs->umem->props);
	xskq_set_umem(xs->tx, &xs->umem->props);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}

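/* Socket options register the umem (XDP_UMEM_REG) and size the four rings;
 * the rings themselves are exposed to user space via mmap().
 */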
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		struct xdp_umem_reg mr;
		struct xdp_umem *umem;

		if (copy_from_user(&mr, optval, sizeof(mr)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;

		if (len < sizeof(off))
			return -EINVAL;

		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.rx.desc = offsetof(struct xdp_rxtx_ring, desc);
		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.tx.desc = offsetof(struct xdp_rxtx_ring, desc);

		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.fr.desc = offsetof(struct xdp_umem_ring, desc);
		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.cr.desc = offsetof(struct xdp_umem_ring, desc);

		len = sizeof(off);
		if (copy_to_user(optval, &off, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

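/* Map one of the rings into user space. The mmap offset (vm_pgoff) selects
 * which ring: RX, TX, fill or completion.
 */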
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->tx_completion_lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	return 0;

out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);