1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
11 #include <uapi/linux/io_uring.h>
15 #include "alloc_cache.h"
20 #if defined(CONFIG_NET)
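/*
 * Everything below is compiled only with CONFIG_NET: the prep/issue
 * handlers for the socket opcodes (shutdown, send/recv, sendmsg/recvmsg,
 * zero-copy sends, accept, socket, connect) and the request-private
 * state they keep in the per-opcode command area of the io_kiocb.
 */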
28 struct sockaddr __user *addr;
47 struct sockaddr __user *addr;
50 bool seen_econnaborted;
56 struct compat_msghdr __user *umsg_compat;
57 struct user_msghdr __user *umsg;
63 unsigned nr_multishot_loops;
65 /* initialised and used only by !msg send variants */
69 void __user *msg_control;
70 /* used only for send zerocopy */
71 struct io_kiocb *notif;
75 /* Number of times we'll try to do receives if there's more data. If we
76  * exceed this limit, then add us to the back of the queue and retry from
77  * there. This helps fairness between flooding clients. */
79 #define MULTISHOT_MAX_RETRY 32
81 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
83 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
85 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
86 sqe->buf_index || sqe->splice_fd_in))
89 shutdown->how = READ_ONCE(sqe->len);
90 req->flags |= REQ_F_FORCE_ASYNC;
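/*
 * Shutdown is always punted to a blocking context: prep sets
 * REQ_F_FORCE_ASYNC above, which is why io_shutdown() below can
 * WARN_ON_ONCE() if it ever sees IO_URING_F_NONBLOCK.
 */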
94 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
96 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
100 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
102 sock = sock_from_file(req->file);
106 ret = __sys_shutdown_sock(sock, shutdown->how);
107 io_req_set_res(req, ret, 0);
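/*
 * A short transfer is only worth retrying when the caller asked for
 * MSG_WAITALL and the socket is stream or seqpacket based, where a
 * follow-up call continues the same data stream. Datagram sockets are
 * never retried: a second call would return a different datagram.
 */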
111 static bool io_net_retry(struct socket *sock, int flags)
113 if (!(flags & MSG_WAITALL))
115 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
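/*
 * Completed requests try to park their struct io_async_msghdr in the
 * per-ring netmsg_cache instead of freeing it, but only when the ring
 * lock is held (i.e. not IO_URING_F_UNLOCKED); io_msg_alloc_async()
 * below pulls entries back out of the same cache.
 */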
118 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
120 struct io_async_msghdr *hdr = req->async_data;
122 if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
125 /* Let normal cleanup path reap it if we fail adding to the cache */
126 if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
127 req->async_data = NULL;
128 req->flags &= ~REQ_F_ASYNC_DATA;
132 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
133 unsigned int issue_flags)
135 struct io_ring_ctx *ctx = req->ctx;
136 struct io_cache_entry *entry;
137 struct io_async_msghdr *hdr;
139 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
140 entry = io_alloc_cache_get(&ctx->netmsg_cache);
142 hdr = container_of(entry, struct io_async_msghdr, cache);
143 hdr->free_iov = NULL;
144 req->flags |= REQ_F_ASYNC_DATA;
145 req->async_data = hdr;
150 if (!io_alloc_async_data(req)) {
151 hdr = req->async_data;
152 hdr->free_iov = NULL;
158 static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
160 /* ->prep_async is always called from the submission context */
161 return io_msg_alloc_async(req, 0);
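/*
 * When an issue attempt cannot complete (e.g. -EAGAIN), the on-stack
 * msghdr is copied into persistent async data so the retry can run from
 * io-wq or poll. Pointers into the old on-stack copy (msg_name, the
 * fast_iov array) must be re-pointed at the new allocation, which is
 * what the fixups below do.
 */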
164 static int io_setup_async_msg(struct io_kiocb *req,
165 struct io_async_msghdr *kmsg,
166 unsigned int issue_flags)
168 struct io_async_msghdr *async_msg;
170 if (req_has_async_data(req))
172 async_msg = io_msg_alloc_async(req, issue_flags);
174 kfree(kmsg->free_iov);
177 req->flags |= REQ_F_NEED_CLEANUP;
178 memcpy(async_msg, kmsg, sizeof(*kmsg));
179 if (async_msg->msg.msg_name)
180 async_msg->msg.msg_name = &async_msg->addr;
182 if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
185 /* if we're using fast_iov, set it to the new one */
186 if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
187 size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
188 async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
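/*
 * The copy_hdr helpers below pull a user_msghdr (or compat_msghdr) in
 * from userspace. With REQ_F_BUFFER_SELECT the iovec is not imported:
 * provided-buffer requests may pass at most one iovec (or none), since
 * the actual buffer is picked at issue time.
 */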
195 static int io_compat_msg_copy_hdr(struct io_kiocb *req,
196 struct io_async_msghdr *iomsg,
197 struct compat_msghdr *msg, int ddir)
199 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
200 struct compat_iovec __user *uiov;
203 if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
206 uiov = compat_ptr(msg->msg_iov);
207 if (req->flags & REQ_F_BUFFER_SELECT) {
210 iomsg->free_iov = NULL;
211 if (msg->msg_iovlen == 0) {
213 } else if (msg->msg_iovlen > 1) {
216 if (!access_ok(uiov, sizeof(*uiov)))
218 if (__get_user(clen, &uiov->iov_len))
228 iomsg->free_iov = iomsg->fast_iov;
229 ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
230 UIO_FASTIOV, &iomsg->free_iov,
231 &iomsg->msg.msg_iter, true);
232 if (unlikely(ret < 0))
239 static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
240 struct user_msghdr *msg, int ddir)
242 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
245 if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
248 if (req->flags & REQ_F_BUFFER_SELECT) {
249 if (msg->msg_iovlen == 0) {
250 sr->len = iomsg->fast_iov[0].iov_len = 0;
251 iomsg->fast_iov[0].iov_base = NULL;
252 iomsg->free_iov = NULL;
253 } else if (msg->msg_iovlen > 1) {
256 if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
257 sizeof(*msg->msg_iov)))
259 sr->len = iomsg->fast_iov[0].iov_len;
260 iomsg->free_iov = NULL;
266 iomsg->free_iov = iomsg->fast_iov;
267 ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
268 &iomsg->free_iov, &iomsg->msg.msg_iter, false);
269 if (unlikely(ret < 0))
275 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
276 struct io_async_msghdr *iomsg)
278 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
279 struct user_msghdr msg;
282 iomsg->msg.msg_name = &iomsg->addr;
283 iomsg->msg.msg_iter.nr_segs = 0;
286 if (unlikely(req->ctx->compat)) {
287 struct compat_msghdr cmsg;
289 ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
293 return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
297 ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
301 ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
303 /* save msg_control as sys_sendmsg() overwrites it */
304 sr->msg_control = iomsg->msg.msg_control_user;
308 int io_send_prep_async(struct io_kiocb *req)
310 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
311 struct io_async_msghdr *io;
314 if (!zc->addr || req_has_async_data(req))
316 io = io_msg_alloc_async_prep(req);
319 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
323 static int io_setup_async_addr(struct io_kiocb *req,
324 struct sockaddr_storage *addr_storage,
325 unsigned int issue_flags)
327 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
328 struct io_async_msghdr *io;
330 if (!sr->addr || req_has_async_data(req))
332 io = io_msg_alloc_async(req, issue_flags);
335 memcpy(&io->addr, addr_storage, sizeof(io->addr));
339 int io_sendmsg_prep_async(struct io_kiocb *req)
343 if (!io_msg_alloc_async_prep(req))
345 ret = io_sendmsg_copy_hdr(req, req->async_data);
347 req->flags |= REQ_F_NEED_CLEANUP;
351 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
353 struct io_async_msghdr *io = req->async_data;
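/*
 * Prep for SEND/SENDMSG. For IORING_OP_SEND, addr2/addr_len may carry a
 * sendto(2)-style destination; SENDMSG rejects those fields. sqe->ioprio
 * holds the IORING_RECVSEND_* flags, and MSG_NOSIGNAL is always added to
 * the user-supplied msg_flags.
 */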
358 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
360 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
362 if (req->opcode == IORING_OP_SEND) {
363 if (READ_ONCE(sqe->__pad3[0]))
365 sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
366 sr->addr_len = READ_ONCE(sqe->addr_len);
367 } else if (sqe->addr2 || sqe->file_index) {
371 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
372 sr->len = READ_ONCE(sqe->len);
373 sr->flags = READ_ONCE(sqe->ioprio);
374 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
376 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
377 if (sr->msg_flags & MSG_DONTWAIT)
378 req->flags |= REQ_F_NOWAIT;
381 if (req->ctx->compat)
382 sr->msg_flags |= MSG_CMSG_COMPAT;
388 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
390 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
391 struct io_async_msghdr iomsg, *kmsg;
397 sock = sock_from_file(req->file);
401 if (req_has_async_data(req)) {
402 kmsg = req->async_data;
403 kmsg->msg.msg_control_user = sr->msg_control;
405 ret = io_sendmsg_copy_hdr(req, &iomsg);
411 if (!(req->flags & REQ_F_POLLED) &&
412 (sr->flags & IORING_RECVSEND_POLL_FIRST))
413 return io_setup_async_msg(req, kmsg, issue_flags);
415 flags = sr->msg_flags;
416 if (issue_flags & IO_URING_F_NONBLOCK)
417 flags |= MSG_DONTWAIT;
418 if (flags & MSG_WAITALL)
419 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
421 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
424 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
425 return io_setup_async_msg(req, kmsg, issue_flags);
426 if (ret > 0 && io_net_retry(sock, flags)) {
427 kmsg->msg.msg_controllen = 0;
428 kmsg->msg.msg_control = NULL;
430 req->flags |= REQ_F_PARTIAL_IO;
431 return io_setup_async_msg(req, kmsg, issue_flags);
433 if (ret == -ERESTARTSYS)
437 /* fast path, check for non-NULL to avoid function call */
439 kfree(kmsg->free_iov);
440 req->flags &= ~REQ_F_NEED_CLEANUP;
441 io_netmsg_recycle(req, issue_flags);
444 else if (sr->done_io)
446 io_req_set_res(req, ret, 0);
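/*
 * Both io_sendmsg() above and io_send() below handle short sends the
 * same way: if io_net_retry() allows it, the bytes already written are
 * accumulated in sr->done_io, the request is marked REQ_F_PARTIAL_IO and
 * set up for a retry, and the final CQE reports the total once the send
 * finishes (or fails).
 */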
450 int io_send(struct io_kiocb *req, unsigned int issue_flags)
452 struct sockaddr_storage __address;
453 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
461 msg.msg_control = NULL;
462 msg.msg_controllen = 0;
467 if (req_has_async_data(req)) {
468 struct io_async_msghdr *io = req->async_data;
470 msg.msg_name = &io->addr;
472 ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
473 if (unlikely(ret < 0))
475 msg.msg_name = (struct sockaddr *)&__address;
477 msg.msg_namelen = sr->addr_len;
480 if (!(req->flags & REQ_F_POLLED) &&
481 (sr->flags & IORING_RECVSEND_POLL_FIRST))
482 return io_setup_async_addr(req, &__address, issue_flags);
484 sock = sock_from_file(req->file);
488 ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
492 flags = sr->msg_flags;
493 if (issue_flags & IO_URING_F_NONBLOCK)
494 flags |= MSG_DONTWAIT;
495 if (flags & MSG_WAITALL)
496 min_ret = iov_iter_count(&msg.msg_iter);
498 flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
499 msg.msg_flags = flags;
500 ret = sock_sendmsg(sock, &msg);
502 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
503 return io_setup_async_addr(req, &__address, issue_flags);
505 if (ret > 0 && io_net_retry(sock, flags)) {
509 req->flags |= REQ_F_PARTIAL_IO;
510 return io_setup_async_addr(req, &__address, issue_flags);
512 if (ret == -ERESTARTSYS)
518 else if (sr->done_io)
520 io_req_set_res(req, ret, 0);
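/*
 * Multishot recvmsg reserves the front of the selected provided buffer
 * for a struct io_uring_recvmsg_out plus the requested name and control
 * space; the helper below sizes and overflow-checks that header.
 */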
524 static int io_recvmsg_mshot_prep(struct io_kiocb *req,
525 struct io_async_msghdr *iomsg,
526 int namelen, size_t controllen)
528 if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
529 (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
532 if (unlikely(namelen < 0))
534 if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
537 if (check_add_overflow(hdr, controllen, &hdr))
540 iomsg->namelen = namelen;
541 iomsg->controllen = controllen;
548 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
549 struct io_async_msghdr *iomsg)
551 struct user_msghdr msg;
554 iomsg->msg.msg_name = &iomsg->addr;
555 iomsg->msg.msg_iter.nr_segs = 0;
558 if (unlikely(req->ctx->compat)) {
559 struct compat_msghdr cmsg;
561 ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
565 ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
569 return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
570 cmsg.msg_controllen);
574 ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
578 ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
582 return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
586 int io_recvmsg_prep_async(struct io_kiocb *req)
588 struct io_async_msghdr *iomsg;
591 if (!io_msg_alloc_async_prep(req))
593 iomsg = req->async_data;
594 ret = io_recvmsg_copy_hdr(req, iomsg);
596 req->flags |= REQ_F_NEED_CLEANUP;
600 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
602 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
604 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
606 if (unlikely(sqe->file_index || sqe->addr2))
609 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
610 sr->len = READ_ONCE(sqe->len);
611 sr->flags = READ_ONCE(sqe->ioprio);
612 if (sr->flags & ~(RECVMSG_FLAGS))
614 sr->msg_flags = READ_ONCE(sqe->msg_flags);
615 if (sr->msg_flags & MSG_DONTWAIT)
616 req->flags |= REQ_F_NOWAIT;
617 if (sr->msg_flags & MSG_ERRQUEUE)
618 req->flags |= REQ_F_CLEAR_POLLIN;
619 if (sr->flags & IORING_RECV_MULTISHOT) {
620 if (!(req->flags & REQ_F_BUFFER_SELECT))
622 if (sr->msg_flags & MSG_WAITALL)
624 if (req->opcode == IORING_OP_RECV && sr->len)
626 req->flags |= REQ_F_APOLL_MULTISHOT;
628 /* Store the buffer group for this multishot receive separately,
629  * as if we end up doing an io-wq based issue that selects a
630  * buffer, it has to be committed immediately and that will
631  * clear ->buf_list. This means we lose the link to the buffer
632  * list, and the eventual buffer put on completion then cannot deduce which buffer group it came from. */
635 sr->buf_group = req->buf_index;
639 if (req->ctx->compat)
640 sr->msg_flags |= MSG_CMSG_COMPAT;
643 sr->nr_multishot_loops = 0;
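/*
 * Userspace view (illustrative sketch, not part of this file): a
 * multishot receive is armed with IORING_OP_RECV/RECVMSG, sqe->ioprio
 * carrying IORING_RECV_MULTISHOT, a zero length for OP_RECV, and
 * IOSQE_BUFFER_SELECT with sqe->buf_group naming a provided-buffer
 * group. Each completion then carries IORING_CQE_F_MORE while the
 * request stays armed; a CQE without that flag means the request has
 * terminated and must be re-submitted.
 */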
647 static inline void io_recv_prep_retry(struct io_kiocb *req)
649 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
652 sr->len = 0; /* get from the provided buffer */
653 req->buf_index = sr->buf_group;
657 /* Finishes io_recv and io_recvmsg.
659  * Returns true if it is actually finished, or false if it should run
660  * again (for multishot). */
662 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
663 struct msghdr *msg, bool mshot_finished,
664 unsigned issue_flags)
668 cflags = io_put_kbuf(req, issue_flags);
669 if (msg->msg_inq && msg->msg_inq != -1)
670 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
672 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
673 io_req_set_res(req, *ret, cflags);
682 /* Fill CQE for this receive and see if we should keep trying to
683  * receive from this socket. */
685 if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
686 *ret, cflags | IORING_CQE_F_MORE)) {
687 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
688 int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
690 io_recv_prep_retry(req);
691 /* Known not-empty or unknown state, retry */
692 if (cflags & IORING_CQE_F_SOCK_NONEMPTY || msg->msg_inq == -1) {
693 if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
695 /* mshot retries exceeded, force a requeue */
696 sr->nr_multishot_loops = 0;
697 mshot_retry_ret = IOU_REQUEUE;
699 if (issue_flags & IO_URING_F_MULTISHOT)
700 *ret = mshot_retry_ret;
705 /* Otherwise stop multishot but use the current result. */
707 io_req_set_res(req, *ret, cflags);
709 if (issue_flags & IO_URING_F_MULTISHOT)
710 *ret = IOU_STOP_MULTISHOT;
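/*
 * Note on the multishot loop above: as long as CQEs can be posted and
 * the socket looks non-empty (or its state is unknown), the receive is
 * retried inline, but only up to MULTISHOT_MAX_RETRY times; after that
 * it is requeued (IOU_REQUEUE) so one flooding socket cannot starve
 * others.
 */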
716 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
717 struct io_sr_msg *sr, void __user **buf,
720 unsigned long ubuf = (unsigned long) *buf;
723 hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
728 if (kmsg->controllen) {
729 unsigned long control = ubuf + hdr - kmsg->controllen;
731 kmsg->msg.msg_control_user = (void __user *) control;
732 kmsg->msg.msg_controllen = kmsg->controllen;
735 sr->buf = *buf; /* stash for later copy */
736 *buf = (void __user *) (ubuf + hdr);
737 kmsg->payloadlen = *len = *len - hdr;
741 struct io_recvmsg_multishot_hdr {
742 struct io_uring_recvmsg_out msg;
743 struct sockaddr_storage addr;
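/*
 * io_recvmsg_multishot() receives straight into the payload area that
 * io_recvmsg_prep_multishot() carved out after the reserved header, then
 * copies struct io_uring_recvmsg_out (and as much of the source address
 * as fits) back to the start of the provided buffer. Its return value is
 * the number of buffer bytes consumed, header included.
 */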
746 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
747 struct io_async_msghdr *kmsg,
748 unsigned int flags, bool *finished)
752 struct io_recvmsg_multishot_hdr hdr;
755 kmsg->msg.msg_name = &hdr.addr;
756 kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
757 kmsg->msg.msg_namelen = 0;
759 if (sock->file->f_flags & O_NONBLOCK)
760 flags |= MSG_DONTWAIT;
762 err = sock_recvmsg(sock, &kmsg->msg, flags);
763 *finished = err <= 0;
767 hdr.msg = (struct io_uring_recvmsg_out) {
768 .controllen = kmsg->controllen - kmsg->msg.msg_controllen,
769 .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
772 hdr.msg.payloadlen = err;
773 if (err > kmsg->payloadlen)
774 err = kmsg->payloadlen;
776 copy_len = sizeof(struct io_uring_recvmsg_out);
777 if (kmsg->msg.msg_namelen > kmsg->namelen)
778 copy_len += kmsg->namelen;
780 copy_len += kmsg->msg.msg_namelen;
783 /* "fromlen shall refer to the value before truncation.." */
786 hdr.msg.namelen = kmsg->msg.msg_namelen;
788 /* ensure that there is no gap between hdr and sockaddr_storage */
789 BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
790 sizeof(struct io_uring_recvmsg_out));
791 if (copy_to_user(io->buf, &hdr, copy_len)) {
796 return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
797 kmsg->controllen + err;
800 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
802 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
803 struct io_async_msghdr iomsg, *kmsg;
806 int ret, min_ret = 0;
807 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
808 bool mshot_finished = true;
810 sock = sock_from_file(req->file);
814 if (req_has_async_data(req)) {
815 kmsg = req->async_data;
817 ret = io_recvmsg_copy_hdr(req, &iomsg);
823 if (!(req->flags & REQ_F_POLLED) &&
824 (sr->flags & IORING_RECVSEND_POLL_FIRST))
825 return io_setup_async_msg(req, kmsg, issue_flags);
828 if (io_do_buffer_select(req)) {
830 size_t len = sr->len;
832 buf = io_buffer_select(req, &len, issue_flags);
836 if (req->flags & REQ_F_APOLL_MULTISHOT) {
837 ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
839 io_kbuf_recycle(req, issue_flags);
844 iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
847 flags = sr->msg_flags;
849 flags |= MSG_DONTWAIT;
851 kmsg->msg.msg_get_inq = 1;
852 kmsg->msg.msg_inq = -1;
853 if (req->flags & REQ_F_APOLL_MULTISHOT) {
854 ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
857 /* disable partial retry for recvmsg with cmsg attached */
858 if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
859 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
861 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
866 if (ret == -EAGAIN && force_nonblock) {
867 ret = io_setup_async_msg(req, kmsg, issue_flags);
868 if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
869 io_kbuf_recycle(req, issue_flags);
870 return IOU_ISSUE_SKIP_COMPLETE;
874 if (ret > 0 && io_net_retry(sock, flags)) {
876 req->flags |= REQ_F_PARTIAL_IO;
877 return io_setup_async_msg(req, kmsg, issue_flags);
879 if (ret == -ERESTARTSYS)
882 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
888 else if (sr->done_io)
891 io_kbuf_recycle(req, issue_flags);
893 if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
894 goto retry_multishot;
896 if (mshot_finished) {
897 /* fast path, check for non-NULL to avoid function call */
899 kfree(kmsg->free_iov);
900 io_netmsg_recycle(req, issue_flags);
901 req->flags &= ~REQ_F_NEED_CLEANUP;
902 } else if (ret == -EAGAIN)
903 return io_setup_async_msg(req, kmsg, issue_flags);
908 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
910 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
914 int ret, min_ret = 0;
915 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
916 size_t len = sr->len;
918 if (!(req->flags & REQ_F_POLLED) &&
919 (sr->flags & IORING_RECVSEND_POLL_FIRST))
922 sock = sock_from_file(req->file);
928 msg.msg_control = NULL;
930 msg.msg_controllen = 0;
935 if (io_do_buffer_select(req)) {
938 buf = io_buffer_select(req, &len, issue_flags);
945 ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
952 flags = sr->msg_flags;
954 flags |= MSG_DONTWAIT;
955 if (flags & MSG_WAITALL)
956 min_ret = iov_iter_count(&msg.msg_iter);
958 ret = sock_recvmsg(sock, &msg, flags);
960 if (ret == -EAGAIN && force_nonblock) {
961 if (issue_flags & IO_URING_F_MULTISHOT) {
962 io_kbuf_recycle(req, issue_flags);
963 return IOU_ISSUE_SKIP_COMPLETE;
968 if (ret > 0 && io_net_retry(sock, flags)) {
972 req->flags |= REQ_F_PARTIAL_IO;
975 if (ret == -ERESTARTSYS)
978 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
985 else if (sr->done_io)
988 io_kbuf_recycle(req, issue_flags);
990 if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
991 goto retry_multishot;
996 void io_send_zc_cleanup(struct io_kiocb *req)
998 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
999 struct io_async_msghdr *io;
1001 if (req_has_async_data(req)) {
1002 io = req->async_data;
1003 /* might be ->fast_iov if *msg_copy_hdr failed */
1004 if (io->free_iov != io->fast_iov)
1005 kfree(io->free_iov);
1008 io_notif_flush(zc->notif);
1013 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
1014 #define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
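/*
 * Zero-copy sends pair the request with a notification (zc->notif): the
 * send itself completes with IORING_CQE_F_MORE, and a second CQE flagged
 * IORING_CQE_F_NOTIF is posted once the kernel no longer references the
 * user pages, so the buffer must not be reused before that. Fixed
 * (registered) buffers are selected with IORING_RECVSEND_FIXED_BUF and
 * sqe->buf_index.
 */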
1016 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1018 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1019 struct io_ring_ctx *ctx = req->ctx;
1020 struct io_kiocb *notif;
1022 if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1024 /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
1025 if (req->flags & REQ_F_CQE_SKIP)
1028 notif = zc->notif = io_alloc_notif(ctx);
1031 notif->cqe.user_data = req->cqe.user_data;
1033 notif->cqe.flags = IORING_CQE_F_NOTIF;
1034 req->flags |= REQ_F_NEED_CLEANUP;
1036 zc->flags = READ_ONCE(sqe->ioprio);
1037 if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
1038 if (zc->flags & ~IO_ZC_FLAGS_VALID)
1040 if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1041 io_notif_set_extended(notif);
1042 io_notif_to_data(notif)->zc_report = true;
1046 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1047 unsigned idx = READ_ONCE(sqe->buf_index);
1049 if (unlikely(idx >= ctx->nr_user_bufs))
1051 idx = array_index_nospec(idx, ctx->nr_user_bufs);
1052 req->imu = READ_ONCE(ctx->user_bufs[idx]);
1053 io_req_set_rsrc_node(notif, ctx, 0);
1056 if (req->opcode == IORING_OP_SEND_ZC) {
1057 if (READ_ONCE(sqe->__pad3[0]))
1059 zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1060 zc->addr_len = READ_ONCE(sqe->addr_len);
1062 if (unlikely(sqe->addr2 || sqe->file_index))
1064 if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1068 zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1069 zc->len = READ_ONCE(sqe->len);
1070 zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1071 if (zc->msg_flags & MSG_DONTWAIT)
1072 req->flags |= REQ_F_NOWAIT;
1076 #ifdef CONFIG_COMPAT
1077 if (req->ctx->compat)
1078 zc->msg_flags |= MSG_CMSG_COMPAT;
1083 static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1084 struct iov_iter *from, size_t length)
1086 skb_zcopy_downgrade_managed(skb);
1087 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
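/*
 * For zero-copy, io_sg_from_iter() below maps the user pages referenced
 * by the bvec iterator directly as skb frags (no data copy), accounting
 * their truesize against the socket; if the skb cannot take managed
 * frags it falls back to the copying __zerocopy_sg_from_iter() path.
 */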
1090 static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1091 struct iov_iter *from, size_t length)
1093 struct skb_shared_info *shinfo = skb_shinfo(skb);
1094 int frag = shinfo->nr_frags;
1096 struct bvec_iter bi;
1098 unsigned long truesize = 0;
1101 shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1102 else if (unlikely(!skb_zcopy_managed(skb)))
1103 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1105 bi.bi_size = min(from->count, length);
1106 bi.bi_bvec_done = from->iov_offset;
1109 while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1110 struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1113 truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1114 __skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1115 v.bv_offset, v.bv_len);
1116 bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1121 shinfo->nr_frags = frag;
1122 from->bvec += bi.bi_idx;
1123 from->nr_segs -= bi.bi_idx;
1124 from->count -= copied;
1125 from->iov_offset = bi.bi_bvec_done;
1127 skb->data_len += copied;
1129 skb->truesize += truesize;
1131 if (sk && sk->sk_type == SOCK_STREAM) {
1132 sk_wmem_queued_add(sk, truesize);
1133 if (!skb_zcopy_pure(skb))
1134 sk_mem_charge(sk, truesize);
1136 refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1141 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1143 struct sockaddr_storage __address;
1144 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1146 struct socket *sock;
1148 int ret, min_ret = 0;
1150 sock = sock_from_file(req->file);
1151 if (unlikely(!sock))
1153 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1156 msg.msg_name = NULL;
1157 msg.msg_control = NULL;
1158 msg.msg_controllen = 0;
1159 msg.msg_namelen = 0;
1162 if (req_has_async_data(req)) {
1163 struct io_async_msghdr *io = req->async_data;
1165 msg.msg_name = &io->addr;
1167 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1168 if (unlikely(ret < 0))
1170 msg.msg_name = (struct sockaddr *)&__address;
1172 msg.msg_namelen = zc->addr_len;
1175 if (!(req->flags & REQ_F_POLLED) &&
1176 (zc->flags & IORING_RECVSEND_POLL_FIRST))
1177 return io_setup_async_addr(req, &__address, issue_flags);
1179 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1180 ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1181 (u64)(uintptr_t)zc->buf, zc->len);
1184 msg.sg_from_iter = io_sg_from_iter;
1186 io_notif_set_extended(zc->notif);
1187 ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
1190 ret = io_notif_account_mem(zc->notif, zc->len);
1193 msg.sg_from_iter = io_sg_from_iter_iovec;
1196 msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1197 if (issue_flags & IO_URING_F_NONBLOCK)
1198 msg_flags |= MSG_DONTWAIT;
1199 if (msg_flags & MSG_WAITALL)
1200 min_ret = iov_iter_count(&msg.msg_iter);
1201 msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1203 msg.msg_flags = msg_flags;
1204 msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1205 ret = sock_sendmsg(sock, &msg);
1207 if (unlikely(ret < min_ret)) {
1208 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1209 return io_setup_async_addr(req, &__address, issue_flags);
1211 if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1215 req->flags |= REQ_F_PARTIAL_IO;
1216 return io_setup_async_addr(req, &__address, issue_flags);
1218 if (ret == -ERESTARTSYS)
1225 else if (zc->done_io)
1229 /* If we're in io-wq we can't rely on tw ordering guarantees, defer
1230  * flushing notif to io_send_zc_cleanup() */
1232 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1233 io_notif_flush(zc->notif);
1234 req->flags &= ~REQ_F_NEED_CLEANUP;
1236 io_req_set_res(req, ret, IORING_CQE_F_MORE);
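/*
 * On success both zero-copy variants post their result with
 * IORING_CQE_F_MORE set; the matching IORING_CQE_F_NOTIF completion
 * arrives later, once io_notif_flush() (here, or from
 * io_send_zc_cleanup() for the io-wq case) drops the final reference to
 * the notification.
 */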
1240 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1242 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1243 struct io_async_msghdr iomsg, *kmsg;
1244 struct socket *sock;
1246 int ret, min_ret = 0;
1248 io_notif_set_extended(sr->notif);
1250 sock = sock_from_file(req->file);
1251 if (unlikely(!sock))
1253 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1256 if (req_has_async_data(req)) {
1257 kmsg = req->async_data;
1258 kmsg->msg.msg_control_user = sr->msg_control;
1260 ret = io_sendmsg_copy_hdr(req, &iomsg);
1266 if (!(req->flags & REQ_F_POLLED) &&
1267 (sr->flags & IORING_RECVSEND_POLL_FIRST))
1268 return io_setup_async_msg(req, kmsg, issue_flags);
1270 flags = sr->msg_flags | MSG_ZEROCOPY;
1271 if (issue_flags & IO_URING_F_NONBLOCK)
1272 flags |= MSG_DONTWAIT;
1273 if (flags & MSG_WAITALL)
1274 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1276 kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1277 kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1278 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1280 if (unlikely(ret < min_ret)) {
1281 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1282 return io_setup_async_msg(req, kmsg, issue_flags);
1284 if (ret > 0 && io_net_retry(sock, flags)) {
1286 req->flags |= REQ_F_PARTIAL_IO;
1287 return io_setup_async_msg(req, kmsg, issue_flags);
1289 if (ret == -ERESTARTSYS)
1293 /* fast path, check for non-NULL to avoid function call */
1294 if (kmsg->free_iov) {
1295 kfree(kmsg->free_iov);
1296 kmsg->free_iov = NULL;
1299 io_netmsg_recycle(req, issue_flags);
1302 else if (sr->done_io)
1306 /* If we're in io-wq we can't rely on tw ordering guarantees, defer
1307  * flushing notif to io_send_zc_cleanup() */
1309 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1310 io_notif_flush(sr->notif);
1311 req->flags &= ~REQ_F_NEED_CLEANUP;
1313 io_req_set_res(req, ret, IORING_CQE_F_MORE);
1317 void io_sendrecv_fail(struct io_kiocb *req)
1319 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1321 if (req->flags & REQ_F_PARTIAL_IO)
1322 req->cqe.res = sr->done_io;
1324 if ((req->flags & REQ_F_NEED_CLEANUP) &&
1325 (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1326 req->cqe.flags |= IORING_CQE_F_MORE;
1329 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1331 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1334 if (sqe->len || sqe->buf_index)
1337 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1338 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1339 accept->flags = READ_ONCE(sqe->accept_flags);
1340 accept->nofile = rlimit(RLIMIT_NOFILE);
1341 flags = READ_ONCE(sqe->ioprio);
1342 if (flags & ~IORING_ACCEPT_MULTISHOT)
1345 accept->file_slot = READ_ONCE(sqe->file_index);
1346 if (accept->file_slot) {
1347 if (accept->flags & SOCK_CLOEXEC)
1349 if (flags & IORING_ACCEPT_MULTISHOT &&
1350 accept->file_slot != IORING_FILE_INDEX_ALLOC)
1353 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1355 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1356 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1357 if (flags & IORING_ACCEPT_MULTISHOT)
1358 req->flags |= REQ_F_APOLL_MULTISHOT;
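/*
 * Accept supports multishot (IORING_ACCEPT_MULTISHOT in sqe->ioprio): a
 * single SQE keeps accepting connections, posting one CQE with
 * IORING_CQE_F_MORE per accepted socket, until an error occurs or CQE
 * posting fails. Fixed-file installs in multishot mode must use
 * IORING_FILE_INDEX_ALLOC, as checked in prep above.
 */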
1362 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1364 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1365 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1366 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1367 bool fixed = !!accept->file_slot;
1373 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1374 if (unlikely(fd < 0))
1377 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1382 ret = PTR_ERR(file);
1383 if (ret == -EAGAIN && force_nonblock) {
1385 /* if it's multishot and polled, we don't need to
1386  * return EAGAIN to arm the poll infra since it
1387  * has already been done */
1389 if (issue_flags & IO_URING_F_MULTISHOT)
1390 return IOU_ISSUE_SKIP_COMPLETE;
1393 if (ret == -ERESTARTSYS)
1396 } else if (!fixed) {
1397 fd_install(fd, file);
1400 ret = io_fixed_fd_install(req, issue_flags, file,
1404 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1405 io_req_set_res(req, ret, 0);
1411 if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
1412 ret, IORING_CQE_F_MORE))
1415 io_req_set_res(req, ret, 0);
1416 return IOU_STOP_MULTISHOT;
1419 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1421 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1423 if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1426 sock->domain = READ_ONCE(sqe->fd);
1427 sock->type = READ_ONCE(sqe->off);
1428 sock->protocol = READ_ONCE(sqe->len);
1429 sock->file_slot = READ_ONCE(sqe->file_index);
1430 sock->nofile = rlimit(RLIMIT_NOFILE);
1432 sock->flags = sock->type & ~SOCK_TYPE_MASK;
1433 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1435 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1440 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1442 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1443 bool fixed = !!sock->file_slot;
1448 fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1449 if (unlikely(fd < 0))
1452 file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1456 ret = PTR_ERR(file);
1457 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1459 if (ret == -ERESTARTSYS)
1462 } else if (!fixed) {
1463 fd_install(fd, file);
1466 ret = io_fixed_fd_install(req, issue_flags, file,
1469 io_req_set_res(req, ret, 0);
1473 int io_connect_prep_async(struct io_kiocb *req)
1475 struct io_async_connect *io = req->async_data;
1476 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1478 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1481 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1483 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1485 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1488 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1489 conn->addr_len = READ_ONCE(sqe->addr2);
1490 conn->in_progress = conn->seen_econnaborted = false;
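/*
 * Non-blocking connects that return -EINPROGRESS are retried via poll;
 * once the socket becomes writable the retry may see -EISCONN (or, for
 * e.g. bluetooth, -EBADFD), in which case the real result is taken from
 * sock_error(). A single -ECONNABORTED is also retried once.
 */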
1494 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1496 struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1497 struct io_async_connect __io, *io;
1498 unsigned file_flags;
1500 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1502 if (req_has_async_data(req)) {
1503 io = req->async_data;
1505 ret = move_addr_to_kernel(connect->addr,
1513 file_flags = force_nonblock ? O_NONBLOCK : 0;
1515 ret = __sys_connect_file(req->file, &io->address,
1516 connect->addr_len, file_flags);
1517 if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1518 && force_nonblock) {
1519 if (ret == -EINPROGRESS) {
1520 connect->in_progress = true;
1521 } else if (ret == -ECONNABORTED) {
1522 if (connect->seen_econnaborted)
1524 connect->seen_econnaborted = true;
1526 if (req_has_async_data(req))
1528 if (io_alloc_async_data(req)) {
1532 memcpy(req->async_data, &__io, sizeof(__io));
1535 if (connect->in_progress) {
1537 /* At least bluetooth will return -EBADFD on a re-connect
1538  * attempt, and it's (supposedly) also valid to get -EISCONN
1539  * which means the previous result is good. For both of these,
1540  * grab the sock_error() and use that for the completion. */
1542 if (ret == -EBADFD || ret == -EISCONN)
1543 ret = sock_error(sock_from_file(req->file)->sk);
1545 if (ret == -ERESTARTSYS)
1550 io_req_set_res(req, ret, 0);
1554 void io_netmsg_cache_free(struct io_cache_entry *entry)
1556 kfree(container_of(entry, struct io_async_msghdr, cache));