1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
6 #include <linux/net.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
11 #include <uapi/linux/io_uring.h>
13 #include "io_uring.h"
14 #include "kbuf.h"
15 #include "alloc_cache.h"
16 #include "net.h"
17 #include "notif.h"
18 #include "rsrc.h"
20 #if defined(CONFIG_NET)
28 struct sockaddr __user *addr;
47 struct sockaddr __user *addr;
50 bool seen_econnaborted;
56 struct compat_msghdr __user *umsg_compat;
57 struct user_msghdr __user *umsg;
64 /* initialised and used only by !msg send variants */
68 void __user *msg_control;
69 /* used only for send zerocopy */
70 struct io_kiocb *notif;
73 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
75 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
77 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
78 sqe->buf_index || sqe->splice_fd_in))
81 shutdown->how = READ_ONCE(sqe->len);
85 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
87 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
91 if (issue_flags & IO_URING_F_NONBLOCK)
94 sock = sock_from_file(req->file);
98 ret = __sys_shutdown_sock(sock, shutdown->how);
99 io_req_set_res(req, ret, 0);
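/*
 * A short transfer is only worth retrying when MSG_WAITALL was requested
 * and the socket is stream-like (SOCK_STREAM or SOCK_SEQPACKET); datagram
 * sockets deliver each message in a single call.
 */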
103 static bool io_net_retry(struct socket *sock, int flags)
105 if (!(flags & MSG_WAITALL))
107 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
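/*
 * Try to park the request's async msghdr in the per-ring netmsg_cache so a
 * later send/recv can reuse it. Only attempted when the ring lock is held,
 * i.e. issue_flags does not carry IO_URING_F_UNLOCKED.
 */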
110 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
112 struct io_async_msghdr *hdr = req->async_data;
114 if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
117 /* Let normal cleanup path reap it if we fail adding to the cache */
118 if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
119 req->async_data = NULL;
120 req->flags &= ~REQ_F_ASYNC_DATA;
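/*
 * Allocate the async msghdr backing a send/recv request: with the ring lock
 * held a recycled entry from ctx->netmsg_cache is preferred, otherwise fall
 * back to a fresh io_alloc_async_data() allocation.
 */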
124 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
125 unsigned int issue_flags)
127 struct io_ring_ctx *ctx = req->ctx;
128 struct io_cache_entry *entry;
129 struct io_async_msghdr *hdr;
131 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
132 entry = io_alloc_cache_get(&ctx->netmsg_cache);
134 hdr = container_of(entry, struct io_async_msghdr, cache);
135 hdr->free_iov = NULL;
136 req->flags |= REQ_F_ASYNC_DATA;
137 req->async_data = hdr;
142 if (!io_alloc_async_data(req)) {
143 hdr = req->async_data;
144 hdr->free_iov = NULL;
150 static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
152 /* ->prep_async is always called from the submission context */
153 return io_msg_alloc_async(req, 0);
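/*
 * Preserve the on-stack msghdr for an async retry: copy it into the
 * request's async data and repoint msg_name and the fast_iov iterator at
 * the persistent copy so the retried submission sees valid memory.
 */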
156 static int io_setup_async_msg(struct io_kiocb *req,
157 struct io_async_msghdr *kmsg,
158 unsigned int issue_flags)
160 struct io_async_msghdr *async_msg;
162 if (req_has_async_data(req))
164 async_msg = io_msg_alloc_async(req, issue_flags);
166 kfree(kmsg->free_iov);
169 req->flags |= REQ_F_NEED_CLEANUP;
170 memcpy(async_msg, kmsg, sizeof(*kmsg));
171 if (async_msg->msg.msg_name)
172 async_msg->msg.msg_name = &async_msg->addr;
174 if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
177 /* if we're using fast_iov, set it to the new one */
178 if (!kmsg->free_iov) {
179 size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
180 async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
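/*
 * Copy a compat (32-bit) msghdr from userspace and import its iovec. With
 * provided buffers only a zero or single entry iovec is accepted, and its
 * length is used for buffer selection; io_msg_copy_hdr() below is the
 * native-ABI equivalent.
 */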
187 static int io_compat_msg_copy_hdr(struct io_kiocb *req,
188 struct io_async_msghdr *iomsg,
189 struct compat_msghdr *msg, int ddir)
191 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
192 struct compat_iovec __user *uiov;
195 if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
198 uiov = compat_ptr(msg->msg_iov);
199 if (req->flags & REQ_F_BUFFER_SELECT) {
202 iomsg->free_iov = NULL;
203 if (msg->msg_iovlen == 0) {
205 } else if (msg->msg_iovlen > 1) {
208 if (!access_ok(uiov, sizeof(*uiov)))
210 if (__get_user(clen, &uiov->iov_len))
220 iomsg->free_iov = iomsg->fast_iov;
221 ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
222 UIO_FASTIOV, &iomsg->free_iov,
223 &iomsg->msg.msg_iter, true);
224 if (unlikely(ret < 0))
231 static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
232 struct user_msghdr *msg, int ddir)
234 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
237 if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
240 if (req->flags & REQ_F_BUFFER_SELECT) {
241 if (msg->msg_iovlen == 0) {
242 sr->len = iomsg->fast_iov[0].iov_len = 0;
243 iomsg->fast_iov[0].iov_base = NULL;
244 iomsg->free_iov = NULL;
245 } else if (msg->msg_iovlen > 1) {
248 if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
249 sizeof(*msg->msg_iov)))
251 sr->len = iomsg->fast_iov[0].iov_len;
252 iomsg->free_iov = NULL;
258 iomsg->free_iov = iomsg->fast_iov;
259 ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
260 &iomsg->free_iov, &iomsg->msg.msg_iter, false);
261 if (unlikely(ret < 0))
267 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
268 struct io_async_msghdr *iomsg)
270 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
271 struct user_msghdr msg;
274 iomsg->msg.msg_name = &iomsg->addr;
275 iomsg->msg.msg_iter.nr_segs = 0;
278 if (unlikely(req->ctx->compat)) {
279 struct compat_msghdr cmsg;
281 ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
285 return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
289 ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
293 ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
295 /* save msg_control as sys_sendmsg() overwrites it */
296 sr->msg_control = iomsg->msg.msg_control_user;
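/*
 * Prep-async path for IORING_OP_SEND with a destination address: copy the
 * sockaddr into async data now so it is still available when the request is
 * later issued out of line.
 */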
300 int io_send_prep_async(struct io_kiocb *req)
302 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
303 struct io_async_msghdr *io;
306 if (!zc->addr || req_has_async_data(req))
308 io = io_msg_alloc_async_prep(req);
311 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
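/*
 * Used on the -EAGAIN path of address-carrying sends: stash the already
 * resolved destination address in async data so a retry does not have to
 * copy it from userspace again.
 */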
315 static int io_setup_async_addr(struct io_kiocb *req,
316 struct sockaddr_storage *addr_storage,
317 unsigned int issue_flags)
319 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
320 struct io_async_msghdr *io;
322 if (!sr->addr || req_has_async_data(req))
324 io = io_msg_alloc_async(req, issue_flags);
327 memcpy(&io->addr, addr_storage, sizeof(io->addr));
331 int io_sendmsg_prep_async(struct io_kiocb *req)
335 if (!io_msg_alloc_async_prep(req))
337 ret = io_sendmsg_copy_hdr(req, req->async_data);
339 req->flags |= REQ_F_NEED_CLEANUP;
343 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
345 struct io_async_msghdr *io = req->async_data;
350 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
352 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
354 if (req->opcode == IORING_OP_SEND) {
355 if (READ_ONCE(sqe->__pad3[0]))
357 sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
358 sr->addr_len = READ_ONCE(sqe->addr_len);
359 } else if (sqe->addr2 || sqe->file_index) {
363 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
364 sr->len = READ_ONCE(sqe->len);
365 sr->flags = READ_ONCE(sqe->ioprio);
366 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
368 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
369 if (sr->msg_flags & MSG_DONTWAIT)
370 req->flags |= REQ_F_NOWAIT;
373 if (req->ctx->compat)
374 sr->msg_flags |= MSG_CMSG_COMPAT;
380 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
382 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
383 struct io_async_msghdr iomsg, *kmsg;
389 sock = sock_from_file(req->file);
393 if (req_has_async_data(req)) {
394 kmsg = req->async_data;
395 kmsg->msg.msg_control_user = sr->msg_control;
397 ret = io_sendmsg_copy_hdr(req, &iomsg);
403 if (!(req->flags & REQ_F_POLLED) &&
404 (sr->flags & IORING_RECVSEND_POLL_FIRST))
405 return io_setup_async_msg(req, kmsg, issue_flags);
407 flags = sr->msg_flags;
408 if (issue_flags & IO_URING_F_NONBLOCK)
409 flags |= MSG_DONTWAIT;
410 if (flags & MSG_WAITALL)
411 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
413 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
416 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
417 return io_setup_async_msg(req, kmsg, issue_flags);
418 if (ret > 0 && io_net_retry(sock, flags)) {
419 kmsg->msg.msg_controllen = 0;
420 kmsg->msg.msg_control = NULL;
422 req->flags |= REQ_F_PARTIAL_IO;
423 return io_setup_async_msg(req, kmsg, issue_flags);
425 if (ret == -ERESTARTSYS)
429 /* fast path, check for non-NULL to avoid function call */
431 kfree(kmsg->free_iov);
432 req->flags &= ~REQ_F_NEED_CLEANUP;
433 io_netmsg_recycle(req, issue_flags);
436 else if (sr->done_io)
438 io_req_set_res(req, ret, 0);
442 int io_send(struct io_kiocb *req, unsigned int issue_flags)
444 struct sockaddr_storage __address;
445 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
454 msg.msg_control = NULL;
455 msg.msg_controllen = 0;
460 if (req_has_async_data(req)) {
461 struct io_async_msghdr *io = req->async_data;
463 msg.msg_name = &io->addr;
465 ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
466 if (unlikely(ret < 0))
468 msg.msg_name = (struct sockaddr *)&__address;
470 msg.msg_namelen = sr->addr_len;
473 if (!(req->flags & REQ_F_POLLED) &&
474 (sr->flags & IORING_RECVSEND_POLL_FIRST))
475 return io_setup_async_addr(req, &__address, issue_flags);
477 sock = sock_from_file(req->file);
481 ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
485 flags = sr->msg_flags;
486 if (issue_flags & IO_URING_F_NONBLOCK)
487 flags |= MSG_DONTWAIT;
488 if (flags & MSG_WAITALL)
489 min_ret = iov_iter_count(&msg.msg_iter);
491 flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
492 msg.msg_flags = flags;
493 ret = sock_sendmsg(sock, &msg);
495 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
496 return io_setup_async_addr(req, &__address, issue_flags);
498 if (ret > 0 && io_net_retry(sock, flags)) {
502 req->flags |= REQ_F_PARTIAL_IO;
503 return io_setup_async_addr(req, &__address, issue_flags);
505 if (ret == -ERESTARTSYS)
511 else if (sr->done_io)
513 io_req_set_res(req, ret, 0);
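/*
 * For multishot recvmsg the selected buffer must hold a struct
 * io_uring_recvmsg_out plus the address and control data ahead of the
 * payload; validate that those sizes do not overflow and remember them.
 */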
517 static int io_recvmsg_mshot_prep(struct io_kiocb *req,
518 struct io_async_msghdr *iomsg,
519 int namelen, size_t controllen)
521 if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
522 (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
525 if (unlikely(namelen < 0))
527 if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
530 if (check_add_overflow(hdr, controllen, &hdr))
533 iomsg->namelen = namelen;
534 iomsg->controllen = controllen;
541 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
542 struct io_async_msghdr *iomsg)
544 struct user_msghdr msg;
547 iomsg->msg.msg_name = &iomsg->addr;
548 iomsg->msg.msg_iter.nr_segs = 0;
551 if (unlikely(req->ctx->compat)) {
552 struct compat_msghdr cmsg;
554 ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
558 ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
562 return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
563 cmsg.msg_controllen);
567 ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
571 ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
575 return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
579 int io_recvmsg_prep_async(struct io_kiocb *req)
581 struct io_async_msghdr *iomsg;
584 if (!io_msg_alloc_async_prep(req))
586 iomsg = req->async_data;
587 ret = io_recvmsg_copy_hdr(req, iomsg);
589 req->flags |= REQ_F_NEED_CLEANUP;
593 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
595 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
597 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
599 if (unlikely(sqe->file_index || sqe->addr2))
602 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
603 sr->len = READ_ONCE(sqe->len);
604 sr->flags = READ_ONCE(sqe->ioprio);
605 if (sr->flags & ~(RECVMSG_FLAGS))
607 sr->msg_flags = READ_ONCE(sqe->msg_flags);
608 if (sr->msg_flags & MSG_DONTWAIT)
609 req->flags |= REQ_F_NOWAIT;
610 if (sr->msg_flags & MSG_ERRQUEUE)
611 req->flags |= REQ_F_CLEAR_POLLIN;
612 if (sr->flags & IORING_RECV_MULTISHOT) {
613 if (!(req->flags & REQ_F_BUFFER_SELECT))
615 if (sr->msg_flags & MSG_WAITALL)
617 if (req->opcode == IORING_OP_RECV && sr->len)
619 req->flags |= REQ_F_APOLL_MULTISHOT;
621 * Store the buffer group for this multishot receive separately,
622 * as if we end up doing an io-wq based issue that selects a
623 * buffer, it has to be committed immediately and that will
624 * clear ->buf_list. This means we lose the link to the buffer
625 * list, and the eventual buffer put on completion then cannot
628 sr->buf_group = req->buf_index;
632 if (req->ctx->compat)
633 sr->msg_flags |= MSG_CMSG_COMPAT;
639 static inline void io_recv_prep_retry(struct io_kiocb *req)
641 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
644 sr->len = 0; /* get from the provided buffer */
645 req->buf_index = sr->buf_group;
649 * Finishes io_recv and io_recvmsg.
651 * Returns true if it is actually finished, or false if it should run
652 * again (for multishot).
654 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
655 unsigned int cflags, bool mshot_finished,
656 unsigned issue_flags)
658 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
659 io_req_set_res(req, *ret, cflags);
664 if (!mshot_finished) {
665 if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
666 cflags | IORING_CQE_F_MORE, false)) {
667 io_recv_prep_retry(req);
671 * Otherwise stop multishot but use the current result.
672 * Probably will end up going into overflow, but this means
673 * we cannot trust the ordering anymore
677 io_req_set_res(req, *ret, cflags);
679 if (issue_flags & IO_URING_F_MULTISHOT)
680 *ret = IOU_STOP_MULTISHOT;
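/*
 * Carve the io_uring_recvmsg_out header, name and control space off the
 * front of the selected buffer; what remains becomes the payload area, and
 * the original buffer address is stashed in sr->buf for the final copy.
 */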
686 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
687 struct io_sr_msg *sr, void __user **buf,
690 unsigned long ubuf = (unsigned long) *buf;
693 hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
698 if (kmsg->controllen) {
699 unsigned long control = ubuf + hdr - kmsg->controllen;
701 kmsg->msg.msg_control_user = (void __user *) control;
702 kmsg->msg.msg_controllen = kmsg->controllen;
705 sr->buf = *buf; /* stash for later copy */
706 *buf = (void __user *) (ubuf + hdr);
707 kmsg->payloadlen = *len = *len - hdr;
711 struct io_recvmsg_multishot_hdr {
712 struct io_uring_recvmsg_out msg;
713 struct sockaddr_storage addr;
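/*
 * One multishot receive iteration: receive into the carved-up buffer, then
 * write the io_uring_recvmsg_out header (with truncation-aware name and
 * control lengths) in front of the payload for userspace to parse.
 */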
716 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
717 struct io_async_msghdr *kmsg,
718 unsigned int flags, bool *finished)
722 struct io_recvmsg_multishot_hdr hdr;
725 kmsg->msg.msg_name = &hdr.addr;
726 kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
727 kmsg->msg.msg_namelen = 0;
729 if (sock->file->f_flags & O_NONBLOCK)
730 flags |= MSG_DONTWAIT;
732 err = sock_recvmsg(sock, &kmsg->msg, flags);
733 *finished = err <= 0;
737 hdr.msg = (struct io_uring_recvmsg_out) {
738 .controllen = kmsg->controllen - kmsg->msg.msg_controllen,
739 .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
742 hdr.msg.payloadlen = err;
743 if (err > kmsg->payloadlen)
744 err = kmsg->payloadlen;
746 copy_len = sizeof(struct io_uring_recvmsg_out);
747 if (kmsg->msg.msg_namelen > kmsg->namelen)
748 copy_len += kmsg->namelen;
750 copy_len += kmsg->msg.msg_namelen;
753 * "fromlen shall refer to the value before truncation.."
756 hdr.msg.namelen = kmsg->msg.msg_namelen;
758 /* ensure that there is no gap between hdr and sockaddr_storage */
759 BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
760 sizeof(struct io_uring_recvmsg_out));
761 if (copy_to_user(io->buf, &hdr, copy_len)) {
766 return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
767 kmsg->controllen + err;
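/*
 * Issue a recvmsg. For multishot, a fresh provided buffer is selected and
 * the receive re-armed until posting a completion fails or the request is
 * otherwise finished; partial progress is carried in sr->done_io across
 * retries.
 */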
770 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
772 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
773 struct io_async_msghdr iomsg, *kmsg;
777 int ret, min_ret = 0;
778 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
779 bool mshot_finished = true;
781 sock = sock_from_file(req->file);
785 if (req_has_async_data(req)) {
786 kmsg = req->async_data;
788 ret = io_recvmsg_copy_hdr(req, &iomsg);
794 if (!(req->flags & REQ_F_POLLED) &&
795 (sr->flags & IORING_RECVSEND_POLL_FIRST))
796 return io_setup_async_msg(req, kmsg, issue_flags);
799 if (io_do_buffer_select(req)) {
801 size_t len = sr->len;
803 buf = io_buffer_select(req, &len, issue_flags);
807 if (req->flags & REQ_F_APOLL_MULTISHOT) {
808 ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
810 io_kbuf_recycle(req, issue_flags);
815 kmsg->fast_iov[0].iov_base = buf;
816 kmsg->fast_iov[0].iov_len = len;
817 iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
821 flags = sr->msg_flags;
823 flags |= MSG_DONTWAIT;
825 kmsg->msg.msg_get_inq = 1;
826 if (req->flags & REQ_F_APOLL_MULTISHOT) {
827 ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
830 /* disable partial retry for recvmsg with cmsg attached */
831 if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
832 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
834 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
839 if (ret == -EAGAIN && force_nonblock) {
840 ret = io_setup_async_msg(req, kmsg, issue_flags);
841 if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
842 io_kbuf_recycle(req, issue_flags);
843 return IOU_ISSUE_SKIP_COMPLETE;
847 if (ret > 0 && io_net_retry(sock, flags)) {
849 req->flags |= REQ_F_PARTIAL_IO;
850 return io_setup_async_msg(req, kmsg, issue_flags);
852 if (ret == -ERESTARTSYS)
855 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
861 else if (sr->done_io)
864 io_kbuf_recycle(req, issue_flags);
866 cflags = io_put_kbuf(req, issue_flags);
867 if (kmsg->msg.msg_inq)
868 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
870 if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
871 goto retry_multishot;
873 if (mshot_finished) {
874 /* fast path, check for non-NULL to avoid function call */
876 kfree(kmsg->free_iov);
877 io_netmsg_recycle(req, issue_flags);
878 req->flags &= ~REQ_F_NEED_CLEANUP;
879 } else if (ret == -EAGAIN)
880 return io_setup_async_msg(req, kmsg, issue_flags);
885 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
887 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
893 int ret, min_ret = 0;
894 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
895 size_t len = sr->len;
897 if (!(req->flags & REQ_F_POLLED) &&
898 (sr->flags & IORING_RECVSEND_POLL_FIRST))
901 sock = sock_from_file(req->file);
906 if (io_do_buffer_select(req)) {
909 buf = io_buffer_select(req, &len, issue_flags);
916 ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
922 msg.msg_control = NULL;
925 msg.msg_controllen = 0;
929 flags = sr->msg_flags;
931 flags |= MSG_DONTWAIT;
932 if (flags & MSG_WAITALL)
933 min_ret = iov_iter_count(&msg.msg_iter);
935 ret = sock_recvmsg(sock, &msg, flags);
937 if (ret == -EAGAIN && force_nonblock) {
938 if (issue_flags & IO_URING_F_MULTISHOT) {
939 io_kbuf_recycle(req, issue_flags);
940 return IOU_ISSUE_SKIP_COMPLETE;
945 if (ret > 0 && io_net_retry(sock, flags)) {
949 req->flags |= REQ_F_PARTIAL_IO;
952 if (ret == -ERESTARTSYS)
955 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
962 else if (sr->done_io)
965 io_kbuf_recycle(req, issue_flags);
967 cflags = io_put_kbuf(req, issue_flags);
969 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
971 if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
972 goto retry_multishot;
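/*
 * Cleanup for zerocopy sends that did not complete normally: drop any iovec
 * allocated while copying the header and flush the notification so its CQE
 * is not lost.
 */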
977 void io_send_zc_cleanup(struct io_kiocb *req)
979 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
980 struct io_async_msghdr *io;
982 if (req_has_async_data(req)) {
983 io = req->async_data;
984 /* might be ->fast_iov if *msg_copy_hdr failed */
985 if (io->free_iov != io->fast_iov)
989 io_notif_flush(zc->notif);
994 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
996 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
997 struct io_ring_ctx *ctx = req->ctx;
998 struct io_kiocb *notif;
1000 if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1002 /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
1003 if (req->flags & REQ_F_CQE_SKIP)
1006 zc->flags = READ_ONCE(sqe->ioprio);
1007 if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
1008 IORING_RECVSEND_FIXED_BUF |
1009 IORING_SEND_ZC_REPORT_USAGE))
1011 notif = zc->notif = io_alloc_notif(ctx);
1014 notif->cqe.user_data = req->cqe.user_data;
1016 notif->cqe.flags = IORING_CQE_F_NOTIF;
1017 req->flags |= REQ_F_NEED_CLEANUP;
1018 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1019 unsigned idx = READ_ONCE(sqe->buf_index);
1021 if (unlikely(idx >= ctx->nr_user_bufs))
1023 idx = array_index_nospec(idx, ctx->nr_user_bufs);
1024 req->imu = READ_ONCE(ctx->user_bufs[idx]);
1025 io_req_set_rsrc_node(notif, ctx, 0);
1027 if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1028 io_notif_to_data(notif)->zc_report = true;
1031 if (req->opcode == IORING_OP_SEND_ZC) {
1032 if (READ_ONCE(sqe->__pad3[0]))
1034 zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1035 zc->addr_len = READ_ONCE(sqe->addr_len);
1037 if (unlikely(sqe->addr2 || sqe->file_index))
1039 if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1043 zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1044 zc->len = READ_ONCE(sqe->len);
1045 zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1046 if (zc->msg_flags & MSG_DONTWAIT)
1047 req->flags |= REQ_F_NOWAIT;
1051 #ifdef CONFIG_COMPAT
1052 if (req->ctx->compat)
1053 zc->msg_flags |= MSG_CMSG_COMPAT;
1058 static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1059 struct iov_iter *from, size_t length)
1061 skb_zcopy_downgrade_managed(skb);
1062 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
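/*
 * Build skb frags directly from the caller's bvec-based iterator (fixed
 * buffers) without taking page references, accounting truesize against the
 * socket; fall back to __zerocopy_sg_from_iter() when the skb cannot manage
 * its own frag references.
 */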
1065 static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1066 struct iov_iter *from, size_t length)
1068 struct skb_shared_info *shinfo = skb_shinfo(skb);
1069 int frag = shinfo->nr_frags;
1071 struct bvec_iter bi;
1073 unsigned long truesize = 0;
1076 shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1077 else if (unlikely(!skb_zcopy_managed(skb)))
1078 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1080 bi.bi_size = min(from->count, length);
1081 bi.bi_bvec_done = from->iov_offset;
1084 while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1085 struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1088 truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1089 __skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1090 v.bv_offset, v.bv_len);
1091 bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1096 shinfo->nr_frags = frag;
1097 from->bvec += bi.bi_idx;
1098 from->nr_segs -= bi.bi_idx;
1099 from->count -= copied;
1100 from->iov_offset = bi.bi_bvec_done;
1102 skb->data_len += copied;
1104 skb->truesize += truesize;
1106 if (sk && sk->sk_type == SOCK_STREAM) {
1107 sk_wmem_queued_add(sk, truesize);
1108 if (!skb_zcopy_pure(skb))
1109 sk_mem_charge(sk, truesize);
1111 refcount_add(truesize, &skb->sk->sk_wmem_alloc);
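/*
 * Zerocopy send: import either a registered (fixed) buffer or a plain user
 * buffer, attach the notification's ubuf_info via msg_ubuf and hand the
 * message to the socket. Partial progress and -EAGAIN are retried with the
 * destination address preserved in async data.
 */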
1116 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1118 struct sockaddr_storage __address;
1119 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1122 struct socket *sock;
1124 int ret, min_ret = 0;
1126 sock = sock_from_file(req->file);
1127 if (unlikely(!sock))
1129 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1132 msg.msg_name = NULL;
1133 msg.msg_control = NULL;
1134 msg.msg_controllen = 0;
1135 msg.msg_namelen = 0;
1138 if (req_has_async_data(req)) {
1139 struct io_async_msghdr *io = req->async_data;
1141 msg.msg_name = &io->addr;
1143 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1144 if (unlikely(ret < 0))
1146 msg.msg_name = (struct sockaddr *)&__address;
1148 msg.msg_namelen = zc->addr_len;
1151 if (!(req->flags & REQ_F_POLLED) &&
1152 (zc->flags & IORING_RECVSEND_POLL_FIRST))
1153 return io_setup_async_addr(req, &__address, issue_flags);
1155 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1156 ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1157 (u64)(uintptr_t)zc->buf, zc->len);
1160 msg.sg_from_iter = io_sg_from_iter;
1162 ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
1166 ret = io_notif_account_mem(zc->notif, zc->len);
1169 msg.sg_from_iter = io_sg_from_iter_iovec;
1172 msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1173 if (issue_flags & IO_URING_F_NONBLOCK)
1174 msg_flags |= MSG_DONTWAIT;
1175 if (msg_flags & MSG_WAITALL)
1176 min_ret = iov_iter_count(&msg.msg_iter);
1177 msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1179 msg.msg_flags = msg_flags;
1180 msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1181 ret = sock_sendmsg(sock, &msg);
1183 if (unlikely(ret < min_ret)) {
1184 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1185 return io_setup_async_addr(req, &__address, issue_flags);
1187 if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1191 req->flags |= REQ_F_PARTIAL_IO;
1192 return io_setup_async_addr(req, &__address, issue_flags);
1194 if (ret == -ERESTARTSYS)
1201 else if (zc->done_io)
1205 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1206 * flushing notif to io_send_zc_cleanup()
1208 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1209 io_notif_flush(zc->notif);
1210 req->flags &= ~REQ_F_NEED_CLEANUP;
1212 io_req_set_res(req, ret, IORING_CQE_F_MORE);
1216 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1218 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1219 struct io_async_msghdr iomsg, *kmsg;
1220 struct socket *sock;
1222 int ret, min_ret = 0;
1224 sock = sock_from_file(req->file);
1225 if (unlikely(!sock))
1227 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1230 if (req_has_async_data(req)) {
1231 kmsg = req->async_data;
1232 kmsg->msg.msg_control_user = sr->msg_control;
1234 ret = io_sendmsg_copy_hdr(req, &iomsg);
1240 if (!(req->flags & REQ_F_POLLED) &&
1241 (sr->flags & IORING_RECVSEND_POLL_FIRST))
1242 return io_setup_async_msg(req, kmsg, issue_flags);
1244 flags = sr->msg_flags | MSG_ZEROCOPY;
1245 if (issue_flags & IO_URING_F_NONBLOCK)
1246 flags |= MSG_DONTWAIT;
1247 if (flags & MSG_WAITALL)
1248 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1250 kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1251 kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1252 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1254 if (unlikely(ret < min_ret)) {
1255 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1256 return io_setup_async_msg(req, kmsg, issue_flags);
1258 if (ret > 0 && io_net_retry(sock, flags)) {
1260 req->flags |= REQ_F_PARTIAL_IO;
1261 return io_setup_async_msg(req, kmsg, issue_flags);
1263 if (ret == -ERESTARTSYS)
1267 /* fast path, check for non-NULL to avoid function call */
1268 if (kmsg->free_iov) {
1269 kfree(kmsg->free_iov);
1270 kmsg->free_iov = NULL;
1273 io_netmsg_recycle(req, issue_flags);
1276 else if (sr->done_io)
1280 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1281 * flushing notif to io_send_zc_cleanup()
1283 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1284 io_notif_flush(sr->notif);
1285 req->flags &= ~REQ_F_NEED_CLEANUP;
1287 io_req_set_res(req, ret, IORING_CQE_F_MORE);
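/*
 * Failure completion for send/recv style requests: report any bytes already
 * transferred, and for zerocopy sends that still own their notification set
 * IORING_CQE_F_MORE so userspace keeps waiting for the notification CQE.
 */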
1291 void io_sendrecv_fail(struct io_kiocb *req)
1293 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1295 if (req->flags & REQ_F_PARTIAL_IO)
1296 req->cqe.res = sr->done_io;
1298 if ((req->flags & REQ_F_NEED_CLEANUP) &&
1299 (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1300 req->cqe.flags |= IORING_CQE_F_MORE;
1303 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1305 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1308 if (sqe->len || sqe->buf_index)
1311 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1312 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1313 accept->flags = READ_ONCE(sqe->accept_flags);
1314 accept->nofile = rlimit(RLIMIT_NOFILE);
1315 flags = READ_ONCE(sqe->ioprio);
1316 if (flags & ~IORING_ACCEPT_MULTISHOT)
1319 accept->file_slot = READ_ONCE(sqe->file_index);
1320 if (accept->file_slot) {
1321 if (accept->flags & SOCK_CLOEXEC)
1323 if (flags & IORING_ACCEPT_MULTISHOT &&
1324 accept->file_slot != IORING_FILE_INDEX_ALLOC)
1327 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1329 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1330 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1331 if (flags & IORING_ACCEPT_MULTISHOT)
1332 req->flags |= REQ_F_APOLL_MULTISHOT;
1336 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1338 struct io_ring_ctx *ctx = req->ctx;
1339 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1340 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1341 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1342 bool fixed = !!accept->file_slot;
1348 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1349 if (unlikely(fd < 0))
1352 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1357 ret = PTR_ERR(file);
1358 if (ret == -EAGAIN && force_nonblock) {
1360 * if it's multishot and polled, we don't need to
1361 * return EAGAIN to arm the poll infra since it
1362 * has already been done
1364 if (issue_flags & IO_URING_F_MULTISHOT)
1365 return IOU_ISSUE_SKIP_COMPLETE;
1368 if (ret == -ERESTARTSYS)
1371 } else if (!fixed) {
1372 fd_install(fd, file);
1375 ret = io_fixed_fd_install(req, issue_flags, file,
1379 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1380 io_req_set_res(req, ret, 0);
1386 if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
1389 io_req_set_res(req, ret, 0);
1390 return IOU_STOP_MULTISHOT;
1393 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1395 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1397 if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1400 sock->domain = READ_ONCE(sqe->fd);
1401 sock->type = READ_ONCE(sqe->off);
1402 sock->protocol = READ_ONCE(sqe->len);
1403 sock->file_slot = READ_ONCE(sqe->file_index);
1404 sock->nofile = rlimit(RLIMIT_NOFILE);
1406 sock->flags = sock->type & ~SOCK_TYPE_MASK;
1407 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1409 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1414 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1416 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1417 bool fixed = !!sock->file_slot;
1422 fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1423 if (unlikely(fd < 0))
1426 file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1430 ret = PTR_ERR(file);
1431 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1433 if (ret == -ERESTARTSYS)
1436 } else if (!fixed) {
1437 fd_install(fd, file);
1440 ret = io_fixed_fd_install(req, issue_flags, file,
1443 io_req_set_res(req, ret, 0);
1447 int io_connect_prep_async(struct io_kiocb *req)
1449 struct io_async_connect *io = req->async_data;
1450 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1452 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1455 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1457 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1459 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1462 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1463 conn->addr_len = READ_ONCE(sqe->addr2);
1464 conn->in_progress = conn->seen_econnaborted = false;
1468 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1470 struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1471 struct io_async_connect __io, *io;
1472 unsigned file_flags;
1474 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1476 if (req_has_async_data(req)) {
1477 io = req->async_data;
1479 ret = move_addr_to_kernel(connect->addr,
1487 file_flags = force_nonblock ? O_NONBLOCK : 0;
1489 ret = __sys_connect_file(req->file, &io->address,
1490 connect->addr_len, file_flags);
1491 if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1492 && force_nonblock) {
1493 if (ret == -EINPROGRESS) {
1494 connect->in_progress = true;
1495 } else if (ret == -ECONNABORTED) {
1496 if (connect->seen_econnaborted)
1498 connect->seen_econnaborted = true;
1500 if (req_has_async_data(req))
1502 if (io_alloc_async_data(req)) {
1506 memcpy(req->async_data, &__io, sizeof(__io));
1509 if (connect->in_progress) {
1511 * At least bluetooth will return -EBADFD on a re-connect
1512 * attempt, and it's (supposedly) also valid to get -EISCONN
1513 * which means the previous result is good. For both of these,
1514 * grab the sock_error() and use that for the completion.
1516 if (ret == -EBADFD || ret == -EISCONN)
1517 ret = sock_error(sock_from_file(req->file)->sk);
1519 if (ret == -ERESTARTSYS)
1524 io_req_set_res(req, ret, 0);
1528 void io_netmsg_cache_free(struct io_cache_entry *entry)
1530 kfree(container_of(entry, struct io_async_msghdr, cache));