// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"
/* NOTE: kiocb has the file as the first member, so don't do it here */

static inline bool io_file_supports_nowait(struct io_kiocb *req)
	return req->flags & REQ_F_SUPPORT_NOWAIT;

static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
	struct compat_iovec __user *uiov;
	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
	if (__get_user(clen, &uiov->iov_len))

static int io_iov_buffer_select_prep(struct io_kiocb *req)
	struct iovec __user *uiov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
		return io_iov_compat_buffer_select_prep(rw);
	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
	rw->len = iov.iov_len;
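/*
 * Note on the two prep helpers above: with IOSQE_BUFFER_SELECT on a
 * vectored read, userspace supplies a single iovec and only its iov_len
 * is consumed here as the length; the data pointer is filled in later,
 * when a provided buffer is actually selected at issue time.
 */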
int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);
	ioprio = READ_ONCE(sqe->ioprio);
		ret = ioprio_check_cap(ioprio);
		rw->kiocb.ki_ioprio = ioprio;
		rw->kiocb.ki_ioprio = get_current_ioprio();
	rw->kiocb.dio_complete = NULL;
	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
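/*
 * The SQE fields read in io_prep_rw() above live in memory that is shared
 * with userspace, which is why each one is sampled exactly once with
 * READ_ONCE() rather than being re-read later.
 */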
int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	ret = io_prep_rw(req, sqe);
	/*
	 * Have to do this validation here, as by the time we are in io_read()
	 * rw->len might have changed due to buffer selection.
	 */
	if (req->flags & REQ_F_BUFFER_SELECT)
		return io_iov_buffer_select_prep(req);
int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	struct io_ring_ctx *ctx = req->ctx;
	ret = io_prep_rw(req, sqe);
	if (unlikely(req->buf_index >= ctx->nr_user_bufs))
	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
	req->imu = ctx->user_bufs[index];
	io_req_set_rsrc_node(req, ctx, 0);
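/*
 * array_index_nospec() clamps the user-controlled buf_index so that a
 * mispredicted bounds check above cannot be used to speculatively index
 * outside ctx->user_bufs.
 */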
/*
 * Multishot read is prepared just like a normal read/write request; the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
	ret = io_prep_rw(req, sqe);
	if (rw->addr || rw->len)
	req->flags |= REQ_F_APOLL_MULTISHOT;
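/*
 * A minimal sketch of a submission that satisfies the checks above. This
 * is an illustration only (not taken from this file) and assumes a
 * provided-buffer ring has already been registered for buffer group 0:
 *
 *	sqe->opcode	= IORING_OP_READ_MULTISHOT;
 *	sqe->fd		= fd;			// must be pollable, see io_read_mshot()
 *	sqe->flags	= IOSQE_BUFFER_SELECT;	// sets REQ_F_BUFFER_SELECT
 *	sqe->buf_group	= 0;			// provided buffer group to pick from
 *	sqe->addr	= 0;			// non-zero addr/len is rejected above
 *	sqe->len	= 0;
 */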
void io_readv_writev_cleanup(struct io_kiocb *req)
	struct io_async_rw *io = req->async_data;
	kfree(io->free_iovec);

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;
	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	rw->kiocb.ki_pos = 0;
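/*
 * In io_kiocb_update_pos() above, an SQE offset of -1 means "use (and
 * update) the file position", mirroring read(2)/write(2); any other value
 * behaves like pread(2)/pwrite(2). Stream files have no position, so they
 * simply get a dummy pos of 0.
 */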
static void io_req_task_queue_reissue(struct io_kiocb *req)
	req->io_task_work.func = io_queue_iowq;
	io_req_task_work_add(req);

static bool io_resubmit_prep(struct io_kiocb *req)
	struct io_async_rw *io = req->async_data;
	if (!req_has_async_data(req))
		return !io_req_prep_async(req);
	iov_iter_restore(&io->s.iter, &io->s.iter_state);

static bool io_rw_should_reissue(struct io_kiocb *req)
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;
	if (!S_ISBLK(mode) && !S_ISREG(mode))
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
	/*
	 * Play it safe and assume it is not safe to re-import and reissue if
	 * we're not in the original thread group (or not in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
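/*
 * In short, io_rw_should_reissue() only allows an in-kernel retry for
 * regular files and block devices, when the request was not marked
 * NOWAIT, the ring is not being torn down, and we are still running in
 * the submitting task's context.
 */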
static bool io_resubmit_prep(struct io_kiocb *req)

static bool io_rw_should_reissue(struct io_kiocb *req)

static void io_req_end_write(struct io_kiocb *req)
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
		kiocb_end_write(&rw->kiocb);

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
		fsnotify_access(req->file);

static bool __io_complete_rw_common(struct io_kiocb *req, long res)
	if (unlikely(res != req->cqe.res)) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			 * Reissue will start accounting again, finish the
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
	struct io_async_rw *io = req->async_data;
	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
			res = io->bytes_done;
			res += io->bytes_done;
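/*
 * Worked example for io_fixup_rw_res(): if a first attempt read 4096
 * bytes and the retry then fails with -EAGAIN, the request still reports
 * res = 4096; if the retry reads another 4096 bytes, the final res is
 * 8192. bytes_done therefore accumulates everything completed across
 * attempts.
 */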
void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
		req->cqe.flags |= io_put_kbuf(req, issue_flags);
	io_req_task_complete(req, ts);

static void io_complete_rw(struct kiocb *kiocb, long res)
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);
	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		if (__io_complete_rw_common(req, res))
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);
	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
			io_complete_rw, kiocb, ret);
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);
	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_req_io_end() from here as we're
			 * inline from the submission path.
			 */
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, issue_flags));
		io_rw_done(&rw->kiocb, ret);
	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (io_resubmit_prep(req))
			io_req_task_queue_reissue(req);
			io_req_task_queue_fail(req, final_ret);
	return IOU_ISSUE_SKIP_COMPLETE;
static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
				       struct io_rw_state *s,
				       unsigned int issue_flags)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iov_iter *iter = &s->iter;
	u8 opcode = req->opcode;
	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
	buf = u64_to_user_ptr(rw->addr);
	if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
				return ERR_PTR(-ENOBUFS);
			rw->addr = (unsigned long) buf;
		ret = import_ubuf(ddir, buf, sqe_len, iter);
	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
	if (unlikely(ret < 0))
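/*
 * __io_import_iovec() above covers three cases: registered (fixed)
 * buffers go through io_import_fixed(), non-vectored and buffer-select
 * requests import a single user buffer via import_ubuf(), and vectored
 * requests fall through to __import_iovec() for a full iovec array.
 */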
static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct iovec **iovec, struct io_rw_state *s,
				  unsigned int issue_flags)
	*iovec = __io_import_iovec(rw, req, s, issue_flags);
		return PTR_ERR(*iovec);
	iov_iter_save_state(&s->iter, &s->iter_state);

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
	ppos = io_kiocb_ppos(kiocb);
	while (iov_iter_count(iter)) {
		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
			addr = u64_to_user_ptr(rw->addr);
			nr = file->f_op->read(file, addr, len, ppos);
			nr = file->f_op->write(file, addr, len, ppos);
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
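/*
 * loop_rw_iter() is the slow fallback for files whose f_op only provide
 * the classic ->read()/->write() methods; it walks the iterator one
 * segment at a time and, per the comment above, cannot support polled
 * (HIPRI) IO or real non-blocking behaviour.
 */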
static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
	struct io_async_rw *io = req->async_data;
	memcpy(&io->s.iter, iter, sizeof(*iter));
	io->free_iovec = iovec;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
		unsigned iov_off = 0;
		io->s.iter.__iov = io->s.fast_iov;
		if (iter->__iov != fast_iov) {
			iov_off = iter_iov(iter) - fast_iov;
			io->s.iter.__iov += iov_off;
		if (io->s.fast_iov != fast_iov)
			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
		req->flags |= REQ_F_NEED_CLEANUP;
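/*
 * io_req_map_rw() snapshots the iterator (and, for vectored IO, the iovec
 * array) into the request's async data so a later io-wq or task_work
 * retry can redo the transfer after the submitter's on-stack state is
 * gone; REQ_F_NEED_CLEANUP ensures a heap-allocated iovec is eventually
 * freed by io_readv_writev_cleanup().
 */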
static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     struct io_rw_state *s, bool force)
	if (!force && !io_cold_defs[req->opcode].prep_async)
	/* opcode type doesn't need async data */
	if (!io_cold_defs[req->opcode].async_size)
	if (!req_has_async_data(req)) {
		struct io_async_rw *iorw;
		if (io_alloc_async_data(req)) {
		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
	struct io_async_rw *iorw = req->async_data;
	iorw->bytes_done = 0;
	iorw->free_iovec = NULL;
	/* submission path, ->uring_lock should already be taken */
	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
	if (unlikely(ret < 0))
	iorw->free_iovec = iov;
	req->flags |= REQ_F_NEED_CLEANUP;

int io_readv_prep_async(struct io_kiocb *req)
	return io_rw_prep_async(req, ITER_DEST);

int io_writev_prep_async(struct io_kiocb *req)
	return io_rw_prep_async(req, ITER_SOURCE);
/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;
	wpq = container_of(wait, struct wait_page_queue, wait);
	if (!wake_page_match(wpq, key))
	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to set up a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
	/*
	 * Just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks.
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
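	/*
	 * With IOCB_WAITQ set and ki_waitq pointing at the entry armed above,
	 * the buffered read path in the page cache will, instead of sleeping
	 * on a locked folio, queue io_async_buf_func() on its waitqueue and
	 * return -EIOCBQUEUED; the callback then schedules the task_work
	 * retry described in the comment above.
	 */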
static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
	struct file *file = rw->kiocb.ki_filp;
	if (likely(file->f_op->read_iter))
		return call_read_iter(file, &rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);

static bool need_complete_io(struct io_kiocb *req)
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	if (unlikely(!file || !(file->f_mode & mode)))
	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);
	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags);
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
		if (kiocb->ki_flags & IOCB_HIPRI)
		kiocb->ki_complete = io_complete_rw;
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_async_rw *io;
	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
		io = req->async_data;
		/*
		 * Safe and required to re-import if we're using provided
		 * buffers, as we dropped the selected one before retry.
		 */
		if (io_do_buffer_select(req)) {
			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
			if (unlikely(ret < 0))
		/*
		 * We come here from an earlier attempt, restore our state to
		 * match in case it doesn't. It's cheap enough that we don't
		 * need to make this conditional.
		 */
		iov_iter_restore(&s->iter, &s->iter_state);
	ret = io_rw_init_file(req, FMODE_READ);
	req->cqe.res = iov_iter_count(&s->iter);
	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req))) {
			ret = io_setup_async_rw(req, iovec, s, true);
			return ret ?: -EAGAIN;
		kiocb->ki_flags |= IOCB_NOWAIT;
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	ppos = io_kiocb_update_pos(req);
	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	ret = io_iter_do_read(rw, &s->iter);
	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/*
		 * If we can poll, just do that. For a vectored read, we'll
		 * need to copy state first.
		 */
		if (file_can_poll(req->file) && !io_issue_defs[req->opcode].vectored)
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
	} else if (ret == -EIOCBQUEUED) {
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&s->iter, &s->iter_state);
	ret2 = io_setup_async_rw(req, iovec, s, true);
		ret = ret > 0 ? ret : ret2;
	io = req->async_data;
	/*
	 * Now use our persistent iterator and state, if we aren't already.
	 * We've restored and mapped the iter to match.
	 */
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&s->iter, ret);
		if (!iov_iter_count(&s->iter))
		io->bytes_done += ret;
		iov_iter_save_state(&s->iter, &s->iter_state);
		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
		req->cqe.res = iov_iter_count(&s->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &s->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&s->iter, &s->iter_state);
	/* it's faster to check here than to delegate to kfree */
int io_read(struct io_kiocb *req, unsigned int issue_flags)
	ret = __io_read(req, issue_flags);
		return kiocb_done(req, ret, issue_flags);

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned int cflags = 0;
	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!file_can_poll(req->file))
	ret = __io_read(req, issue_flags);
	/*
	 * If the file doesn't support proper NOWAIT, then disable multishot
	 * and stay in single shot mode.
	 */
	if (!io_file_supports_nowait(req))
		req->flags &= ~REQ_F_APOLL_MULTISHOT;
	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it again.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, issue_flags))
		if (issue_flags & IO_URING_F_MULTISHOT)
			return IOU_ISSUE_SKIP_COMPLETE;
	/*
	 * Any successful return value will keep the multishot read armed.
	 */
	if (ret > 0 && req->flags & REQ_F_APOLL_MULTISHOT) {
		/*
		 * Put our buffer and post a CQE. If we fail to post a CQE, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, issue_flags);
		rw->len = 0; /* similarly to above, reset len to 0 */
		if (io_fill_cqe_req_aux(req,
					issue_flags & IO_URING_F_COMPLETE_DEFER,
					ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				/*
				 * Force retry, as we might have more data to
				 * be read and otherwise it won't get retried
				 * until (if ever) another poll is triggered.
				 */
				io_poll_multishot_retry(req);
				return IOU_ISSUE_SKIP_COMPLETE;
	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
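/*
 * io_read_mshot() in short: -EAGAIN recycles the provided buffer and lets
 * poll re-arm the request, a successful read posts a CQE flagged with
 * IORING_CQE_F_MORE and keeps the request armed, while an error or a CQ
 * ring overflow terminates the multishot and posts the final completion
 * without the MORE flag.
 */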
int io_write(struct io_kiocb *req, unsigned int issue_flags)
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	if (!req_has_async_data(req)) {
		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
		struct io_async_rw *io = req->async_data;
		iov_iter_restore(&s->iter, &s->iter_state);
	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret)) {
	req->cqe.res = iov_iter_count(&s->iter);
	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
		/* The file path supports NOWAIT for non-direct IO only on block devices. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
		kiocb->ki_flags |= IOCB_NOWAIT;
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	ppos = io_kiocb_update_pos(req);
	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
	if (req->flags & REQ_F_ISREG)
		kiocb_start_write(kiocb);
	kiocb->ki_flags |= IOCB_WRITE;
	if (likely(req->file->f_op->write_iter))
		ret2 = call_write_iter(req->file, kiocb, &s->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			struct io_async_rw *io;
			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						   req->cqe.res, ret2);
			/*
			 * This is a partial write. The file pos has already
			 * been updated, set up the async struct to complete
			 * the request in the worker. Also update bytes_done
			 * to account for the bytes already written.
			 */
			iov_iter_save_state(&s->iter, &s->iter_state);
			ret = io_setup_async_rw(req, iovec, s, true);
			io = req->async_data;
			io->bytes_done += ret2;
			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return ret ? ret : -EAGAIN;
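			/*
			 * For the partial-write case just above, the bytes
			 * already written are parked in bytes_done and the
			 * request is bounced to the worker via -EAGAIN to
			 * finish the remainder; io_fixup_rw_res() later folds
			 * bytes_done back into the final CQE result.
			 */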
		ret = kiocb_done(req, ret2, issue_flags);
		iov_iter_restore(&s->iter, &s->iter_state);
		ret = io_setup_async_rw(req, iovec, s, false);
			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
	/* it's reportedly faster than delegating the null check to kfree() */
void io_rw_fail(struct io_kiocb *req)
	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);

int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;
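	/*
	 * BLK_POLL_ONESHOT asks the driver's poll routine to check for
	 * completions once and return instead of spinning until something
	 * completes, which is the behaviour we want when several devices
	 * share the poll list or when the caller asked us not to spin.
	 */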
	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct file *file = req->file;
		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
		if (req->opcode == IORING_OP_URING_CMD) {
			struct io_uring_cmd *ioucmd;
			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		if (unlikely(ret < 0))
			poll_flags |= BLK_POLL_ONESHOT;
		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
	if (!rq_list_empty(iob.req_list))
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
		req->cqe.flags = io_put_kbuf(req, 0);
	if (unlikely(!nr_events))
	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);
	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);