// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "rsrc.h"
#include "rw.h"

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static inline bool io_file_supports_nowait(struct io_kiocb *req)
{
	return req->flags & REQ_F_SUPPORT_NOWAIT;
}
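
/*
 * Prep runs once per request at submission: snapshot the SQE fields we need
 * (offset, buffer address/length, rw_flags, ioprio) into the io_rw command,
 * and for READ_FIXED/WRITE_FIXED resolve the registered buffer index up
 * front so issue time doesn't have to.
 */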

int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	if (req->opcode == IORING_OP_READ_FIXED ||
	    req->opcode == IORING_OP_WRITE_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
	return 0;
}
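
/*
 * free_iovec is only non-NULL when __io_import_iovec() had to allocate an
 * iovec array bigger than the inline fast_iov, so cleanup is just a kfree()
 * (which is a no-op for NULL).
 */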
void io_readv_writev_cleanup(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	kfree(io->free_iovec);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		fallthrough;
	default:
		kiocb->ki_complete(kiocb, ret);
	}
}

static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}
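
/*
 * Reissue support: a request that already passed the nowait checks can still
 * get -EAGAIN back from the block layer. When that is safe to retry, the
 * request is re-queued to io-wq via task_work instead of failing the CQE
 * (only with CONFIG_BLOCK, see the stubs below).
 */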

static void io_req_task_queue_reissue(struct io_kiocb *req)
{
	req->io_task_work.func = io_queue_iowq;
	io_req_task_work_add(req);
}

#ifdef CONFIG_BLOCK
static bool io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	if (!req_has_async_data(req))
		return !io_req_prep_async(req);
	iov_iter_restore(&io->s.iter, &io->s.iter_state);
	return true;
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume not safe to re-import and reissue if we're
	 * not in the original thread group (or in task context).
	 */
	if (!same_thread_group(req->task, current) || !in_task())
		return false;
	return true;
}
#else
static bool io_resubmit_prep(struct io_kiocb *req)
{
	return false;
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif
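
/*
 * Writes to regular files take sb_start_write() freeze protection at issue
 * time (see io_write()) and drop it here at completion, possibly from a
 * different task; __sb_writers_acquired() keeps lockdep happy about that
 * handoff.
 */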

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct super_block *sb = file_inode(req->file)->i_sb;

		__sb_writers_acquired(sb, SB_FREEZE_WRITE);
		sb_end_write(sb);
	}
}

static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		kiocb_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
	if (unlikely(res != req->cqe.res)) {
		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
		    io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

static void __io_complete_rw(struct io_kiocb *req, long res,
			     unsigned int issue_flags)
{
	if (__io_complete_rw_common(req, res))
		return;
	io_req_set_res(req, req->cqe.res, io_put_kbuf(req, issue_flags));
	__io_req_complete(req, issue_flags);
}

static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (__io_complete_rw_common(req, res))
		return;
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_prio_work_add(req);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

static void kiocb_done(struct io_kiocb *req, ssize_t ret,
		       unsigned int issue_flags)
{
	struct io_async_rw *io = req->async_data;
	struct io_rw *rw = io_kiocb_to_cmd(req);

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (ret < 0)
			ret = io->bytes_done;
		else
			ret += io->bytes_done;
	}

	if (req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw))
		__io_complete_rw(req, ret, issue_flags);
	else
		io_rw_done(&rw->kiocb, ret);

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		if (io_resubmit_prep(req))
			io_req_task_queue_reissue(req);
		else
			io_req_task_queue_fail(req, ret);
	}
}
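
/*
 * Fixed buffers: map the request's addr/len directly onto the bvec array of
 * the registered buffer (io_mapped_ubuf), so no page pinning happens at
 * issue time.
 */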

static int __io_import_fixed(struct io_kiocb *req, int ddir,
			     struct iov_iter *iter, struct io_mapped_ubuf *imu)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	size_t len = rw->len;
	u64 buf_end, buf_addr = rw->addr;
	size_t offset;

	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}

static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
			   unsigned int issue_flags)
{
	if (WARN_ON_ONCE(!req->imu))
		return -EFAULT;
	return __io_import_fixed(req, rw, iter, req->imu);
}
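
/*
 * Provided-buffer import helpers: with IOSQE_BUFFER_SELECT the data buffer
 * is not supplied directly by the SQE but picked from a buffer group at
 * issue time via io_buffer_select(); rw->addr/len are rewritten to point at
 * the chosen buffer.
 */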

#ifdef CONFIG_COMPAT
static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
				unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;
	void __user *buf;
	size_t len;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	len = clen;
	buf = io_buffer_select(req, &len, issue_flags);
	if (!buf)
		return -ENOBUFS;
	rw->addr = (unsigned long) buf;
	iov[0].iov_base = buf;
	rw->len = iov[0].iov_len = (compat_size_t) len;
	return 0;
}
#endif

static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct iovec __user *uiov = u64_to_user_ptr(rw->addr);
	void __user *buf;
	ssize_t len;

	if (copy_from_user(iov, uiov, sizeof(*uiov)))
		return -EFAULT;

	len = iov[0].iov_len;
	if (len < 0)
		return -EINVAL;
	buf = io_buffer_select(req, &len, issue_flags);
	if (!buf)
		return -ENOBUFS;
	rw->addr = (unsigned long) buf;
	iov[0].iov_base = buf;
	rw->len = iov[0].iov_len = len;
	return 0;
}

static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
				    unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
		iov[0].iov_base = u64_to_user_ptr(rw->addr);
		iov[0].iov_len = rw->len;
		return 0;
	}
	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_compat_import(req, iov, issue_flags);
#endif

	return __io_iov_buffer_select(req, iov, issue_flags);
}

static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
				       struct io_rw_state *s,
				       unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct iov_iter *iter = &s->iter;
	u8 opcode = req->opcode;
	struct iovec *iovec;
	void __user *buf;
	size_t sqe_len;
	ssize_t ret;

	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
		ret = io_import_fixed(req, ddir, iter, issue_flags);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return ERR_PTR(-ENOBUFS);
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		ret = import_single_range(ddir, buf, sqe_len, s->fast_iov, iter);
		if (ret)
			return ERR_PTR(ret);
		return NULL;
	}

	iovec = s->fast_iov;
	if (req->flags & REQ_F_BUFFER_SELECT) {
		ret = io_iov_buffer_select(req, iovec, issue_flags);
		if (ret)
			return ERR_PTR(ret);
		iov_iter_init(iter, ddir, iovec, 1, iovec->iov_len);
		return NULL;
	}

	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
			     req->ctx->compat);
	if (unlikely(ret < 0))
		return ERR_PTR(ret);
	return iovec;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct iovec **iovec, struct io_rw_state *s,
				  unsigned int issue_flags)
{
	*iovec = __io_import_iovec(rw, req, s, issue_flags);
	if (unlikely(IS_ERR(*iovec)))
		return PTR_ERR(*iovec);

	iov_iter_save_state(&s->iter, &s->iter_state);
	return 0;
}
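
/*
 * io_kiocb_ppos() only answers "does this file have a position at all";
 * loop_rw_iter() below passes its result straight to ->read()/->write().
 * io_kiocb_update_pos() earlier in the file is the variant that also latches
 * f_pos (REQ_F_CUR_POS) when the SQE used the current-position offset of -1.
 */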

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			iovec.iov_base = u64_to_user_ptr(rw->addr);
			iovec.iov_len = rw->len;
		}

		if (ddir == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, ppos);
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, ppos);
		}

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != iovec.iov_len)
			break;
	}

	return ret;
}
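
/*
 * To retry from io-wq or task_work the request needs a stable copy of its
 * iterator: io_req_map_rw() copies the iter (and, for non-bvec iters, the
 * iovec array) into req->async_data, and io_setup_async_rw() drives that
 * whenever a punt is needed.
 */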

static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
			  const struct iovec *fast_iov, struct iov_iter *iter)
{
	struct io_async_rw *io = req->async_data;

	memcpy(&io->s.iter, iter, sizeof(*iter));
	io->free_iovec = iovec;
	io->bytes_done = 0;
	/* can only be fixed buffers, no need to do anything */
	if (iov_iter_is_bvec(iter))
		return;
	if (!iovec) {
		unsigned iov_off = 0;

		io->s.iter.iov = io->s.fast_iov;
		if (iter->iov != fast_iov) {
			iov_off = iter->iov - fast_iov;
			io->s.iter.iov += iov_off;
		}
		if (io->s.fast_iov != fast_iov)
			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
			       sizeof(struct iovec) * iter->nr_segs);
	} else {
		req->flags |= REQ_F_NEED_CLEANUP;
	}
}

static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
			     struct io_rw_state *s, bool force)
{
	if (!force && !io_op_defs[req->opcode].prep_async)
		return 0;
	if (!req_has_async_data(req)) {
		struct io_async_rw *iorw;

		if (io_alloc_async_data(req)) {
			kfree(iovec);
			return -ENOMEM;
		}

		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
		iorw = req->async_data;
		/* we've copied and mapped the iter, ensure state is saved */
		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
	}
	return 0;
}
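
/*
 * readv/writev prep_async: copy the user iovec in at submission time (with
 * ->uring_lock held) so the request no longer depends on the caller keeping
 * the iovec array stable after submission returns.
 */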

static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
{
	struct io_async_rw *iorw = req->async_data;
	struct iovec *iov;
	int ret;

	/* submission path, ->uring_lock should already be taken */
	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
	if (unlikely(ret < 0))
		return ret;

	iorw->bytes_done = 0;
	iorw->free_iovec = iov;
	if (iov)
		req->flags |= REQ_F_NEED_CLEANUP;
	return 0;
}

int io_readv_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, READ);
}

int io_writev_prep_async(struct io_kiocb *req)
{
	return io_rw_prep_async(req, WRITE);
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with our waitqueue armed in the iocb.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return call_read_iter(file, &rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_read_all(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}
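
/*
 * Per-issue kiocb setup: (re)derive the nowait and IOPOLL completion
 * settings from the file and ring flags each time the request is issued,
 * since the same request may be retried from different contexts.
 */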

static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!file || !(file->f_mode & mode)))
		return -EBADF;

	if (!io_req_ffs_set(req))
		req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;

	kiocb->ki_flags = iocb_flags(file);
	ret = kiocb_set_rw_flags(kiocb, rw->flags);
	if (unlikely(ret))
		return ret;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}
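
/*
 * io_read() flow, roughly: import the iovec (or reuse the async copy on a
 * retry), try the read non-blocking first when issued with
 * IO_URING_F_NONBLOCK, and on -EAGAIN either punt to io-wq or, for buffered
 * reads of regular files, arm the page-unlock waitqueue retry. Partial reads
 * advance the saved iterator state and loop until done or punted.
 */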

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_async_rw *io;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		io = req->async_data;
		s = &io->s;

		/*
		 * Safe and required to re-import if we're using provided
		 * buffers, as we dropped the selected one before retry.
		 */
		if (io_do_buffer_select(req)) {
			ret = io_import_iovec(READ, req, &iovec, s, issue_flags);
			if (unlikely(ret < 0))
				return ret;
		}

		/*
		 * We come here from an earlier attempt, restore our state to
		 * match in case it doesn't. It's cheap enough that we don't
		 * need to make this conditional.
		 */
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_READ);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req))) {
			ret = io_setup_async_rw(req, iovec, s, true);
			return ret ?: -EAGAIN;
		}
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}

	ret = io_iter_do_read(rw, &s->iter);

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* if we can poll, just do that */
		if (req->opcode == IORING_OP_READ && file_can_poll(req->file))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		goto out_free;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&s->iter, &s->iter_state);

	ret2 = io_setup_async_rw(req, iovec, s, true);
	if (ret2)
		return ret2;

	iovec = NULL;
	io = req->async_data;
	s = &io->s;
	/*
	 * Now use our persistent iterator and state, if we aren't already.
	 * We've restored and mapped the iter to match.
	 */

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&s->iter, ret);
		if (!iov_iter_count(&s->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&s->iter, &s->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &s->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&s->iter, &s->iter_state);
	} while (ret > 0);
done:
	kiocb_done(req, ret, issue_flags);
out_free:
	/* it's faster to check here than to delegate to kfree */
	if (iovec)
		kfree(iovec);
	return IOU_ISSUE_SKIP_COMPLETE;
}
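
/*
 * io_write() mirrors io_read() minus the waitqueue retry: writes that can't
 * proceed non-blocking are copied to async data (copy_iov) and punted, and
 * regular-file writes hold sb freeze protection across an async completion
 * (released in kiocb_end_write()).
 */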

int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req);
	struct io_rw_state __s, *s = &__s;
	struct iovec *iovec;
	struct kiocb *kiocb = &rw->kiocb;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	ssize_t ret, ret2;
	loff_t *ppos;

	if (!req_has_async_data(req)) {
		ret = io_import_iovec(WRITE, req, &iovec, s, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	} else {
		struct io_async_rw *io = req->async_data;

		s = &io->s;
		iov_iter_restore(&s->iter, &s->iter_state);
		iovec = NULL;
	}
	ret = io_rw_init_file(req, FMODE_WRITE);
	if (unlikely(ret)) {
		kfree(iovec);
		return ret;
	}
	req->cqe.res = iov_iter_count(&s->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req)))
			goto copy_iov;

		/* file path doesn't support NOWAIT for non-direct_IO */
		if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
		    (req->flags & REQ_F_ISREG))
			goto copy_iov;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		goto out_free;

	/*
	 * Open-code file_start_write here to grab freeze protection,
	 * which will be released by another thread in
	 * io_complete_rw(). Fool lockdep by telling it the lock got
	 * released so that it doesn't complain about the held lock when
	 * we return to userspace.
	 */
	if (req->flags & REQ_F_ISREG) {
		sb_start_write(file_inode(req->file)->i_sb);
		__sb_writers_release(file_inode(req->file)->i_sb,
					SB_FREEZE_WRITE);
	}
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = call_write_iter(req->file, kiocb, &s->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto copy_iov;
done:
		kiocb_done(req, ret2, issue_flags);
		ret = IOU_ISSUE_SKIP_COMPLETE;
	} else {
copy_iov:
		iov_iter_restore(&s->iter, &s->iter_state);
		ret = io_setup_async_rw(req, iovec, s, false);
		return ret ?: -EAGAIN;
	}
out_free:
	/* it's reportedly faster than delegating the null check to kfree() */
	if (iovec)
		kfree(iovec);
	return ret;
}

static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd))
		__io_commit_cqring_flush(ctx);

	if (ctx->flags & IORING_SETUP_SQPOLL)
		io_cqring_wake(ctx);
}
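
/*
 * io_do_iopoll() makes two passes over ->iopoll_list: first it polls the
 * driver for requests that haven't completed yet, then it walks the list
 * again in submission order, posts CQEs for the leading run of completed
 * requests, and cuts them off the list in one go.
 */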

int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = BLK_POLL_NOSLEEP;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		struct io_rw *rw = io_kiocb_to_cmd(req);
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		ret = rw->kiocb.ki_filp->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		if (unlikely(req->flags & REQ_F_CQE_SKIP))
			continue;

		req->cqe.flags = io_put_kbuf(req, 0);
		__io_fill_cqe_req(req->ctx, req);
	}

	if (unlikely(!nr_events))
		return 0;

	io_commit_cqring(ctx);
	io_cqring_ev_posted_iopoll(ctx);
	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);
	io_free_batch_list(ctx, pos);
	return nr_events;
}