Merge tag 'x86-build-2020-06-01' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-microblaze.git] / fs/io_uring.c
index 70ae7e8..bb25e39 100644
@@ -619,6 +619,8 @@ struct io_kiocb {
        bool                            needs_fixed_file;
        u8                              opcode;
 
+       u16                             buf_index;
+
        struct io_ring_ctx      *ctx;
        struct list_head        list;
        unsigned int            flags;
@@ -924,6 +926,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
                goto err;
 
        ctx->flags = p->flags;
+       init_waitqueue_head(&ctx->sqo_wait);
        init_waitqueue_head(&ctx->cq_wait);
        INIT_LIST_HEAD(&ctx->cq_overflow_list);
        init_completion(&ctx->completions[0]);
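Initializing sqo_wait here, at ring setup time, rather than in io_sq_offload_start() (see the final hunk below) keeps the waitqueue valid for the whole lifetime of the ctx, including rings created without SQPOLL. The rule it relies on: a waitqueue head must be initialized before anything sleeps on or wakes it. A minimal generic sketch of that ordering, not io_uring code (example_wq, data_ready and the function names are made up for illustration):

#include <linux/wait.h>

static wait_queue_head_t example_wq;
static bool data_ready;

static void example_setup(void)
{
	/* must run before any wait_event()/wake_up() on example_wq */
	init_waitqueue_head(&example_wq);
}

static void example_consumer(void)
{
	/* sleeps until woken and the condition evaluates true */
	wait_event(example_wq, READ_ONCE(data_ready));
}

static void example_producer(void)
{
	WRITE_ONCE(data_ready, true);
	wake_up(&example_wq);
}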
@@ -2100,9 +2103,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
        req->rw.addr = READ_ONCE(sqe->addr);
        req->rw.len = READ_ONCE(sqe->len);
-       /* we own ->private, reuse it for the buffer index  / buffer ID */
-       req->rw.kiocb.private = (void *) (unsigned long)
-                                       READ_ONCE(sqe->buf_index);
+       req->buf_index = READ_ONCE(sqe->buf_index);
        return 0;
 }
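The hunks that follow replace the old trick of smuggling this 16-bit SQE value through req->rw.kiocb.private (via unsigned long casts) with the dedicated req->buf_index field added above. The u16 width matches the SQE field itself, which doubles as the provided-buffer group ID when IOSQE_BUFFER_SELECT is used. A simplified sketch of that slice of the SQE (struct name invented here; the authoritative layout is in include/uapi/linux/io_uring.h):

#include <linux/types.h>

/* Illustrative sketch of the relevant slice of struct io_uring_sqe; the real
 * definition has more fields around this union.
 */
struct sqe_buf_slot_sketch {
	union {
		__u16	buf_index;	/* slot in the registered (fixed) buffer table */
		__u16	buf_group;	/* buffer group ID for IOSQE_BUFFER_SELECT */
	};
};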
 
@@ -2145,7 +2146,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
        struct io_ring_ctx *ctx = req->ctx;
        size_t len = req->rw.len;
        struct io_mapped_ubuf *imu;
-       unsigned index, buf_index;
+       u16 index, buf_index;
        size_t offset;
        u64 buf_addr;
 
@@ -2153,7 +2154,7 @@ static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
        if (unlikely(!ctx->user_bufs))
                return -EFAULT;
 
-       buf_index = (unsigned long) req->rw.kiocb.private;
+       buf_index = req->buf_index;
        if (unlikely(buf_index >= ctx->nr_user_bufs))
                return -EFAULT;
 
@@ -2269,10 +2270,10 @@ static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
                                        bool needs_lock)
 {
        struct io_buffer *kbuf;
-       int bgid;
+       u16 bgid;
 
        kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
-       bgid = (int) (unsigned long) req->rw.kiocb.private;
+       bgid = req->buf_index;
        kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
        if (IS_ERR(kbuf))
                return kbuf;
@@ -2363,7 +2364,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
        }
 
        /* buffer index only valid with fixed read/write, or buffer select  */
-       if (req->rw.kiocb.private && !(req->flags & REQ_F_BUFFER_SELECT))
+       if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
                return -EINVAL;
 
        if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
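The -EINVAL check above enforces that a nonzero buf_index only accompanies the two opcode families that actually consume it: fixed (registered-buffer) reads/writes, and requests using IOSQE_BUFFER_SELECT, where the same SQE slot carries the buffer group ID. For context, a userspace view of the fixed-buffer case via liburing, illustrative only and not part of this patch (ring, fd and iov are assumed to be set up by the caller; error handling omitted):

#include <liburing.h>
#include <sys/uio.h>

static int read_fixed_example(struct io_uring *ring, int fd, struct iovec *iov)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int res;

	/* register the buffer; its table slot becomes the buf_index */
	io_uring_register_buffers(ring, iov, 1);

	/* READ_FIXED carries buf_index = 0 (last argument) to pick that slot */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read_fixed(sqe, fd, iov->iov_base, iov->iov_len, 0, 0);

	io_uring_submit(ring);
	io_uring_wait_cqe(ring, &cqe);
	res = cqe->res;
	io_uring_cqe_seen(ring, cqe);
	return res;
}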
@@ -2771,11 +2772,8 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock)
        poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
        poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
 
-       if (sp->len) {
+       if (sp->len)
                ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
-               if (force_nonblock && ret == -EAGAIN)
-                       return -EAGAIN;
-       }
 
        io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
        req->flags &= ~REQ_F_NEED_CLEANUP;
@@ -4137,12 +4135,14 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
        req->result = mask;
        init_task_work(&req->task_work, func);
        /*
-        * If this fails, then the task is exiting. Punt to one of the io-wq
-        * threads to ensure the work gets run, we can't always rely on exit
-        * cancelation taking care of this.
+        * If this fails, then the task is exiting. When a task exits, the
+        * work gets canceled, so just cancel this request as well instead
+        * of executing it. We can't safely execute it anyway, as we may not
+        * have the state needed to run it.
         */
        ret = task_work_add(tsk, &req->task_work, true);
        if (unlikely(ret)) {
+               WRITE_ONCE(poll->canceled, true);
                tsk = io_wq_get_task(req->ctx->io_wq);
                task_work_add(tsk, &req->task_work, true);
        }
@@ -5013,12 +5013,13 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
                return 0;
 
-       if (!req->io && io_alloc_async_ctx(req))
-               return -EAGAIN;
-
-       ret = io_req_defer_prep(req, sqe);
-       if (ret < 0)
-               return ret;
+       if (!req->io) {
+               if (io_alloc_async_ctx(req))
+                       return -EAGAIN;
+               ret = io_req_defer_prep(req, sqe);
+               if (ret < 0)
+                       return ret;
+       }
 
        spin_lock_irq(&ctx->completion_lock);
        if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
@@ -5305,7 +5306,8 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        if (ret)
                return ret;
 
-       if (ctx->flags & IORING_SETUP_IOPOLL) {
+       /* If the op doesn't have a file, we're not polling for it */
+       if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
                const bool in_async = io_wq_current_is_worker();
 
                if (req->result == -EAGAIN)
@@ -5606,9 +5608,15 @@ fail_req:
                        io_double_put_req(req);
                }
        } else if (req->flags & REQ_F_FORCE_ASYNC) {
-               ret = io_req_defer_prep(req, sqe);
-               if (unlikely(ret < 0))
-                       goto fail_req;
+               if (!req->io) {
+                       ret = -EAGAIN;
+                       if (io_alloc_async_ctx(req))
+                               goto fail_req;
+                       ret = io_req_defer_prep(req, sqe);
+                       if (unlikely(ret < 0))
+                               goto fail_req;
+               }
+
                /*
                 * Never try inline submit if IOSQE_ASYNC is set, go straight
                 * to async execution.
@@ -6024,6 +6032,7 @@ static int io_sq_thread(void *data)
                                finish_wait(&ctx->sqo_wait, &wait);
 
                                ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+                               ret = 0;
                                continue;
                        }
                        finish_wait(&ctx->sqo_wait, &wait);
@@ -6837,7 +6846,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
 {
        int ret;
 
-       init_waitqueue_head(&ctx->sqo_wait);
        mmgrab(current->mm);
        ctx->sqo_mm = current->mm;