Merge branch 'topic/tasklet-convert' into for-linus
diff --git a/fs/io_uring.c b/fs/io_uring.c [linux-2.6-microblaze.git]
index 1f2f31d..91e2cc8 100644
@@ -540,7 +540,6 @@ enum {
        REQ_F_ISREG_BIT,
        REQ_F_COMP_LOCKED_BIT,
        REQ_F_NEED_CLEANUP_BIT,
-       REQ_F_OVERFLOW_BIT,
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_NO_FILE_TABLE_BIT,
@@ -583,8 +582,6 @@ enum {
        REQ_F_COMP_LOCKED       = BIT(REQ_F_COMP_LOCKED_BIT),
        /* needs cleanup */
        REQ_F_NEED_CLEANUP      = BIT(REQ_F_NEED_CLEANUP_BIT),
-       /* in overflow list */
-       REQ_F_OVERFLOW          = BIT(REQ_F_OVERFLOW_BIT),
        /* already went through poll handler */
        REQ_F_POLLED            = BIT(REQ_F_POLLED_BIT),
        /* buffer already selected */
@@ -946,7 +943,8 @@ static void io_get_req_task(struct io_kiocb *req)
 
 static inline void io_clean_op(struct io_kiocb *req)
 {
-       if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+       if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
+                         REQ_F_INFLIGHT))
                __io_clean_op(req);
 }
 
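io_clean_op() is the inlined fast path: a single mask test decides whether the out-of-line __io_clean_op() is worth calling, so requests with nothing to clean pay only one branch. Folding REQ_F_INFLIGHT into that mask is what allows the dedicated teardown in io_dismantle_req() to be deleted below. A runnable sketch of the fast-path/slow-path split (hypothetical names, not the kernel's):

#include <stdio.h>

#define BIT(nr) (1U << (nr))
#define F_CLEANUP   BIT(0)
#define F_SELECTED  BIT(1)
#define F_INFLIGHT  BIT(2)
#define NEEDS_SLOW  (F_CLEANUP | F_SELECTED | F_INFLIGHT)

struct req { unsigned int flags; };

static void __clean_op(struct req *r)   /* out-of-line slow path */
{
        printf("slow cleanup, flags=%#x\n", r->flags);
        r->flags &= ~NEEDS_SLOW;
}

static inline void clean_op(struct req *r)
{
        if (r->flags & NEEDS_SLOW)      /* one test covers every rare case */
                __clean_op(r);
}

int main(void)
{
        struct req r = { .flags = F_INFLIGHT };

        clean_op(&r);                   /* takes the slow path once */
        clean_op(&r);                   /* now a cheap no-op */
        return 0;
}
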
@@ -1366,7 +1364,6 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
                req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
                                                compl.list);
                list_move(&req->compl.list, &list);
-               req->flags &= ~REQ_F_OVERFLOW;
                if (cqe) {
                        WRITE_ONCE(cqe->user_data, req->user_data);
                        WRITE_ONCE(cqe->res, req->result);
@@ -1419,7 +1416,6 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
                        ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
                }
                io_clean_op(req);
-               req->flags |= REQ_F_OVERFLOW;
                req->result = res;
                req->compl.cflags = cflags;
                refcount_inc(&req->refs);
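
These two hunks were the flag's only producers and consumers: __io_cqring_fill_event() parks a completion on ctx->cq_overflow_list when the CQ ring is full, and io_cqring_overflow_flush() later drains that list into freed-up CQEs. Presence on the list already identifies an overflowed request, so the mirroring REQ_F_OVERFLOW bit was redundant. A simplified, runnable sketch of the park-then-flush scheme (toy ring and list, not the real structures; the real code also preserves res and cflags):

#include <stdio.h>

#define RING_SZ 4

struct cqe { unsigned long user_data; long res; };

static struct cqe ring[RING_SZ];
static unsigned int head, tail;         /* tail - head = entries pending */

static long overflow[16];               /* stand-in for cq_overflow_list */
static int nr_overflow;

static void fill_event(unsigned long user_data, long res)
{
        if (tail - head < RING_SZ) {
                struct cqe *c = &ring[tail++ % RING_SZ];
                c->user_data = user_data;
                c->res = res;
        } else {
                overflow[nr_overflow++] = user_data; /* park until there is room */
        }
}

static void overflow_flush(void)
{
        while (nr_overflow && tail - head < RING_SZ)
                fill_event(overflow[--nr_overflow], 0);
}

int main(void)
{
        for (int i = 0; i < 6; i++)
                fill_event(i, 0);        /* last two overflow */
        head += 2;                       /* consumer reaps two CQEs */
        overflow_flush();                /* parked entries drain into the ring */
        printf("ring holds %u, overflow holds %d\n", tail - head, nr_overflow);
        return 0;
}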
@@ -1563,17 +1559,6 @@ static bool io_dismantle_req(struct io_kiocb *req)
        if (req->file)
                io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
 
-       if (req->flags & REQ_F_INFLIGHT) {
-               struct io_ring_ctx *ctx = req->ctx;
-               unsigned long flags;
-
-               spin_lock_irqsave(&ctx->inflight_lock, flags);
-               list_del(&req->inflight_entry);
-               if (waitqueue_active(&ctx->inflight_wait))
-                       wake_up(&ctx->inflight_wait);
-               spin_unlock_irqrestore(&ctx->inflight_lock, flags);
-       }
-
        return io_req_clean_work(req);
 }
 
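(The teardown removed here is not lost: the same block, plus a fix that also clears REQ_F_INFLIGHT, reappears in the __io_clean_op() hunk below, reached through the REQ_F_INFLIGHT test added to io_clean_op() above.)
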
@@ -2841,10 +2826,8 @@ static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
        if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
                if (req->flags & REQ_F_BUFFER_SELECT) {
                        buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
-                       if (IS_ERR(buf)) {
-                               *iovec = NULL;
+                       if (IS_ERR(buf))
                                return PTR_ERR(buf);
-                       }
                        req->rw.len = sqe_len;
                }
 
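io_rw_buffer_select() returns either a usable buffer or an errno encoded in the pointer itself, which is why a bare IS_ERR() test is all the caller needs; the *iovec = NULL store dropped here is presumably covered earlier in the function by this point in the series. A userspace imitation of the kernel's error-pointer encoding (the helpers mirror the kernel's definitions; buffer_select() is a made-up stand-in):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        /* the top 4 KiB of the address space encode -MAX_ERRNO..-1 */
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *buffer_select(int fail)
{
        static char buf[64];
        return fail ? ERR_PTR(-ENOBUFS) : buf; /* pointer or encoded errno */
}

int main(void)
{
        void *buf = buffer_select(1);

        if (IS_ERR(buf)) {              /* one test, no extra NULL out-param */
                printf("error %ld\n", PTR_ERR(buf));
                return 1;
        }
        return 0;
}
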
@@ -3219,6 +3202,7 @@ done:
        kiocb_done(kiocb, ret, cs);
        ret = 0;
 out_free:
+       /* it's reportedly faster than delegating the null check to kfree() */
        if (iovec)
                kfree(iovec);
        return ret;
@@ -3315,6 +3299,7 @@ copy_iov:
                        return -EAGAIN;
        }
 out_free:
+       /* it's reportedly faster than delegating the null check to kfree() */
        if (iovec)
                kfree(iovec);
        return ret;
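
kfree(NULL) is already a no-op, so the NULL check is not about correctness; as the new comment says, skipping the call entirely is reportedly cheaper on the common path where no iovec was ever allocated. The userspace analogue, where free(NULL) is likewise legal:

#include <stdlib.h>

static void free_iovec(void *iovec)
{
        /* free(NULL) is a no-op too; the test only skips the call
         * overhead on the common no-allocation path. */
        if (iovec)
                free(iovec);
}

int main(void)
{
        free_iovec(NULL);               /* fast path: no call at all */
        free_iovec(malloc(16));         /* allocation path */
        return 0;
}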
@@ -5634,6 +5619,18 @@ static void __io_clean_op(struct io_kiocb *req)
                }
                req->flags &= ~REQ_F_NEED_CLEANUP;
        }
+
+       if (req->flags & REQ_F_INFLIGHT) {
+               struct io_ring_ctx *ctx = req->ctx;
+               unsigned long flags;
+
+               spin_lock_irqsave(&ctx->inflight_lock, flags);
+               list_del(&req->inflight_entry);
+               if (waitqueue_active(&ctx->inflight_wait))
+                       wake_up(&ctx->inflight_wait);
+               spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+               req->flags &= ~REQ_F_INFLIGHT;
+       }
 }
 
 static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
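
The block added here is the one deleted from io_dismantle_req(), with one improvement: REQ_F_INFLIGHT is cleared after the unlink, so the cleanup cannot run twice for the same request. The shape is classic: take an IRQ-safe lock, unlink, wake anyone waiting on the inflight list, unlock. A rough userspace analogue, substituting a mutex and condition variable for the spinlock/waitqueue pair (the kernel's waitqueue_active() check merely skips wake_up() when no one waits; broadcasting to an empty condvar is similarly harmless):

#include <pthread.h>

struct node { struct node *next, *prev; };

static pthread_mutex_t inflight_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  inflight_wait = PTHREAD_COND_INITIALIZER;
static struct node inflight = { &inflight, &inflight }; /* circular list head */

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

/* Analogue of the REQ_F_INFLIGHT branch above: unlink under the lock,
 * then wake waiters watching the inflight list drain. */
static void clean_inflight(struct node *req)
{
        pthread_mutex_lock(&inflight_lock);
        list_del(req);
        pthread_cond_broadcast(&inflight_wait); /* ~ wake_up(&ctx->inflight_wait) */
        pthread_mutex_unlock(&inflight_lock);
}

int main(void)
{
        struct node req = { &inflight, &inflight };

        inflight.next = inflight.prev = &req;   /* insert the only element */
        clean_inflight(&req);                   /* list is empty again */
        return 0;
}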
@@ -8108,33 +8105,9 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                /* We need to keep going until we don't find a matching req */
                if (!cancel_req)
                        break;
-
-               if (cancel_req->flags & REQ_F_OVERFLOW) {
-                       spin_lock_irq(&ctx->completion_lock);
-                       list_del(&cancel_req->compl.list);
-                       cancel_req->flags &= ~REQ_F_OVERFLOW;
-
-                       io_cqring_mark_overflow(ctx);
-                       WRITE_ONCE(ctx->rings->cq_overflow,
-                               atomic_inc_return(&ctx->cached_cq_overflow));
-                       io_commit_cqring(ctx);
-                       spin_unlock_irq(&ctx->completion_lock);
-
-                       /*
-                        * Put inflight ref and overflow ref. If that's
-                        * all we had, then we're done with this request.
-                        */
-                       if (refcount_sub_and_test(2, &cancel_req->refs)) {
-                               io_free_req(cancel_req);
-                               finish_wait(&ctx->inflight_wait, &wait);
-                               continue;
-                       }
-               } else {
-                       /* cancel this request, or head link requests */
-                       io_attempt_cancel(ctx, cancel_req);
-                       io_put_req(cancel_req);
-               }
-
+               /* cancel this request, or head link requests */
+               io_attempt_cancel(ctx, cancel_req);
+               io_put_req(cancel_req);
                schedule();
                finish_wait(&ctx->inflight_wait, &wait);
        }
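
With the overflow special case gone, the cancellation loop collapses to one uniform path: find a matching in-flight request, cancel it, drop the reference taken while finding it, then retry until nothing matches; overflowed requests are handled like any other because flushing now sends them back through the normal completion path. Schematically (toy request table, not the kernel helpers):

#include <stdio.h>

struct req { int live; };

static struct req reqs[3] = { {1}, {1}, {1} };

static struct req *find_matching(void)
{
        for (int i = 0; i < 3; i++)
                if (reqs[i].live)
                        return &reqs[i];
        return NULL;                    /* nothing left: caller breaks out */
}

static void attempt_cancel(struct req *r) { r->live = 0; }

int main(void)
{
        for (;;) {
                struct req *cancel_req = find_matching();

                /* keep going until we don't find a matching req */
                if (!cancel_req)
                        break;
                attempt_cancel(cancel_req); /* one path for every request now */
        }
        printf("all requests cancelled\n");
        return 0;
}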