diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5a0fd6b..bf548af 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1279,8 +1279,17 @@ static void io_prep_async_link(struct io_kiocb *req)
 {
        struct io_kiocb *cur;
 
-       io_for_each_link(cur, req)
-               io_prep_async_work(cur);
+       if (req->flags & REQ_F_LINK_TIMEOUT) {
+               struct io_ring_ctx *ctx = req->ctx;
+
+               spin_lock_irq(&ctx->completion_lock);
+               io_for_each_link(cur, req)
+                       io_prep_async_work(cur);
+               spin_unlock_irq(&ctx->completion_lock);
+       } else {
+               io_for_each_link(cur, req)
+                       io_prep_async_work(cur);
+       }
 }
 
 static void io_queue_async_work(struct io_kiocb *req)
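
The locking split above is the point of the hunk: when the head of a link
chain carries REQ_F_LINK_TIMEOUT, the linked timeout can fire concurrently
and fail/unlink requests from that same chain under ctx->completion_lock,
so walking the chain with io_for_each_link() has to hold the lock as well.
Plain links keep the unlocked fast path, since only the submitting task
touches them at this point.

A minimal userspace sketch of the race being closed, using pthreads; the
names (struct req, fail_links, prep_links) are invented for illustration
and are not io_uring API:

#include <pthread.h>
#include <stddef.h>

struct req {
	struct req *link;
	int prepped;
};

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;

/* timeout side: fails the chain under the lock, in the way a linked
 * timeout cancellation would */
void fail_links(struct req *head)
{
	pthread_mutex_lock(&completion_lock);
	while (head) {
		struct req *next = head->link;
		head->link = NULL;	/* a lockless walker racing here is lost */
		head = next;
	}
	pthread_mutex_unlock(&completion_lock);
}

/* prep side: takes the same lock only when a concurrent canceller exists */
void prep_links(struct req *head, int has_link_timeout)
{
	struct req *cur;

	if (has_link_timeout)
		pthread_mutex_lock(&completion_lock);
	for (cur = head; cur; cur = cur->link)
		cur->prepped = 1;
	if (has_link_timeout)
		pthread_mutex_unlock(&completion_lock);
}
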
@@ -1950,9 +1959,13 @@ static void tctx_task_work(struct callback_head *cb)
                        node = next;
                }
                if (wq_list_empty(&tctx->task_list)) {
+                       spin_lock_irq(&tctx->task_lock);
                        clear_bit(0, &tctx->task_state);
-                       if (wq_list_empty(&tctx->task_list))
+                       if (wq_list_empty(&tctx->task_list)) {
+                               spin_unlock_irq(&tctx->task_lock);
                                break;
+                       }
+                       spin_unlock_irq(&tctx->task_lock);
                        /* another tctx_task_work() is enqueued, yield */
                        if (test_and_set_bit(0, &tctx->task_state))
                                break;
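
Previously the bit clear and the list re-check ran without tctx->task_lock.
io_req_task_work_add() inserts into task_list under that lock and then does
test_and_set_bit(): if the adder saw the bit still set (and so did not
schedule a new task_work run) while the worker's unlocked re-check missed
the just-added entry, the item would sit on the list with nobody left to
process it. Doing clear_bit() and the re-check inside task_lock closes the
window: either the re-check observes the new entry, or the adder's
test_and_set_bit() finds the bit clear and queues a fresh run.

The same handoff in isolation, as a userspace sketch with C11 atomics and
pthreads; struct worklist, submit() and drain_done() are made-up names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node { struct node *next; };

struct worklist {
	pthread_mutex_t lock;	/* stands in for tctx->task_lock */
	struct node *head;	/* stands in for tctx->task_list */
	atomic_bool pending;	/* stands in for task_state bit 0 */
};

/* adder side, like io_req_task_work_add(): returns true when the caller
 * must schedule a worker run */
bool submit(struct worklist *wl, struct node *n)
{
	pthread_mutex_lock(&wl->lock);
	n->next = wl->head;
	wl->head = n;
	pthread_mutex_unlock(&wl->lock);
	return !atomic_exchange(&wl->pending, true);
}

/* worker side, the tail of the drain loop: returns true when it is safe
 * to stop because nothing was added after the list went empty */
bool drain_done(struct worklist *wl)
{
	bool stop;

	pthread_mutex_lock(&wl->lock);
	atomic_store(&wl->pending, false);
	stop = (wl->head == NULL);	/* re-check under the same lock */
	pthread_mutex_unlock(&wl->lock);
	return stop;
}

When drain_done() returns false, the worker re-sets pending with
atomic_exchange() and keeps draining, mirroring the test_and_set_bit()
yield logic above.
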
@@ -2047,6 +2060,12 @@ static void io_req_task_queue(struct io_kiocb *req)
        io_req_task_work_add(req);
 }
 
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+       req->io_task_work.func = io_queue_async_work;
+       io_req_task_work_add(req);
+}
+
 static inline void io_queue_next(struct io_kiocb *req)
 {
        struct io_kiocb *nxt = io_req_find_next(req);
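
io_req_task_queue_reissue() punts the request back to its owning task's
task_work, with io_queue_async_work as the callback, so the actual io-wq
queueing happens in the submitting task's context rather than in whatever
context noticed the -EAGAIN. That matters because preparing a reissue may
have to re-import user iovecs, which is only safe against the right mm.

A toy model of why running the callback from the owner's task_work gives
that guarantee; tw_item and task_loop are invented names:

#include <pthread.h>
#include <stddef.h>

struct request;				/* stand-in for struct io_kiocb */
typedef void (*tw_func)(struct request *);

struct tw_item {
	struct request *req;
	tw_func func;			/* like req->io_task_work.func */
	struct tw_item *next;
};

/*
 * Each task drains only its own queue, so func always runs in the context
 * of the task that owns the request, which is the property the reissue
 * path needs.
 */
void task_loop(struct tw_item **queue, pthread_mutex_t *lock)
{
	for (;;) {
		struct tw_item *it;

		pthread_mutex_lock(lock);
		it = *queue;
		if (it)
			*queue = it->next;
		pthread_mutex_unlock(lock);
		if (!it)
			break;
		it->func(it->req);	/* e.g. hand off to the async workers */
	}
}
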
@@ -2235,7 +2254,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                    !(req->flags & REQ_F_DONT_REISSUE)) {
                        req->iopoll_completed = 0;
                        req_ref_get(req);
-                       io_queue_async_work(req);
+                       io_req_task_queue_reissue(req);
                        continue;
                }
 
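
First user of the new helper: IOPOLL completions may be reaped from a
context other than the submitting task's, so queueing async work directly
from here could prepare the reissue against the wrong context. Going
through task_work moves it back to req->task.
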
@@ -2428,6 +2447,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
         */
        if (percpu_ref_is_dying(&ctx->refs))
                return false;
+       /*
+        * Play it safe and assume not safe to re-import and reissue if we're
+        * not in the original thread group (or in task context).
+        */
+       if (!same_thread_group(req->task, current) || !in_task())
+               return false;
        return true;
 }
 #else
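
This is the matching guard on the decision itself: a reissue is only
considered safe if current belongs to the submitting task's thread group
and we are in process context at all (!in_task() covers completions that
fire from hard or soft interrupt context). Anywhere else the original mm
is not available, so re-importing the user buffers cannot be trusted.
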
@@ -2758,7 +2783,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req)) {
                        req_ref_get(req);
-                       io_queue_async_work(req);
+                       io_req_task_queue_reissue(req);
                } else {
                        int cflags = 0;
 
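
kiocb_done() gets the same substitution, so the REQ_F_REISSUE path from
regular read/write completion also goes through task_work instead of
queueing io-wq work straight from the completion callback.
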
@@ -4914,7 +4939,6 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
        if (req->poll.events & EPOLLONESHOT)
                flags = 0;
        if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-               io_poll_remove_waitqs(req);
                req->poll.done = true;
                flags = 0;
        }
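
On the CQ-overflow branch, io_poll_complete() no longer tears down the
waitqueues itself. Doing that from here, while the request can still be
reached through its poll entries, looks racy; with the rest of this diff
the double-poll entry is dropped on the completion side in
io_poll_task_func() (next hunk), and this branch only marks the poll done.
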
@@ -4937,6 +4961,7 @@ static void io_poll_task_func(struct io_kiocb *req)
 
                done = io_poll_complete(req, req->result);
                if (done) {
+                       io_poll_remove_double(req);
                        hash_del(&req->hash_node);
                } else {
                        req->result = 0;
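
When the poll is done, io_poll_task_func() now removes the second (double)
poll entry before unhashing the request, still under ctx->completion_lock.
That closes the window where a wakeup arriving through the double entry
could find a request that has already been completed and recycled.
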
@@ -5124,7 +5149,7 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
                ipt->error = -EINVAL;
 
        spin_lock_irq(&ctx->completion_lock);
-       if (ipt->error)
+       if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
                io_poll_remove_double(req);
        if (likely(poll->head)) {
                spin_lock(&poll->head->lock);
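
__io_arm_poll_handler() now drops the double entry not only on arming
failure but also when the poll already fired (mask non-zero) on a oneshot
request: a oneshot poll that triggers immediately completes inline instead
of staying armed, so its second entry must not be left on a waitqueue.
Multishot polls keep the entry, since they stay armed after an event.
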
@@ -5196,7 +5221,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
                                        io_async_wake);
        if (ret || ipt.error) {
-               io_poll_remove_double(req);
                spin_unlock_irq(&ctx->completion_lock);
                if (ret)
                        return IO_APOLL_READY;
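
With the previous hunk doing the removal inside __io_arm_poll_handler(),
under completion_lock and with knowledge of whether the poll fired, the
unconditional io_poll_remove_double() here becomes redundant: apoll always
arms with EPOLLONESHOT set, so both the ready (ret != 0) and the ipt.error
cases are already covered inside the handler.
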