io_uring: always wait for sqd exited when stopping SQPOLL thread
diff --git a/fs/io_uring.c b/fs/io_uring.c
index cc9a2cc..62f998b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -985,6 +985,7 @@ static const struct io_op_def io_op_defs[] = {
        [IORING_OP_UNLINKAT] = {},
 };
 
+static bool io_disarm_next(struct io_kiocb *req);
 static void io_uring_del_task_file(unsigned long index);
 static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
                                         struct task_struct *task,
@@ -1525,15 +1526,14 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
        __io_cqring_fill_event(req, res, 0);
 }
 
-static inline void io_req_complete_post(struct io_kiocb *req, long res,
-                                       unsigned int cflags)
+static void io_req_complete_post(struct io_kiocb *req, long res,
+                                unsigned int cflags)
 {
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;
 
        spin_lock_irqsave(&ctx->completion_lock, flags);
        __io_cqring_fill_event(req, res, cflags);
-       io_commit_cqring(ctx);
        /*
         * If we're the last reference to this request, add to our locked
         * free_list cache.
@@ -1541,19 +1541,26 @@ static inline void io_req_complete_post(struct io_kiocb *req, long res,
        if (refcount_dec_and_test(&req->refs)) {
                struct io_comp_state *cs = &ctx->submit_state.comp;
 
+               if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
+                       if (req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_FAIL_LINK))
+                               io_disarm_next(req);
+                       if (req->link) {
+                               io_req_task_queue(req->link);
+                               req->link = NULL;
+                       }
+               }
                io_dismantle_req(req);
                io_put_task(req->task, 1);
                list_add(&req->compl.list, &cs->locked_free_list);
                cs->locked_free_nr++;
        } else
                req = NULL;
+       io_commit_cqring(ctx);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
-
        io_cqring_ev_posted(ctx);
-       if (req) {
-               io_queue_next(req);
+
+       if (req)
                percpu_ref_put(&ctx->refs);
-       }
 }
 
 static void io_req_complete_state(struct io_kiocb *req, long res,
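
The hunk above reworks io_req_complete_post(): linked-request handling
now happens under completion_lock (disarming the next request where
needed and queueing req->link for task work), and io_commit_cqring() is
deferred until after that block, so any CQEs generated while disarming
links are published by the same commit as the request's own CQE, with
io_cqring_ev_posted() firing only once afterwards. Below is a minimal
userspace sketch of that fill-then-commit-once ordering; every name in
it (cq_stub, cq_fill, complete_req) is a hypothetical stand-in, not
kernel API.

```c
#include <pthread.h>

struct cq_stub {
	pthread_mutex_t lock;	/* stand-in for ctx->completion_lock */
	unsigned int tail;	/* CQE count visible to userspace */
	unsigned int filled;	/* CQEs written but not yet published */
};

/* Write one CQE; caller must hold cq->lock. */
static void cq_fill(struct cq_stub *cq)
{
	cq->filled++;
}

static void complete_req(struct cq_stub *cq, int nr_link_cqes)
{
	pthread_mutex_lock(&cq->lock);
	cq_fill(cq);			/* the request's own CQE */
	while (nr_link_cqes--)
		cq_fill(cq);		/* CQEs added while disarming links */
	cq->tail = cq->filled;		/* one commit publishes them all */
	pthread_mutex_unlock(&cq->lock);
	/* notify waiters only here, like io_cqring_ev_posted() */
}
```
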
@@ -6320,6 +6327,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        refcount_set(&req->refs, 2);
        req->task = current;
        req->result = 0;
+       req->work.list.next = NULL;
+       req->work.creds = NULL;
+       req->work.flags = 0;
 
        /* enforce forwards compatibility on users */
        if (unlikely(sqe_flags & ~SQE_VALID_FLAGS)) {
@@ -6337,17 +6347,13 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
            !io_op_defs[req->opcode].buffer_select)
                return -EOPNOTSUPP;
 
-       req->work.list.next = NULL;
        personality = READ_ONCE(sqe->personality);
        if (personality) {
                req->work.creds = xa_load(&ctx->personalities, personality);
                if (!req->work.creds)
                        return -EINVAL;
                get_cred(req->work.creds);
-       } else {
-               req->work.creds = NULL;
        }
-       req->work.flags = 0;
        state = &ctx->submit_state;
 
        /*
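
These two hunks move the initialization of req->work.list.next,
req->work.creds and req->work.flags to the top of io_init_req(), before
the SQE validation checks, so the work fields are in a known state on
every early-error return and the else branch that cleared creds becomes
unnecessary. A small sketch of that init-before-validation pattern,
with hypothetical names (work_stub, init_req), not kernel API:

```c
#include <errno.h>
#include <stddef.h>

struct work_stub {
	void *list_next;
	void *creds;
	unsigned int flags;
};

static int init_req(struct work_stub *work, int personality)
{
	/* Initialize every field a failure path may inspect first ... */
	work->list_next = NULL;
	work->creds = NULL;
	work->flags = 0;

	/* ... so the early validation returns can't leak garbage. */
	if (personality < 0)
		return -EINVAL;

	return 0;
}
```
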
@@ -7073,12 +7079,9 @@ static void io_sq_thread_stop(struct io_sq_data *sqd)
        if (test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state))
                return;
        down_write(&sqd->rw_lock);
-       if (!sqd->thread) {
-               up_write(&sqd->rw_lock);
-               return;
-       }
        set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
-       wake_up_process(sqd->thread);
+       if (sqd->thread)
+               wake_up_process(sqd->thread);
        up_write(&sqd->rw_lock);
        wait_for_completion(&sqd->exited);
 }
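
This hunk is the change the subject line describes: io_sq_thread_stop()
no longer returns early when sqd->thread is NULL, because a NULL thread
pointer does not mean the exit has finished; the stopper now sets the
stop bit, wakes the thread only if it still exists, and unconditionally
waits on sqd->exited. A userspace sketch of that handshake, assuming
pthreads; all names here (sqd_stub, sq_worker, sq_stop) are
illustrative stand-ins, not kernel API:

```c
#include <pthread.h>
#include <sched.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

struct sqd_stub {
	atomic_bool should_stop;	/* IO_SQ_THREAD_SHOULD_STOP bit */
	sem_t exited;			/* stand-in for sqd->exited */
};

static void *sq_worker(void *arg)
{
	struct sqd_stub *sqd = arg;

	while (!atomic_load(&sqd->should_stop))
		sched_yield();		/* placeholder for submission polling */
	sem_post(&sqd->exited);		/* signal exit as the very last step */
	return NULL;
}

static void sq_stop(struct sqd_stub *sqd)
{
	atomic_store(&sqd->should_stop, true);
	/* the kernel also wakes the thread here if it still exists */

	/*
	 * The fixed behaviour: wait unconditionally.  "exited" is always
	 * posted, either by the worker on its way out or, as the next
	 * hunks arrange, by the creator when the worker never started.
	 */
	sem_wait(&sqd->exited);
}

int main(void)
{
	struct sqd_stub sqd = { .should_stop = false };
	pthread_t tid;

	sem_init(&sqd.exited, 0, 0);
	pthread_create(&tid, NULL, sq_worker, &sqd);
	sq_stop(&sqd);			/* returns only after the worker signalled */
	pthread_join(&tid, NULL);
	puts("worker fully exited");
	return 0;
}
```
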
@@ -7843,9 +7846,9 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 
                        ret = -EINVAL;
                        if (cpu >= nr_cpu_ids)
-                               goto err;
+                               goto err_sqpoll;
                        if (!cpu_online(cpu))
-                               goto err;
+                               goto err_sqpoll;
 
                        sqd->sq_cpu = cpu;
                } else {
@@ -7856,7 +7859,7 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
                tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
                if (IS_ERR(tsk)) {
                        ret = PTR_ERR(tsk);
-                       goto err;
+                       goto err_sqpoll;
                }
 
                sqd->thread = tsk;
@@ -7875,6 +7878,9 @@ static int io_sq_offload_create(struct io_ring_ctx *ctx,
 err:
        io_sq_thread_finish(ctx);
        return ret;
+err_sqpoll:
+       complete(&ctx->sq_data->exited);
+       goto err;
 }
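
The new err_sqpoll label keeps the unconditional wait above from
deadlocking: when the CPU checks or create_io_thread() fail, nothing
would ever complete sqd->exited, so the error path completes it itself
before falling through to the common err cleanup; otherwise the
wait_for_completion(&sqd->exited) in io_sq_thread_stop() would hang.
A companion sketch, reusing the hypothetical sqd_stub and sq_worker
from the previous example:

```c
static int sq_create(struct sqd_stub *sqd)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, sq_worker, sqd) != 0) {
		/* plays the role of complete(&ctx->sq_data->exited) */
		sem_post(&sqd->exited);
		return -1;	/* then take the shared error path */
	}
	return 0;
}
```
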
 
 static inline void __io_unaccount_mem(struct user_struct *user,
@@ -9016,7 +9022,6 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
 
 static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
 {
-       int ret = 0;
        DEFINE_WAIT(wait);
 
        do {
@@ -9030,7 +9035,7 @@ static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
        } while (!signal_pending(current));
 
        finish_wait(&ctx->sqo_sq_wait, &wait);
-       return ret;
+       return 0;
 }
 
 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,