io_uring: fix code style problems
author: Pavel Begunkov <asml.silence@gmail.com>
Thu, 24 Jun 2021 14:09:57 +0000 (15:09 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Wed, 30 Jun 2021 20:15:39 +0000 (14:15 -0600)
Fix a bunch of problems mostly found by checkpatch.pl

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/cfaf9a2f27b43934144fe9422a916bd327099f44.1624543113.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
fs/io_uring.c

index 8b69982..7639bf3 100644 (file)
@@ -173,7 +173,7 @@ struct io_rings {
         * Written by the application, shouldn't be modified by the
         * kernel.
         */
-       u32                     cq_flags;
+       u32                     cq_flags;
        /*
         * Number of completion events lost because the queue was full;
         * this should be avoided by the application by making sure
@@ -857,7 +857,7 @@ struct io_kiocb {
        struct hlist_node               hash_node;
        struct async_poll               *apoll;
        struct io_wq_work               work;
-       const struct cred               *creds;
+       const struct cred               *creds;
 
        /* store used ubuf, so we can prevent reloading */
        struct io_mapped_ubuf           *imu;
@@ -1707,7 +1707,7 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 {
        struct io_submit_state *state = &ctx->submit_state;
 
-       BUILD_BUG_ON(IO_REQ_ALLOC_BATCH > ARRAY_SIZE(state->reqs));
+       BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
 
        if (!state->free_reqs) {
                gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
@@ -2769,7 +2769,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
        else
                io_rw_done(kiocb, ret);
 
-       if (check_reissue && req->flags & REQ_F_REISSUE) {
+       if (check_reissue && (req->flags & REQ_F_REISSUE)) {
                req->flags &= ~REQ_F_REISSUE;
                if (io_resubmit_prep(req)) {
                        req_ref_get(req);
@@ -3591,7 +3591,7 @@ static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
 static int __io_splice_prep(struct io_kiocb *req,
                            const struct io_uring_sqe *sqe)
 {
-       struct io_splicesp = &req->splice;
+       struct io_splice *sp = &req->splice;
        unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
@@ -3645,7 +3645,7 @@ static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       struct io_splicesp = &req->splice;
+       struct io_splice *sp = &req->splice;
 
        sp->off_in = READ_ONCE(sqe->splice_off_in);
        sp->off_out = READ_ONCE(sqe->off);
@@ -8567,6 +8567,7 @@ static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
        ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
        if (IS_ERR(ctx->cq_ev_fd)) {
                int ret = PTR_ERR(ctx->cq_ev_fd);
+
                ctx->cq_ev_fd = NULL;
                return ret;
        }
@@ -9347,9 +9348,8 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                io_cqring_overflow_flush(ctx, false);
 
                ret = -EOWNERDEAD;
-               if (unlikely(ctx->sq_data->thread == NULL)) {
+               if (unlikely(ctx->sq_data->thread == NULL))
                        goto out;
-               }
                if (flags & IORING_ENTER_SQ_WAKEUP)
                        wake_up(&ctx->sq_data->wait);
                if (flags & IORING_ENTER_SQ_WAIT) {