Merge tag 'for-5.15/io_uring-2021-09-04' of git://git.kernel.dk/linux-block
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 78f3d3a..d816c09 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -667,6 +667,29 @@ struct io_unlink {
        struct filename                 *filename;
 };
 
+struct io_mkdir {
+       struct file                     *file;
+       int                             dfd;
+       umode_t                         mode;
+       struct filename                 *filename;
+};
+
+struct io_symlink {
+       struct file                     *file;
+       int                             new_dfd;
+       struct filename                 *oldpath;
+       struct filename                 *newpath;
+};
+
+struct io_hardlink {
+       struct file                     *file;
+       int                             old_dfd;
+       int                             new_dfd;
+       struct filename                 *oldpath;
+       struct filename                 *newpath;
+       int                             flags;
+};
+
 struct io_completion {
        struct file                     *file;
        u32                             cflags;
@@ -826,6 +849,9 @@ struct io_kiocb {
                struct io_shutdown      shutdown;
                struct io_rename        rename;
                struct io_unlink        unlink;
+               struct io_mkdir         mkdir;
+               struct io_symlink       symlink;
+               struct io_hardlink      hardlink;
                /* use only after cleaning per-op data, see io_clean_op() */
                struct io_completion    compl;
        };
@@ -1038,6 +1064,9 @@ static const struct io_op_def io_op_defs[] = {
        },
        [IORING_OP_RENAMEAT] = {},
        [IORING_OP_UNLINKAT] = {},
+       [IORING_OP_MKDIRAT] = {},
+       [IORING_OP_SYMLINKAT] = {},
+       [IORING_OP_LINKAT] = {},
 };
 
 /* requests with any of those set should undergo io_disarm_next() */
@@ -2102,6 +2131,9 @@ static void tctx_task_work(struct callback_head *cb)
        while (1) {
                struct io_wq_work_node *node;
 
+               if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr)
+                       io_submit_flush_completions(ctx);
+
                spin_lock_irq(&tctx->task_lock);
                node = tctx->task_list.first;
                INIT_WQ_LIST(&tctx->task_list);
@@ -2656,7 +2688,7 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 {
        if (__io_complete_rw_common(req, res))
                return;
-       __io_req_complete(req, 0, req->result, io_put_rw_kbuf(req));
+       __io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
@@ -2847,7 +2879,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                    !kiocb->ki_filp->f_op->iopoll)
                        return -EOPNOTSUPP;
 
-               kiocb->ki_flags |= IOCB_HIPRI;
+               kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
                kiocb->ki_complete = io_complete_rw_iopoll;
                req->iopoll_completed = 0;
        } else {
@@ -3698,6 +3730,149 @@ static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags)
        return 0;
 }
 
+static int io_mkdirat_prep(struct io_kiocb *req,
+                           const struct io_uring_sqe *sqe)
+{
+       struct io_mkdir *mkd = &req->mkdir;
+       const char __user *fname;
+
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->off || sqe->rw_flags || sqe->buf_index ||
+           sqe->splice_fd_in)
+               return -EINVAL;
+       if (unlikely(req->flags & REQ_F_FIXED_FILE))
+               return -EBADF;
+
+       mkd->dfd = READ_ONCE(sqe->fd);
+       mkd->mode = READ_ONCE(sqe->len);
+
+       fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       mkd->filename = getname(fname);
+       if (IS_ERR(mkd->filename))
+               return PTR_ERR(mkd->filename);
+
+       req->flags |= REQ_F_NEED_CLEANUP;
+       return 0;
+}
+
+static int io_mkdirat(struct io_kiocb *req, int issue_flags)
+{
+       struct io_mkdir *mkd = &req->mkdir;
+       int ret;
+
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
+       ret = do_mkdirat(mkd->dfd, mkd->filename, mkd->mode);
+
+       req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (ret < 0)
+               req_set_fail(req);
+       io_req_complete(req, ret);
+       return 0;
+}
+
+static int io_symlinkat_prep(struct io_kiocb *req,
+                           const struct io_uring_sqe *sqe)
+{
+       struct io_symlink *sl = &req->symlink;
+       const char __user *oldpath, *newpath;
+
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->len || sqe->rw_flags || sqe->buf_index ||
+           sqe->splice_fd_in)
+               return -EINVAL;
+       if (unlikely(req->flags & REQ_F_FIXED_FILE))
+               return -EBADF;
+
+       sl->new_dfd = READ_ONCE(sqe->fd);
+       oldpath = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       newpath = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+
+       sl->oldpath = getname(oldpath);
+       if (IS_ERR(sl->oldpath))
+               return PTR_ERR(sl->oldpath);
+
+       sl->newpath = getname(newpath);
+       if (IS_ERR(sl->newpath)) {
+               putname(sl->oldpath);
+               return PTR_ERR(sl->newpath);
+       }
+
+       req->flags |= REQ_F_NEED_CLEANUP;
+       return 0;
+}
+
+static int io_symlinkat(struct io_kiocb *req, int issue_flags)
+{
+       struct io_symlink *sl = &req->symlink;
+       int ret;
+
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
+       ret = do_symlinkat(sl->oldpath, sl->new_dfd, sl->newpath);
+
+       req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (ret < 0)
+               req_set_fail(req);
+       io_req_complete(req, ret);
+       return 0;
+}
+
+static int io_linkat_prep(struct io_kiocb *req,
+                           const struct io_uring_sqe *sqe)
+{
+       struct io_hardlink *lnk = &req->hardlink;
+       const char __user *oldf, *newf;
+
+       if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+               return -EINVAL;
+       if (sqe->ioprio || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+               return -EINVAL;
+       if (unlikely(req->flags & REQ_F_FIXED_FILE))
+               return -EBADF;
+
+       lnk->old_dfd = READ_ONCE(sqe->fd);
+       lnk->new_dfd = READ_ONCE(sqe->len);
+       oldf = u64_to_user_ptr(READ_ONCE(sqe->addr));
+       newf = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+       lnk->flags = READ_ONCE(sqe->hardlink_flags);
+
+       lnk->oldpath = getname(oldf);
+       if (IS_ERR(lnk->oldpath))
+               return PTR_ERR(lnk->oldpath);
+
+       lnk->newpath = getname(newf);
+       if (IS_ERR(lnk->newpath)) {
+               putname(lnk->oldpath);
+               return PTR_ERR(lnk->newpath);
+       }
+
+       req->flags |= REQ_F_NEED_CLEANUP;
+       return 0;
+}
+
+static int io_linkat(struct io_kiocb *req, int issue_flags)
+{
+       struct io_hardlink *lnk = &req->hardlink;
+       int ret;
+
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
+       ret = do_linkat(lnk->old_dfd, lnk->oldpath, lnk->new_dfd,
+                               lnk->newpath, lnk->flags);
+
+       req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (ret < 0)
+               req_set_fail(req);
+       io_req_complete(req, ret);
+       return 0;
+}
+
 static int io_shutdown_prep(struct io_kiocb *req,
                            const struct io_uring_sqe *sqe)
 {
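
The hunk above wires up the three new opcodes (IORING_OP_MKDIRAT, IORING_OP_SYMLINKAT, IORING_OP_LINKAT); each is prepped from the SQE and, since do_mkdirat()/do_symlinkat()/do_linkat() may block, is punted to io-wq by returning -EAGAIN when issued with IO_URING_F_NONBLOCK. Below is a minimal userspace sketch, not part of this patch, assuming a liburing recent enough (>= 2.1) to provide the matching prep helpers and a kernel >= 5.15; the pathnames are purely illustrative. The comments note which SQE fields the kernel-side prep functions above read.

#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i, ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* IORING_OP_MKDIRAT: sqe->fd = dfd, sqe->addr = pathname, sqe->len = mode */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_mkdirat(sqe, AT_FDCWD, "newdir", 0755);

	/* IORING_OP_SYMLINKAT: sqe->addr = target, sqe->fd = newdirfd, sqe->addr2 = linkpath */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_symlinkat(sqe, "newdir", AT_FDCWD, "newdir-symlink");

	/*
	 * IORING_OP_LINKAT: sqe->fd = olddfd, sqe->addr = oldpath,
	 * sqe->len = newdfd, sqe->addr2 = newpath, sqe->hardlink_flags = flags
	 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_linkat(sqe, AT_FDCWD, "existing-file", AT_FDCWD, "hardlink", 0);

	io_uring_submit(&ring);

	for (i = 0; i < 3; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		ret = cqe->res;		/* 0 on success, -errno on failure */
		if (ret < 0)
			fprintf(stderr, "request %d failed: %d\n", i, ret);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}
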
@@ -5095,7 +5270,7 @@ static void io_poll_remove_double(struct io_kiocb *req)
        }
 }
 
-static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
+static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
        __must_hold(&req->ctx->completion_lock)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -5117,10 +5292,19 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
        if (flags & IORING_CQE_F_MORE)
                ctx->cq_extra++;
 
-       io_commit_cqring(ctx);
        return !(flags & IORING_CQE_F_MORE);
 }
 
+static inline bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
+       __must_hold(&req->ctx->completion_lock)
+{
+       bool done;
+
+       done = __io_poll_complete(req, mask);
+       io_commit_cqring(req->ctx);
+       return done;
+}
+
 static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
@@ -5131,7 +5315,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
        } else {
                bool done;
 
-               done = io_poll_complete(req, req->result);
+               done = __io_poll_complete(req, req->result);
                if (done) {
                        io_poll_remove_double(req);
                        hash_del(&req->hash_node);
@@ -5139,6 +5323,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
                        req->result = 0;
                        add_wait_queue(req->poll.head, &req->poll.wait);
                }
+               io_commit_cqring(ctx);
                spin_unlock(&ctx->completion_lock);
                io_cqring_ev_posted(ctx);
 
@@ -6187,6 +6372,12 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                return io_renameat_prep(req, sqe);
        case IORING_OP_UNLINKAT:
                return io_unlinkat_prep(req, sqe);
+       case IORING_OP_MKDIRAT:
+               return io_mkdirat_prep(req, sqe);
+       case IORING_OP_SYMLINKAT:
+               return io_symlinkat_prep(req, sqe);
+       case IORING_OP_LINKAT:
+               return io_linkat_prep(req, sqe);
        }
 
        printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
@@ -6238,6 +6429,11 @@ static bool io_drain_req(struct io_kiocb *req)
        int ret;
        u32 seq;
 
+       if (req->flags & REQ_F_FAIL) {
+               io_req_complete_fail_submit(req);
+               return true;
+       }
+
        /*
         * If we need to drain a request in the middle of a link, drain the
         * head request and the next request/link after the current link.
@@ -6350,6 +6546,17 @@ static void io_clean_op(struct io_kiocb *req)
                case IORING_OP_UNLINKAT:
                        putname(req->unlink.filename);
                        break;
+               case IORING_OP_MKDIRAT:
+                       putname(req->mkdir.filename);
+                       break;
+               case IORING_OP_SYMLINKAT:
+                       putname(req->symlink.oldpath);
+                       putname(req->symlink.newpath);
+                       break;
+               case IORING_OP_LINKAT:
+                       putname(req->hardlink.oldpath);
+                       putname(req->hardlink.newpath);
+                       break;
                }
        }
        if ((req->flags & REQ_F_POLLED) && req->apoll) {
@@ -6478,6 +6685,15 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
        case IORING_OP_UNLINKAT:
                ret = io_unlinkat(req, issue_flags);
                break;
+       case IORING_OP_MKDIRAT:
+               ret = io_mkdirat(req, issue_flags);
+               break;
+       case IORING_OP_SYMLINKAT:
+               ret = io_symlinkat(req, issue_flags);
+               break;
+       case IORING_OP_LINKAT:
+               ret = io_linkat(req, issue_flags);
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -10318,26 +10534,46 @@ static int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
 static int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                                        void __user *arg)
 {
-       struct io_uring_task *tctx = current->io_uring;
+       struct io_uring_task *tctx = NULL;
+       struct io_sq_data *sqd = NULL;
        __u32 new_count[2];
        int i, ret;
 
-       if (!tctx || !tctx->io_wq)
-               return -EINVAL;
        if (copy_from_user(new_count, arg, sizeof(new_count)))
                return -EFAULT;
        for (i = 0; i < ARRAY_SIZE(new_count); i++)
                if (new_count[i] > INT_MAX)
                        return -EINVAL;
 
+       if (ctx->flags & IORING_SETUP_SQPOLL) {
+               sqd = ctx->sq_data;
+               if (sqd) {
+                       mutex_lock(&sqd->lock);
+                       tctx = sqd->thread->io_uring;
+               }
+       } else {
+               tctx = current->io_uring;
+       }
+
+       ret = -EINVAL;
+       if (!tctx || !tctx->io_wq)
+               goto err;
+
        ret = io_wq_max_workers(tctx->io_wq, new_count);
        if (ret)
-               return ret;
+               goto err;
+
+       if (sqd)
+               mutex_unlock(&sqd->lock);
 
        if (copy_to_user(arg, new_count, sizeof(new_count)))
                return -EFAULT;
 
        return 0;
+err:
+       if (sqd)
+               mutex_unlock(&sqd->lock);
+       return ret;
 }
 
 static bool io_register_op_must_quiesce(int op)
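
For completeness, a hedged userspace sketch of the IORING_REGISTER_IOWQ_MAX_WORKERS interface touched by the hunk above, not part of this patch and assuming liburing's io_uring_register_iowq_max_workers() wrapper: with this change, on an IORING_SETUP_SQPOLL ring the limits are applied to the SQPOLL thread's io-wq rather than the registering task's. A zero entry is treated as "leave unchanged", and the previous limits are reported back in the array via the copy_to_user() seen above.

#include <stdio.h>
#include <liburing.h>

/* Query, then cap, the io-wq worker limits for an existing ring. */
static int cap_iowq_workers(struct io_uring *ring)
{
	/* values[0] = bounded workers, values[1] = unbounded workers */
	unsigned int values[2] = { 0, 0 };	/* 0: do not change, just read back */
	int ret;

	ret = io_uring_register_iowq_max_workers(ring, values);
	if (ret < 0)
		return ret;
	printf("current limits: bounded=%u unbounded=%u\n", values[0], values[1]);

	values[0] = 4;		/* allow at most 4 bounded workers */
	values[1] = 16;		/* allow at most 16 unbounded workers */
	return io_uring_register_iowq_max_workers(ring, values);
}
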