diff --git a/fs/io_uring.c b/fs/io_uring.c
index b581692..b42dfa0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -81,6 +81,7 @@
 #include <linux/pagemap.h>
 #include <linux/io_uring.h>
 #include <linux/blk-cgroup.h>
+#include <linux/audit.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -276,7 +277,7 @@ struct io_ring_ctx {
                unsigned                sq_mask;
                unsigned                sq_thread_idle;
                unsigned                cached_sq_dropped;
-               atomic_t                cached_cq_overflow;
+               unsigned                cached_cq_overflow;
                unsigned long           sq_check_overflow;
 
                struct list_head        defer_list;
@@ -327,6 +328,11 @@ struct io_ring_ctx {
 
        const struct cred       *creds;
 
+#ifdef CONFIG_AUDIT
+       kuid_t                  loginuid;
+       unsigned int            sessionid;
+#endif
+
        struct completion       ref_comp;
        struct completion       sq_thread_comp;
 
@@ -574,12 +580,12 @@ enum {
        REQ_F_NOWAIT_BIT,
        REQ_F_LINK_TIMEOUT_BIT,
        REQ_F_ISREG_BIT,
-       REQ_F_COMP_LOCKED_BIT,
        REQ_F_NEED_CLEANUP_BIT,
        REQ_F_POLLED_BIT,
        REQ_F_BUFFER_SELECTED_BIT,
        REQ_F_NO_FILE_TABLE_BIT,
        REQ_F_WORK_INITIALIZED_BIT,
+       REQ_F_LTIMEOUT_ACTIVE_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -609,12 +615,10 @@ enum {
        REQ_F_CUR_POS           = BIT(REQ_F_CUR_POS_BIT),
        /* must not punt to workers */
        REQ_F_NOWAIT            = BIT(REQ_F_NOWAIT_BIT),
-       /* has linked timeout */
+       /* has or had linked timeout */
        REQ_F_LINK_TIMEOUT      = BIT(REQ_F_LINK_TIMEOUT_BIT),
        /* regular file */
        REQ_F_ISREG             = BIT(REQ_F_ISREG_BIT),
-       /* completion under lock */
-       REQ_F_COMP_LOCKED       = BIT(REQ_F_COMP_LOCKED_BIT),
        /* needs cleanup */
        REQ_F_NEED_CLEANUP      = BIT(REQ_F_NEED_CLEANUP_BIT),
        /* already went through poll handler */
@@ -625,6 +629,8 @@ enum {
        REQ_F_NO_FILE_TABLE     = BIT(REQ_F_NO_FILE_TABLE_BIT),
        /* io_wq_work is initialized */
        REQ_F_WORK_INITIALIZED  = BIT(REQ_F_WORK_INITIALIZED_BIT),
+       /* linked timeout is active, i.e. prepared by link's head */
+       REQ_F_LTIMEOUT_ACTIVE   = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
 };
 
 struct async_poll {
@@ -732,8 +738,6 @@ struct io_submit_state {
 };
 
 struct io_op_def {
-       /* needs current->mm setup, does mm access */
-       unsigned                needs_mm : 1;
        /* needs req->file assigned */
        unsigned                needs_file : 1;
        /* don't fail if file grab fails */
@@ -744,67 +748,58 @@ struct io_op_def {
        unsigned                unbound_nonreg_file : 1;
        /* opcode is not supported by this kernel */
        unsigned                not_supported : 1;
-       /* needs file table */
-       unsigned                file_table : 1;
-       /* needs ->fs */
-       unsigned                needs_fs : 1;
        /* set if opcode supports polled "wait" */
        unsigned                pollin : 1;
        unsigned                pollout : 1;
        /* op supports buffer selection */
        unsigned                buffer_select : 1;
-       /* needs rlimit(RLIMIT_FSIZE) assigned */
-       unsigned                needs_fsize : 1;
        /* must always have async data allocated */
        unsigned                needs_async_data : 1;
-       /* needs blkcg context, issues async io potentially */
-       unsigned                needs_blkcg : 1;
        /* size of async data needed, if any */
        unsigned short          async_size;
+       unsigned                work_flags;
 };
 
-static const struct io_op_def io_op_defs[] __read_mostly = {
+static const struct io_op_def io_op_defs[] = {
        [IORING_OP_NOP] = {},
        [IORING_OP_READV] = {
-               .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
                .needs_async_data       = 1,
-               .needs_blkcg            = 1,
                .async_size             = sizeof(struct io_async_rw),
+               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_WRITEV] = {
-               .needs_mm               = 1,
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
-               .needs_fsize            = 1,
                .needs_async_data       = 1,
-               .needs_blkcg            = 1,
                .async_size             = sizeof(struct io_async_rw),
+               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
+                                               IO_WQ_WORK_FSIZE,
        },
        [IORING_OP_FSYNC] = {
                .needs_file             = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_READ_FIXED] = {
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
-               .needs_blkcg            = 1,
                .async_size             = sizeof(struct io_async_rw),
+               .work_flags             = IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM,
        },
        [IORING_OP_WRITE_FIXED] = {
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
-               .needs_fsize            = 1,
-               .needs_blkcg            = 1,
                .async_size             = sizeof(struct io_async_rw),
+               .work_flags             = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
+                                               IO_WQ_WORK_MM,
        },
        [IORING_OP_POLL_ADD] = {
                .needs_file             = 1,
@@ -813,137 +808,122 @@ static const struct io_op_def io_op_defs[] __read_mostly = {
        [IORING_OP_POLL_REMOVE] = {},
        [IORING_OP_SYNC_FILE_RANGE] = {
                .needs_file             = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_SENDMSG] = {
-               .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
-               .needs_fs               = 1,
                .pollout                = 1,
                .needs_async_data       = 1,
-               .needs_blkcg            = 1,
                .async_size             = sizeof(struct io_async_msghdr),
+               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
+                                               IO_WQ_WORK_FS,
        },
        [IORING_OP_RECVMSG] = {
-               .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
-               .needs_fs               = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
                .needs_async_data       = 1,
-               .needs_blkcg            = 1,
                .async_size             = sizeof(struct io_async_msghdr),
+               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
+                                               IO_WQ_WORK_FS,
        },
        [IORING_OP_TIMEOUT] = {
-               .needs_mm               = 1,
                .needs_async_data       = 1,
                .async_size             = sizeof(struct io_timeout_data),
+               .work_flags             = IO_WQ_WORK_MM,
        },
        [IORING_OP_TIMEOUT_REMOVE] = {},
        [IORING_OP_ACCEPT] = {
-               .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
-               .file_table             = 1,
                .pollin                 = 1,
+               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
        },
        [IORING_OP_ASYNC_CANCEL] = {},
        [IORING_OP_LINK_TIMEOUT] = {
-               .needs_mm               = 1,
                .needs_async_data       = 1,
                .async_size             = sizeof(struct io_timeout_data),
+               .work_flags             = IO_WQ_WORK_MM,
        },
        [IORING_OP_CONNECT] = {
-               .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
                .needs_async_data       = 1,
                .async_size             = sizeof(struct io_async_connect),
+               .work_flags             = IO_WQ_WORK_MM,
        },
        [IORING_OP_FALLOCATE] = {
                .needs_file             = 1,
-               .needs_fsize            = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
        },
        [IORING_OP_OPENAT] = {
-               .file_table             = 1,
-               .needs_fs               = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
+                                               IO_WQ_WORK_FS,
        },
        [IORING_OP_CLOSE] = {
                .needs_file             = 1,
                .needs_file_no_error    = 1,
-               .file_table             = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_FILES_UPDATE] = {
-               .needs_mm               = 1,
-               .file_table             = 1,
+               .work_flags             = IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
        },
        [IORING_OP_STATX] = {
-               .needs_mm               = 1,
-               .needs_fs               = 1,
-               .file_table             = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
+                                               IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_READ] = {
-               .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
-               .needs_blkcg            = 1,
                .async_size             = sizeof(struct io_async_rw),
+               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_WRITE] = {
-               .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
-               .needs_fsize            = 1,
-               .needs_blkcg            = 1,
                .async_size             = sizeof(struct io_async_rw),
+               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
+                                               IO_WQ_WORK_FSIZE,
        },
        [IORING_OP_FADVISE] = {
                .needs_file             = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_MADVISE] = {
-               .needs_mm               = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_SEND] = {
-               .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollout                = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_RECV] = {
-               .needs_mm               = 1,
                .needs_file             = 1,
                .unbound_nonreg_file    = 1,
                .pollin                 = 1,
                .buffer_select          = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_OPENAT2] = {
-               .file_table             = 1,
-               .needs_fs               = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
+                                               IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_EPOLL_CTL] = {
                .unbound_nonreg_file    = 1,
-               .file_table             = 1,
+               .work_flags             = IO_WQ_WORK_FILES,
        },
        [IORING_OP_SPLICE] = {
                .needs_file             = 1,
                .hash_reg_file          = 1,
                .unbound_nonreg_file    = 1,
-               .needs_blkcg            = 1,
+               .work_flags             = IO_WQ_WORK_BLKCG,
        },
        [IORING_OP_PROVIDE_BUFFERS] = {},
        [IORING_OP_REMOVE_BUFFERS] = {},
@@ -963,8 +943,8 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
                             struct io_comp_state *cs);
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
 static void io_put_req(struct io_kiocb *req);
+static void io_put_req_deferred(struct io_kiocb *req, int nr);
 static void io_double_put_req(struct io_kiocb *req);
-static void __io_double_put_req(struct io_kiocb *req);
 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
 static void __io_queue_linked_timeout(struct io_kiocb *req);
 static void io_queue_linked_timeout(struct io_kiocb *req);
@@ -986,7 +966,7 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 
 static struct kmem_cache *req_cachep;
 
-static const struct file_operations io_uring_fops __read_mostly;
+static const struct file_operations io_uring_fops;
 
 struct sock *io_uring_get_socket(struct file *file)
 {
@@ -1034,7 +1014,7 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
                                   struct io_kiocb *req)
 {
-       if (!io_op_defs[req->opcode].needs_mm)
+       if (!(io_op_defs[req->opcode].work_flags & IO_WQ_WORK_MM))
                return 0;
        return __io_sq_thread_acquire_mm(ctx);
 }
@@ -1065,17 +1045,54 @@ static inline void req_set_fail_links(struct io_kiocb *req)
                req->flags |= REQ_F_FAIL_LINK;
 }
 
+/*
+ * None of these are dereferenced, they are simply used to check if any of
+ * them have changed. If we're under current and check they are still the
+ * same, we're fine to grab references to them for actual out-of-line use.
+ */
+static void io_init_identity(struct io_identity *id)
+{
+       id->files = current->files;
+       id->mm = current->mm;
+#ifdef CONFIG_BLK_CGROUP
+       rcu_read_lock();
+       id->blkcg_css = blkcg_css();
+       rcu_read_unlock();
+#endif
+       id->creds = current_cred();
+       id->nsproxy = current->nsproxy;
+       id->fs = current->fs;
+       id->fsize = rlimit(RLIMIT_FSIZE);
+#ifdef CONFIG_AUDIT
+       id->loginuid = current->loginuid;
+       id->sessionid = current->sessionid;
+#endif
+       refcount_set(&id->count, 1);
+}
+
+static inline void __io_req_init_async(struct io_kiocb *req)
+{
+       memset(&req->work, 0, sizeof(req->work));
+       req->flags |= REQ_F_WORK_INITIALIZED;
+}
+
 /*
  * Note: must call io_req_init_async() for the first time you
  * touch any members of io_wq_work.
  */
 static inline void io_req_init_async(struct io_kiocb *req)
 {
+       struct io_uring_task *tctx = current->io_uring;
+
        if (req->flags & REQ_F_WORK_INITIALIZED)
                return;
 
-       memset(&req->work, 0, sizeof(req->work));
-       req->flags |= REQ_F_WORK_INITIALIZED;
+       __io_req_init_async(req);
+
+       /* Grab a ref if this isn't our static identity */
+       req->work.identity = tctx->identity;
+       if (tctx->identity != &tctx->__identity)
+               refcount_inc(&req->work.identity->count);
 }
 
 static inline bool io_async_submit(struct io_ring_ctx *ctx)
@@ -1162,7 +1179,7 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
                struct io_ring_ctx *ctx = req->ctx;
 
                return seq != ctx->cached_cq_tail
-                               + atomic_read(&ctx->cached_cq_overflow);
+                               + READ_ONCE(ctx->cached_cq_overflow);
        }
 
        return false;
@@ -1181,105 +1198,198 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
        }
 }
 
-/*
- * Returns true if we need to defer file table putting. This can only happen
- * from the error path with REQ_F_COMP_LOCKED set.
- */
-static bool io_req_clean_work(struct io_kiocb *req)
+static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
+{
+       if (req->work.identity == &tctx->__identity)
+               return;
+       if (refcount_dec_and_test(&req->work.identity->count))
+               kfree(req->work.identity);
+}
+
+static void io_req_clean_work(struct io_kiocb *req)
 {
        if (!(req->flags & REQ_F_WORK_INITIALIZED))
-               return false;
+               return;
 
        req->flags &= ~REQ_F_WORK_INITIALIZED;
 
-       if (req->work.mm) {
-               mmdrop(req->work.mm);
-               req->work.mm = NULL;
+       if (req->work.flags & IO_WQ_WORK_MM) {
+               mmdrop(req->work.identity->mm);
+               req->work.flags &= ~IO_WQ_WORK_MM;
        }
 #ifdef CONFIG_BLK_CGROUP
-       if (req->work.blkcg_css)
-               css_put(req->work.blkcg_css);
+       if (req->work.flags & IO_WQ_WORK_BLKCG) {
+               css_put(req->work.identity->blkcg_css);
+               req->work.flags &= ~IO_WQ_WORK_BLKCG;
+       }
 #endif
-       if (req->work.creds) {
-               put_cred(req->work.creds);
-               req->work.creds = NULL;
+       if (req->work.flags & IO_WQ_WORK_CREDS) {
+               put_cred(req->work.identity->creds);
+               req->work.flags &= ~IO_WQ_WORK_CREDS;
        }
-       if (req->work.fs) {
-               struct fs_struct *fs = req->work.fs;
-
-               if (req->flags & REQ_F_COMP_LOCKED)
-                       return true;
+       if (req->work.flags & IO_WQ_WORK_FS) {
+               struct fs_struct *fs = req->work.identity->fs;
 
-               spin_lock(&req->work.fs->lock);
+               spin_lock(&req->work.identity->fs->lock);
                if (--fs->users)
                        fs = NULL;
-               spin_unlock(&req->work.fs->lock);
+               spin_unlock(&req->work.identity->fs->lock);
                if (fs)
                        free_fs_struct(fs);
-               req->work.fs = NULL;
+               req->work.flags &= ~IO_WQ_WORK_FS;
        }
 
-       return false;
+       io_put_identity(req->task->io_uring, req);
 }
 
-static void io_prep_async_work(struct io_kiocb *req)
+/*
+ * Create a private copy of io_identity, since some fields don't match
+ * the current context.
+ */
+static bool io_identity_cow(struct io_kiocb *req)
+{
+       struct io_uring_task *tctx = current->io_uring;
+       const struct cred *creds = NULL;
+       struct io_identity *id;
+
+       if (req->work.flags & IO_WQ_WORK_CREDS)
+               creds = req->work.identity->creds;
+
+       id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
+       if (unlikely(!id)) {
+               req->work.flags |= IO_WQ_WORK_CANCEL;
+               return false;
+       }
+
+       /*
+        * We can safely just re-init the creds we copied. Either the field
+        * matches the current one, or we haven't grabbed it yet. The only
+        * exception is ->creds, through registered personalities, so handle
+        * that one separately.
+        */
+       io_init_identity(id);
+       if (creds)
+               req->work.identity->creds = creds;
+
+       /* add one for this request */
+       refcount_inc(&id->count);
+
+       /* drop old identity, assign new one. one ref for req, one for tctx */
+       if (req->work.identity != tctx->identity &&
+           refcount_sub_and_test(2, &req->work.identity->count))
+               kfree(req->work.identity);
+
+       req->work.identity = id;
+       tctx->identity = id;
+       return true;
+}
+
+static bool io_grab_identity(struct io_kiocb *req)
 {
        const struct io_op_def *def = &io_op_defs[req->opcode];
+       struct io_identity *id = req->work.identity;
        struct io_ring_ctx *ctx = req->ctx;
 
-       io_req_init_async(req);
-
-       if (req->flags & REQ_F_ISREG) {
-               if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
-                       io_wq_hash_work(&req->work, file_inode(req->file));
-       } else {
-               if (def->unbound_nonreg_file)
-                       req->work.flags |= IO_WQ_WORK_UNBOUND;
+       if (def->work_flags & IO_WQ_WORK_FSIZE) {
+               if (id->fsize != rlimit(RLIMIT_FSIZE))
+                       return false;
+               req->work.flags |= IO_WQ_WORK_FSIZE;
        }
-       if (!req->work.files && io_op_defs[req->opcode].file_table &&
+
+       if (!(req->work.flags & IO_WQ_WORK_FILES) &&
+           (def->work_flags & IO_WQ_WORK_FILES) &&
            !(req->flags & REQ_F_NO_FILE_TABLE)) {
-               req->work.files = get_files_struct(current);
-               get_nsproxy(current->nsproxy);
-               req->work.nsproxy = current->nsproxy;
+               if (id->files != current->files ||
+                   id->nsproxy != current->nsproxy)
+                       return false;
+               atomic_inc(&id->files->count);
+               get_nsproxy(id->nsproxy);
                req->flags |= REQ_F_INFLIGHT;
 
                spin_lock_irq(&ctx->inflight_lock);
                list_add(&req->inflight_entry, &ctx->inflight_list);
                spin_unlock_irq(&ctx->inflight_lock);
-       }
-       if (!req->work.mm && def->needs_mm) {
-               mmgrab(current->mm);
-               req->work.mm = current->mm;
+               req->work.flags |= IO_WQ_WORK_FILES;
        }
 #ifdef CONFIG_BLK_CGROUP
-       if (!req->work.blkcg_css && def->needs_blkcg) {
+       if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
+           (def->work_flags & IO_WQ_WORK_BLKCG)) {
                rcu_read_lock();
-               req->work.blkcg_css = blkcg_css();
+               if (id->blkcg_css != blkcg_css()) {
+                       rcu_read_unlock();
+                       return false;
+               }
                /*
                 * This should be rare, either the cgroup is dying or the task
                 * is moving cgroups. Just punt to root for the handful of ios.
                 */
-               if (!css_tryget_online(req->work.blkcg_css))
-                       req->work.blkcg_css = NULL;
+               if (css_tryget_online(id->blkcg_css))
+                       req->work.flags |= IO_WQ_WORK_BLKCG;
                rcu_read_unlock();
        }
 #endif
-       if (!req->work.creds)
-               req->work.creds = get_current_cred();
-       if (!req->work.fs && def->needs_fs) {
-               spin_lock(&current->fs->lock);
-               if (!current->fs->in_exec) {
-                       req->work.fs = current->fs;
-                       req->work.fs->users++;
+       if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
+               if (id->creds != current_cred())
+                       return false;
+               get_cred(id->creds);
+               req->work.flags |= IO_WQ_WORK_CREDS;
+       }
+#ifdef CONFIG_AUDIT
+       if (!uid_eq(current->loginuid, id->loginuid) ||
+           current->sessionid != id->sessionid)
+               return false;
+#endif
+       if (!(req->work.flags & IO_WQ_WORK_FS) &&
+           (def->work_flags & IO_WQ_WORK_FS)) {
+               if (current->fs != id->fs)
+                       return false;
+               spin_lock(&id->fs->lock);
+               if (!id->fs->in_exec) {
+                       id->fs->users++;
+                       req->work.flags |= IO_WQ_WORK_FS;
                } else {
                        req->work.flags |= IO_WQ_WORK_CANCEL;
                }
                spin_unlock(&current->fs->lock);
        }
-       if (def->needs_fsize)
-               req->work.fsize = rlimit(RLIMIT_FSIZE);
-       else
-               req->work.fsize = RLIM_INFINITY;
+
+       return true;
+}
+
+static void io_prep_async_work(struct io_kiocb *req)
+{
+       const struct io_op_def *def = &io_op_defs[req->opcode];
+       struct io_ring_ctx *ctx = req->ctx;
+       struct io_identity *id;
+
+       io_req_init_async(req);
+       id = req->work.identity;
+
+       if (req->flags & REQ_F_ISREG) {
+               if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
+                       io_wq_hash_work(&req->work, file_inode(req->file));
+       } else {
+               if (def->unbound_nonreg_file)
+                       req->work.flags |= IO_WQ_WORK_UNBOUND;
+       }
+
+       /* ->mm can never change on us */
+       if (!(req->work.flags & IO_WQ_WORK_MM) &&
+           (def->work_flags & IO_WQ_WORK_MM)) {
+               mmgrab(id->mm);
+               req->work.flags |= IO_WQ_WORK_MM;
+       }
+
+       /* if we fail grabbing identity, we must COW, regrab, and retry */
+       if (io_grab_identity(req))
+               return;
+
+       if (!io_identity_cow(req))
+               return;
+
+       /* can't fail at this point */
+       if (!io_grab_identity(req))
+               WARN_ON(1);
 }
 
 static void io_prep_async_link(struct io_kiocb *req)
@@ -1325,9 +1435,8 @@ static void io_kill_timeout(struct io_kiocb *req)
                atomic_set(&req->ctx->cq_timeouts,
                        atomic_read(&req->ctx->cq_timeouts) + 1);
                list_del_init(&req->timeout.list);
-               req->flags |= REQ_F_COMP_LOCKED;
                io_cqring_fill_event(req, 0);
-               io_put_req(req);
+               io_put_req_deferred(req, 1);
        }
 }
 
@@ -1378,8 +1487,7 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
                if (link) {
                        __io_queue_linked_timeout(link);
                        /* drop submission reference */
-                       link->flags |= REQ_F_COMP_LOCKED;
-                       io_put_req(link);
+                       io_put_req_deferred(link, 1);
                }
                kfree(de);
        } while (!list_empty(&ctx->defer_list));
@@ -1471,8 +1579,9 @@ static inline bool io_match_files(struct io_kiocb *req,
 {
        if (!files)
                return true;
-       if (req->flags & REQ_F_WORK_INITIALIZED)
-               return req->work.files == files;
+       if ((req->flags & REQ_F_WORK_INITIALIZED) &&
+           (req->work.flags & IO_WQ_WORK_FILES))
+               return req->work.identity->files == files;
        return false;
 }
 
@@ -1518,8 +1627,9 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
                        WRITE_ONCE(cqe->res, req->result);
                        WRITE_ONCE(cqe->flags, req->compl.cflags);
                } else {
+                       ctx->cached_cq_overflow++;
                        WRITE_ONCE(ctx->rings->cq_overflow,
-                               atomic_inc_return(&ctx->cached_cq_overflow));
+                                  ctx->cached_cq_overflow);
                }
        }
 
@@ -1561,8 +1671,8 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
                 * then we cannot store the request for later flushing, we need
                 * to drop it on the floor.
                 */
-               WRITE_ONCE(ctx->rings->cq_overflow,
-                               atomic_inc_return(&ctx->cached_cq_overflow));
+               ctx->cached_cq_overflow++;
+               WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
        } else {
                if (list_empty(&ctx->cq_overflow_list)) {
                        set_bit(0, &ctx->sq_check_overflow);
@@ -1606,13 +1716,19 @@ static void io_submit_flush_completions(struct io_comp_state *cs)
                req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
                list_del(&req->compl.list);
                __io_cqring_fill_event(req, req->result, req->compl.cflags);
-               if (!(req->flags & REQ_F_LINK_HEAD)) {
-                       req->flags |= REQ_F_COMP_LOCKED;
-                       io_put_req(req);
-               } else {
+
+               /*
+                * io_free_req() doesn't care about completion_lock unless one
+                * of these flags is set. REQ_F_WORK_INITIALIZED is in the list
+                * because of a potential deadlock with req->work.fs->lock
+                */
+               if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT
+                                |REQ_F_WORK_INITIALIZED)) {
                        spin_unlock_irq(&ctx->completion_lock);
                        io_put_req(req);
                        spin_lock_irq(&ctx->completion_lock);
+               } else {
+                       io_put_req(req);
                }
        }
        io_commit_cqring(ctx);
@@ -1699,7 +1815,7 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file,
                fput(file);
 }
 
-static bool io_dismantle_req(struct io_kiocb *req)
+static void io_dismantle_req(struct io_kiocb *req)
 {
        io_clean_op(req);
 
@@ -1708,15 +1824,17 @@ static bool io_dismantle_req(struct io_kiocb *req)
        if (req->file)
                io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
 
-       return io_req_clean_work(req);
+       io_req_clean_work(req);
 }
 
-static void __io_free_req_finish(struct io_kiocb *req)
+static void __io_free_req(struct io_kiocb *req)
 {
        struct io_uring_task *tctx = req->task->io_uring;
        struct io_ring_ctx *ctx = req->ctx;
 
-       atomic_long_inc(&tctx->req_complete);
+       io_dismantle_req(req);
+
+       percpu_counter_dec(&tctx->inflight);
        if (tctx->in_idle)
                wake_up(&tctx->wait);
        put_task_struct(req->task);
@@ -1728,39 +1846,6 @@ static void __io_free_req_finish(struct io_kiocb *req)
        percpu_ref_put(&ctx->refs);
 }
 
-static void io_req_task_file_table_put(struct callback_head *cb)
-{
-       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-       struct fs_struct *fs = req->work.fs;
-
-       spin_lock(&req->work.fs->lock);
-       if (--fs->users)
-               fs = NULL;
-       spin_unlock(&req->work.fs->lock);
-       if (fs)
-               free_fs_struct(fs);
-       req->work.fs = NULL;
-       __io_free_req_finish(req);
-}
-
-static void __io_free_req(struct io_kiocb *req)
-{
-       if (!io_dismantle_req(req)) {
-               __io_free_req_finish(req);
-       } else {
-               int ret;
-
-               init_task_work(&req->task_work, io_req_task_file_table_put);
-               ret = task_work_add(req->task, &req->task_work, TWA_RESUME);
-               if (unlikely(ret)) {
-                       struct task_struct *tsk;
-
-                       tsk = io_wq_get_task(req->ctx->io_wq);
-                       task_work_add(tsk, &req->task_work, 0);
-               }
-       }
-}
-
 static bool io_link_cancel_timeout(struct io_kiocb *req)
 {
        struct io_timeout_data *io = req->async_data;
@@ -1772,7 +1857,7 @@ static bool io_link_cancel_timeout(struct io_kiocb *req)
                io_cqring_fill_event(req, -ECANCELED);
                io_commit_cqring(ctx);
                req->flags &= ~REQ_F_LINK_HEAD;
-               io_put_req(req);
+               io_put_req_deferred(req, 1);
                return true;
        }
 
@@ -1789,9 +1874,14 @@ static bool __io_kill_linked_timeout(struct io_kiocb *req)
        link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
        if (link->opcode != IORING_OP_LINK_TIMEOUT)
                return false;
+       /*
+        * Can happen if a linked timeout fired and link had been like
+        * req -> link t-out -> link t-out [-> ...]
+        */
+       if (!(link->flags & REQ_F_LTIMEOUT_ACTIVE))
+               return false;
 
        list_del_init(&link->link_list);
-       link->flags |= REQ_F_COMP_LOCKED;
        wake_ev = io_link_cancel_timeout(link);
        req->flags &= ~REQ_F_LINK_TIMEOUT;
        return wake_ev;
@@ -1800,17 +1890,12 @@ static bool __io_kill_linked_timeout(struct io_kiocb *req)
 static void io_kill_linked_timeout(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
+       unsigned long flags;
        bool wake_ev;
 
-       if (!(req->flags & REQ_F_COMP_LOCKED)) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&ctx->completion_lock, flags);
-               wake_ev = __io_kill_linked_timeout(req);
-               spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       } else {
-               wake_ev = __io_kill_linked_timeout(req);
-       }
+       spin_lock_irqsave(&ctx->completion_lock, flags);
+       wake_ev = __io_kill_linked_timeout(req);
+       spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        if (wake_ev)
                io_cqring_ev_posted(ctx);
@@ -1838,10 +1923,12 @@ static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
 /*
  * Called if REQ_F_LINK_HEAD is set, and we fail the head request
  */
-static void __io_fail_links(struct io_kiocb *req)
+static void io_fail_links(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
+       unsigned long flags;
 
+       spin_lock_irqsave(&ctx->completion_lock, flags);
        while (!list_empty(&req->link_list)) {
                struct io_kiocb *link = list_first_entry(&req->link_list,
                                                struct io_kiocb, link_list);
@@ -1850,28 +1937,20 @@ static void __io_fail_links(struct io_kiocb *req)
                trace_io_uring_fail_link(req, link);
 
                io_cqring_fill_event(link, -ECANCELED);
-               link->flags |= REQ_F_COMP_LOCKED;
-               __io_double_put_req(link);
-               req->flags &= ~REQ_F_LINK_TIMEOUT;
+
+               /*
+                * It's ok to free under spinlock as they're not linked anymore,
+                * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
+                * work.fs->lock.
+                */
+               if (link->flags & REQ_F_WORK_INITIALIZED)
+                       io_put_req_deferred(link, 2);
+               else
+                       io_double_put_req(link);
        }
 
        io_commit_cqring(ctx);
-       io_cqring_ev_posted(ctx);
-}
-
-static void io_fail_links(struct io_kiocb *req)
-{
-       struct io_ring_ctx *ctx = req->ctx;
-
-       if (!(req->flags & REQ_F_COMP_LOCKED)) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&ctx->completion_lock, flags);
-               __io_fail_links(req);
-               spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       } else {
-               __io_fail_links(req);
-       }
+       spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
        io_cqring_ev_posted(ctx);
 }
@@ -1905,7 +1984,8 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
 {
        struct task_struct *tsk = req->task;
        struct io_ring_ctx *ctx = req->ctx;
-       int ret, notify;
+       enum task_work_notify_mode notify;
+       int ret;
 
        if (tsk->flags & PF_EXITING)
                return -ESRCH;
@@ -1916,7 +1996,7 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
         * processing task_work. There's no reliable way to tell if TWA_RESUME
         * will do the job.
         */
-       notify = 0;
+       notify = TWA_NONE;
        if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
                notify = TWA_SIGNAL;
 
@@ -1985,7 +2065,7 @@ static void io_req_task_queue(struct io_kiocb *req)
 
                init_task_work(&req->task_work, io_req_task_cancel);
                tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, 0);
+               task_work_add(tsk, &req->task_work, TWA_NONE);
                wake_up_process(tsk);
        }
 }
@@ -2033,7 +2113,9 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
        if (rb->to_free)
                __io_req_free_batch_flush(ctx, rb);
        if (rb->task) {
-               atomic_long_add(rb->task_refs, &rb->task->io_uring->req_complete);
+               struct io_uring_task *tctx = rb->task->io_uring;
+
+               percpu_counter_sub(&tctx->inflight, rb->task_refs);
                put_task_struct_many(rb->task, rb->task_refs);
                rb->task = NULL;
        }
@@ -2050,7 +2132,9 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
 
        if (req->task != rb->task) {
                if (rb->task) {
-                       atomic_long_add(rb->task_refs, &rb->task->io_uring->req_complete);
+                       struct io_uring_task *tctx = rb->task->io_uring;
+
+                       percpu_counter_sub(&tctx->inflight, rb->task_refs);
                        put_task_struct_many(rb->task, rb->task_refs);
                }
                rb->task = req->task;
@@ -2058,7 +2142,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
        }
        rb->task_refs++;
 
-       WARN_ON_ONCE(io_dismantle_req(req));
+       io_dismantle_req(req);
        rb->reqs[rb->to_free++] = req;
        if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
                __io_req_free_batch_flush(req->ctx, rb);
@@ -2085,6 +2169,34 @@ static void io_put_req(struct io_kiocb *req)
                io_free_req(req);
 }
 
+static void io_put_req_deferred_cb(struct callback_head *cb)
+{
+       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+
+       io_free_req(req);
+}
+
+static void io_free_req_deferred(struct io_kiocb *req)
+{
+       int ret;
+
+       init_task_work(&req->task_work, io_put_req_deferred_cb);
+       ret = io_req_task_work_add(req, true);
+       if (unlikely(ret)) {
+               struct task_struct *tsk;
+
+               tsk = io_wq_get_task(req->ctx->io_wq);
+               task_work_add(tsk, &req->task_work, TWA_NONE);
+               wake_up_process(tsk);
+       }
+}
+
+static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
+{
+       if (refcount_sub_and_test(refs, &req->refs))
+               io_free_req_deferred(req);
+}
+
 static struct io_wq_work *io_steal_work(struct io_kiocb *req)
 {
        struct io_kiocb *nxt;
@@ -2101,17 +2213,6 @@ static struct io_wq_work *io_steal_work(struct io_kiocb *req)
        return nxt ? &nxt->work : NULL;
 }
 
-/*
- * Must only be used if we don't need to care about links, usually from
- * within the completion handling itself.
- */
-static void __io_double_put_req(struct io_kiocb *req)
-{
-       /* drop both submit and complete references */
-       if (refcount_sub_and_test(2, &req->refs))
-               __io_free_req(req);
-}
-
 static void io_double_put_req(struct io_kiocb *req)
 {
        /* drop both submit and complete references */
@@ -2601,7 +2702,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
 static bool io_bdev_nowait(struct block_device *bdev)
 {
 #ifdef CONFIG_BLOCK
-       return !bdev || queue_is_mq(bdev_get_queue(bdev));
+       return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
 #else
        return true;
 #endif
@@ -3016,9 +3117,10 @@ static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
  * For files that don't have ->read_iter() and ->write_iter(), handle them
  * by looping over ->read() or ->write() manually.
  */
-static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
-                          struct iov_iter *iter)
+static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
 {
+       struct kiocb *kiocb = &req->rw.kiocb;
+       struct file *file = req->file;
        ssize_t ret = 0;
 
        /*
@@ -3038,11 +3140,8 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
                if (!iov_iter_is_bvec(iter)) {
                        iovec = iov_iter_iovec(iter);
                } else {
-                       /* fixed buffers import bvec */
-                       iovec.iov_base = kmap(iter->bvec->bv_page)
-                                               + iter->iov_offset;
-                       iovec.iov_len = min(iter->count,
-                                       iter->bvec->bv_len - iter->iov_offset);
+                       iovec.iov_base = u64_to_user_ptr(req->rw.addr);
+                       iovec.iov_len = req->rw.len;
                }
 
                if (rw == READ) {
@@ -3053,9 +3152,6 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
                                               iovec.iov_len, io_kiocb_ppos(kiocb));
                }
 
-               if (iov_iter_is_bvec(iter))
-                       kunmap(iter->bvec->bv_page);
-
                if (nr < 0) {
                        if (!ret)
                                ret = nr;
@@ -3064,6 +3160,8 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
                ret += nr;
                if (nr != iovec.iov_len)
                        break;
+               req->rw.len -= nr;
+               req->rw.addr += nr;
                iov_iter_advance(iter, nr);
        }
 
@@ -3199,7 +3297,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
                /* queue just for cancelation */
                init_task_work(&req->task_work, io_req_task_cancel);
                tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, 0);
+               task_work_add(tsk, &req->task_work, TWA_NONE);
                wake_up_process(tsk);
        }
        return 1;
@@ -3253,7 +3351,7 @@ static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
        if (req->file->f_op->read_iter)
                return call_read_iter(req->file, &req->rw.kiocb, iter);
        else if (req->file->f_op->read)
-               return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
+               return loop_rw_iter(READ, req, iter);
        else
                return -EINVAL;
 }
@@ -3444,7 +3542,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
        if (req->file->f_op->write_iter)
                ret2 = call_write_iter(req->file, kiocb, iter);
        else if (req->file->f_op->write)
-               ret2 = loop_rw_iter(WRITE, req->file, kiocb, iter);
+               ret2 = loop_rw_iter(WRITE, req, iter);
        else
                ret2 = -EINVAL;
 
@@ -4123,7 +4221,7 @@ static int io_close(struct io_kiocb *req, bool force_nonblock,
        }
 
        /* No ->flush() or already async, safely close from here */
-       ret = filp_close(close->put_file, req->work.files);
+       ret = filp_close(close->put_file, req->work.identity->files);
        if (ret < 0)
                req_set_fail_links(req);
        fput(close->put_file);
@@ -4765,7 +4863,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 
                WRITE_ONCE(poll->canceled, true);
                tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, 0);
+               task_work_add(tsk, &req->task_work, TWA_NONE);
                wake_up_process(tsk);
        }
        return 1;
@@ -4834,33 +4932,25 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
        io_commit_cqring(ctx);
 }
 
-static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
+static void io_poll_task_func(struct callback_head *cb)
 {
+       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
        struct io_ring_ctx *ctx = req->ctx;
+       struct io_kiocb *nxt;
 
        if (io_poll_rewait(req, &req->poll)) {
                spin_unlock_irq(&ctx->completion_lock);
-               return;
-       }
-
-       hash_del(&req->hash_node);
-       io_poll_complete(req, req->result, 0);
-       req->flags |= REQ_F_COMP_LOCKED;
-       *nxt = io_put_req_find_next(req);
-       spin_unlock_irq(&ctx->completion_lock);
-
-       io_cqring_ev_posted(ctx);
-}
+       } else {
+               hash_del(&req->hash_node);
+               io_poll_complete(req, req->result, 0);
+               spin_unlock_irq(&ctx->completion_lock);
 
-static void io_poll_task_func(struct callback_head *cb)
-{
-       struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-       struct io_ring_ctx *ctx = req->ctx;
-       struct io_kiocb *nxt = NULL;
+               nxt = io_put_req_find_next(req);
+               io_cqring_ev_posted(ctx);
+               if (nxt)
+                       __io_req_task_submit(nxt);
+       }
 
-       io_poll_task_handler(req, &nxt);
-       if (nxt)
-               __io_req_task_submit(nxt);
        percpu_ref_put(&ctx->refs);
 }
 
@@ -4917,6 +5007,8 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
         * for write). Setup a separate io_poll_iocb if this happens.
         */
        if (unlikely(poll->head)) {
+               struct io_poll_iocb *poll_one = poll;
+
                /* already have a 2nd entry, fail a third attempt */
                if (*poll_ptr) {
                        pt->error = -EINVAL;
@@ -4927,7 +5019,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
                        pt->error = -ENOMEM;
                        return;
                }
-               io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
+               io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
                refcount_inc(&req->refs);
                poll->wait.private = req;
                *poll_ptr = poll;
@@ -5012,6 +5104,7 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
        struct io_ring_ctx *ctx = req->ctx;
        bool cancel = false;
 
+       INIT_HLIST_NODE(&req->hash_node);
        io_init_poll_iocb(poll, mask, wake_func);
        poll->file = req->file;
        poll->wait.private = req;
@@ -5073,7 +5166,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
 
        req->flags |= REQ_F_POLLED;
        req->apoll = apoll;
-       INIT_HLIST_NODE(&req->hash_node);
 
        mask = 0;
        if (def->pollin)
@@ -5144,9 +5236,8 @@ static bool io_poll_remove_one(struct io_kiocb *req)
        if (do_complete) {
                io_cqring_fill_event(req, -ECANCELED);
                io_commit_cqring(req->ctx);
-               req->flags |= REQ_F_COMP_LOCKED;
                req_set_fail_links(req);
-               io_put_req(req);
+               io_put_req_deferred(req, 1);
        }
 
        return do_complete;
@@ -5256,8 +5347,6 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
                return -EINVAL;
        if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
                return -EINVAL;
-       if (!poll->file)
-               return -EBADF;
 
        events = READ_ONCE(sqe->poll32_events);
 #ifdef __BIG_ENDIAN
@@ -5275,7 +5364,6 @@ static int io_poll_add(struct io_kiocb *req)
        struct io_poll_table ipt;
        __poll_t mask;
 
-       INIT_HLIST_NODE(&req->hash_node);
        ipt.pt._qproc = io_poll_queue_proc;
 
        mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
@@ -5328,9 +5416,8 @@ static int __io_timeout_cancel(struct io_kiocb *req)
        list_del_init(&req->timeout.list);
 
        req_set_fail_links(req);
-       req->flags |= REQ_F_COMP_LOCKED;
        io_cqring_fill_event(req, -ECANCELED);
-       io_put_req(req);
+       io_put_req_deferred(req, 1);
        return 0;
 }
 
@@ -5740,9 +5827,9 @@ static void io_req_drop_files(struct io_kiocb *req)
                wake_up(&ctx->inflight_wait);
        spin_unlock_irqrestore(&ctx->inflight_lock, flags);
        req->flags &= ~REQ_F_INFLIGHT;
-       put_files_struct(req->work.files);
-       put_nsproxy(req->work.nsproxy);
-       req->work.files = NULL;
+       put_files_struct(req->work.identity->files);
+       put_nsproxy(req->work.identity->nsproxy);
+       req->work.flags &= ~IO_WQ_WORK_FILES;
 }
 
 static void __io_clean_op(struct io_kiocb *req)
@@ -6026,10 +6113,9 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
        if (!list_empty(&req->link_list)) {
                prev = list_entry(req->link_list.prev, struct io_kiocb,
                                  link_list);
-               if (refcount_inc_not_zero(&prev->refs)) {
+               if (refcount_inc_not_zero(&prev->refs))
                        list_del_init(&req->link_list);
-                       prev->flags &= ~REQ_F_LINK_TIMEOUT;
-               } else
+               else
                        prev = NULL;
        }
 
@@ -6086,6 +6172,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
        if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
                return NULL;
 
+       nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
        req->flags |= REQ_F_LINK_TIMEOUT;
        return nxt;
 }
@@ -6100,14 +6187,15 @@ static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
 again:
        linked_timeout = io_prep_linked_timeout(req);
 
-       if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
-           req->work.creds != current_cred()) {
+       if ((req->flags & REQ_F_WORK_INITIALIZED) &&
+           (req->work.flags & IO_WQ_WORK_CREDS) &&
+           req->work.identity->creds != current_cred()) {
                if (old_creds)
                        revert_creds(old_creds);
-               if (old_creds == req->work.creds)
+               if (old_creds == req->work.identity->creds)
                        old_creds = NULL; /* restored original creds */
                else
-                       old_creds = override_creds(req->work.creds);
+                       old_creds = override_creds(req->work.identity->creds);
        }
 
        ret = io_issue_sqe(req, true, cs);
@@ -6148,8 +6236,10 @@ punt:
        if (nxt) {
                req = nxt;
 
-               if (req->flags & REQ_F_FORCE_ASYNC)
+               if (req->flags & REQ_F_FORCE_ASYNC) {
+                       linked_timeout = NULL;
                        goto punt;
+               }
                goto again;
        }
 exit:
@@ -6410,11 +6500,17 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 
        id = READ_ONCE(sqe->personality);
        if (id) {
-               io_req_init_async(req);
-               req->work.creds = idr_find(&ctx->personality_idr, id);
-               if (unlikely(!req->work.creds))
+               struct io_identity *iod;
+
+               iod = idr_find(&ctx->personality_idr, id);
+               if (unlikely(!iod))
                        return -EINVAL;
-               get_cred(req->work.creds);
+               refcount_inc(&iod->count);
+
+               __io_req_init_async(req);
+               get_cred(iod->creds);
+               req->work.identity = iod;
+               req->work.flags |= IO_WQ_WORK_CREDS;
        }
 
        /* same numerical values with corresponding REQ_F_*, safe to copy */
@@ -6447,7 +6543,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
        if (!percpu_ref_tryget_many(&ctx->refs, nr))
                return -EAGAIN;
 
-       atomic_long_add(nr, &current->io_uring->req_issue);
+       percpu_counter_add(&current->io_uring->inflight, nr);
        refcount_add(nr, &current->usage);
 
        io_submit_state_start(&state, ctx, nr);
@@ -6489,10 +6585,12 @@ fail_req:
 
        if (unlikely(submitted != nr)) {
                int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
+               struct io_uring_task *tctx = current->io_uring;
+               int unused = nr - ref_used;
 
-               percpu_ref_put_many(&ctx->refs, nr - ref_used);
-               atomic_long_sub(nr - ref_used, &current->io_uring->req_issue);
-               put_task_struct_many(current, nr - ref_used);
+               percpu_ref_put_many(&ctx->refs, unused);
+               percpu_counter_sub(&tctx->inflight, unused);
+               put_task_struct_many(current, unused);
        }
        if (link)
                io_queue_link_head(link, &state.comp);
@@ -6672,6 +6770,10 @@ static int io_sq_thread(void *data)
                                old_cred = override_creds(ctx->creds);
                        }
                        io_sq_thread_associate_blkcg(ctx, &cur_css);
+#ifdef CONFIG_AUDIT
+                       current->loginuid = ctx->loginuid;
+                       current->sessionid = ctx->sessionid;
+#endif
 
                        ret |= __io_sq_thread(ctx, start_jiffies, cap_entries);
 
@@ -7306,7 +7408,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
        spin_lock_init(&file_data->lock);
 
        nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
-       file_data->table = kcalloc(nr_tables, sizeof(file_data->table),
+       file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
                                   GFP_KERNEL);
        if (!file_data->table)
                goto out_free;
@@ -7317,6 +7419,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 
        if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
                goto out_ref;
+       ctx->file_data = file_data;
 
        for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
                struct fixed_file_table *table;
@@ -7351,7 +7454,6 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                table->files[index] = file;
        }
 
-       ctx->file_data = file_data;
        ret = io_sqe_files_scm(ctx);
        if (ret) {
                io_sqe_files_unregister(ctx);
@@ -7384,6 +7486,7 @@ out_ref:
 out_free:
        kfree(file_data->table);
        kfree(file_data);
+       ctx->file_data = NULL;
        return ret;
 }
 
@@ -7609,17 +7712,24 @@ out_fput:
 static int io_uring_alloc_task_context(struct task_struct *task)
 {
        struct io_uring_task *tctx;
+       int ret;
 
        tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
        if (unlikely(!tctx))
                return -ENOMEM;
 
+       ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
+       if (unlikely(ret)) {
+               kfree(tctx);
+               return ret;
+       }
+
        xa_init(&tctx->xa);
        init_waitqueue_head(&tctx->wait);
        tctx->last = NULL;
        tctx->in_idle = 0;
-       atomic_long_set(&tctx->req_issue, 0);
-       atomic_long_set(&tctx->req_complete, 0);
+       io_init_identity(&tctx->__identity);
+       tctx->identity = &tctx->__identity;
        task->io_uring = tctx;
        return 0;
 }
@@ -7629,6 +7739,10 @@ void __io_uring_free(struct task_struct *tsk)
        struct io_uring_task *tctx = tsk->io_uring;
 
        WARN_ON_ONCE(!xa_empty(&tctx->xa));
+       WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
+       if (tctx->identity != &tctx->__identity)
+               kfree(tctx->identity);
+       percpu_counter_destroy(&tctx->inflight);
        kfree(tctx);
        tsk->io_uring = NULL;
 }
@@ -8205,11 +8319,14 @@ static int io_uring_fasync(int fd, struct file *file, int on)
 static int io_remove_personalities(int id, void *p, void *data)
 {
        struct io_ring_ctx *ctx = data;
-       const struct cred *cred;
+       struct io_identity *iod;
 
-       cred = idr_remove(&ctx->personality_idr, id);
-       if (cred)
-               put_cred(cred);
+       iod = idr_remove(&ctx->personality_idr, id);
+       if (iod) {
+               put_cred(iod->creds);
+               if (refcount_dec_and_test(&iod->count))
+                       kfree(iod);
+       }
        return 0;
 }
 
@@ -8281,7 +8398,8 @@ static bool io_wq_files_match(struct io_wq_work *work, void *data)
 {
        struct files_struct *files = data;
 
-       return !files || work->files == files;
+       return !files || ((work->flags & IO_WQ_WORK_FILES) &&
+                               work->identity->files == files);
 }
 
 /*
@@ -8436,7 +8554,8 @@ static bool io_uring_cancel_files(struct io_ring_ctx *ctx,
 
                spin_lock_irq(&ctx->inflight_lock);
                list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
-                       if (files && req->work.files != files)
+                       if (files && (req->work.flags & IO_WQ_WORK_FILES) &&
+                           req->work.identity->files != files)
                                continue;
                        /* req is being completed, ignore */
                        if (!refcount_inc_not_zero(&req->refs))
@@ -8564,19 +8683,11 @@ static void io_uring_del_task_file(struct file *file)
                fput(file);
 }
 
-static void __io_uring_attempt_task_drop(struct file *file)
-{
-       struct file *old = xa_load(&current->io_uring->xa, (unsigned long)file);
-
-       if (old == file)
-               io_uring_del_task_file(file);
-}
-
 /*
  * Drop task note for this file if we're the only ones that hold it after
  * pending fput()
  */
-static void io_uring_attempt_task_drop(struct file *file, bool exiting)
+static void io_uring_attempt_task_drop(struct file *file)
 {
        if (!current->io_uring)
                return;
@@ -8584,10 +8695,9 @@ static void io_uring_attempt_task_drop(struct file *file, bool exiting)
         * fput() is pending, will be 2 if the only other ref is our potential
         * task file note. If the task is exiting, drop regardless of count.
         */
-       if (!exiting && atomic_long_read(&file->f_count) != 2)
-               return;
-
-       __io_uring_attempt_task_drop(file);
+       if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
+           atomic_long_read(&file->f_count) == 2)
+               io_uring_del_task_file(file);
 }
 
 void __io_uring_files_cancel(struct files_struct *files)
@@ -8608,12 +8718,6 @@ void __io_uring_files_cancel(struct files_struct *files)
        }
 }
 
-static inline bool io_uring_task_idle(struct io_uring_task *tctx)
-{
-       return atomic_long_read(&tctx->req_issue) ==
-               atomic_long_read(&tctx->req_complete);
-}
-
 /*
  * Find any io_uring fd that this task has registered or done IO on, and cancel
  * requests.
@@ -8622,14 +8726,16 @@ void __io_uring_task_cancel(void)
 {
        struct io_uring_task *tctx = current->io_uring;
        DEFINE_WAIT(wait);
-       long completions;
+       s64 inflight;
 
        /* make sure overflow events are dropped */
        tctx->in_idle = true;
 
-       while (!io_uring_task_idle(tctx)) {
+       do {
                /* read completions before cancelations */
-               completions = atomic_long_read(&tctx->req_complete);
+               inflight = percpu_counter_sum(&tctx->inflight);
+               if (!inflight)
+                       break;
                __io_uring_files_cancel(NULL);
 
                prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
@@ -8638,12 +8744,10 @@ void __io_uring_task_cancel(void)
                 * If we've seen completions, retry. This avoids a race where
                 * a completion comes in before we did prepare_to_wait().
                 */
-               if (completions != atomic_long_read(&tctx->req_complete))
+               if (inflight != percpu_counter_sum(&tctx->inflight))
                        continue;
-               if (io_uring_task_idle(tctx))
-                       break;
                schedule();
-       }
+       } while (1);
 
        finish_wait(&tctx->wait, &wait);
        tctx->in_idle = false;
@@ -8651,16 +8755,7 @@ void __io_uring_task_cancel(void)
 
 static int io_uring_flush(struct file *file, void *data)
 {
-       struct io_ring_ctx *ctx = file->private_data;
-
-       /*
-        * If the task is going away, cancel work it may have pending
-        */
-       if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
-               data = NULL;
-
-       io_uring_cancel_task_requests(ctx, data);
-       io_uring_attempt_task_drop(file, !data);
+       io_uring_attempt_task_drop(file);
        return 0;
 }
 
@@ -9109,7 +9204,10 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
        ctx->compat = in_compat_syscall();
        ctx->user = user;
        ctx->creds = get_current_cred();
-
+#ifdef CONFIG_AUDIT
+       ctx->loginuid = current->loginuid;
+       ctx->sessionid = current->sessionid;
+#endif
        ctx->sqo_task = get_task_struct(current);
 
        /*
@@ -9277,23 +9375,33 @@ out:
 
 static int io_register_personality(struct io_ring_ctx *ctx)
 {
-       const struct cred *creds = get_current_cred();
-       int id;
+       struct io_identity *id;
+       int ret;
+
+       id = kmalloc(sizeof(*id), GFP_KERNEL);
+       if (unlikely(!id))
+               return -ENOMEM;
 
-       id = idr_alloc_cyclic(&ctx->personality_idr, (void *) creds, 1,
-                               USHRT_MAX, GFP_KERNEL);
-       if (id < 0)
-               put_cred(creds);
-       return id;
+       io_init_identity(id);
+       id->creds = get_current_cred();
+
+       ret = idr_alloc_cyclic(&ctx->personality_idr, id, 1, USHRT_MAX, GFP_KERNEL);
+       if (ret < 0) {
+               put_cred(id->creds);
+               kfree(id);
+       }
+       return ret;
 }
 
 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
 {
-       const struct cred *old_creds;
+       struct io_identity *iod;
 
-       old_creds = idr_remove(&ctx->personality_idr, id);
-       if (old_creds) {
-               put_cred(old_creds);
+       iod = idr_remove(&ctx->personality_idr, id);
+       if (iod) {
+               put_cred(iod->creds);
+               if (refcount_dec_and_test(&iod->count))
+                       kfree(iod);
                return 0;
        }