#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/fs_struct.h>
#include <linux/splice.h>
#include <linux/task_work.h>
+#include <linux/pagemap.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
*/
u32 sq_dropped;
/*
- * Runtime flags
+ * Runtime SQ flags
*
* Written by the kernel, shouldn't be modified by the
* application.
* for IORING_SQ_NEED_WAKEUP after updating the sq tail.
*/
u32 sq_flags;
+ /*
+ * Runtime CQ flags
+ *
+ * Written by the application, shouldn't be modified by the
+ * kernel.
+ */
+ u32 cq_flags;
/*
* Number of completion events lost because the queue was full;
* this should be avoided by the application by making sure
struct list_head node;
struct list_head file_list;
struct fixed_file_data *file_data;
- struct work_struct work;
+ struct llist_node llist;
};
struct fixed_file_data {
struct {
unsigned int flags;
unsigned int compat: 1;
- unsigned int account_mem: 1;
+ unsigned int limit_mem: 1;
unsigned int cq_overflow_flushed: 1;
unsigned int drain_next: 1;
unsigned int eventfd_async: 1;
const struct cred *creds;
- /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
- struct completion *completions;
+ struct completion ref_comp;
+ struct completion sq_thread_comp;
/* if all else fails... */
struct io_kiocb *fallback_req;
spinlock_t completion_lock;
/*
- * ->poll_list is protected by the ctx->uring_lock for
+ * ->iopoll_list is protected by the ctx->uring_lock for
* io_uring instances that don't use IORING_SETUP_SQPOLL.
* For SQPOLL, only the single threaded io_sq_thread() will
* manipulate the list, hence no extra locking is needed there.
*/
- struct list_head poll_list;
+ struct list_head iopoll_list;
struct hlist_head *cancel_hash;
unsigned cancel_hash_bits;
bool poll_multi_file;
struct list_head inflight_list;
} ____cacheline_aligned_in_smp;
+ struct delayed_work file_put_work;
+ struct llist_head file_put_llist;
+
struct work_struct exit_work;
};
struct file *file;
u64 addr;
int flags;
- u32 count;
+ u32 off;
+ u32 target_seq;
+ struct list_head list;
};
struct io_rw {
struct io_sr_msg {
struct file *file;
union {
- struct user_msghdr __user *msg;
+ struct user_msghdr __user *umsg;
void __user *buf;
};
int msg_flags;
struct io_open {
struct file *file;
int dfd;
- union {
- unsigned mask;
- };
struct filename *filename;
- struct statx __user *buffer;
struct open_how how;
unsigned long nofile;
};
__u16 bid;
};
+struct io_statx {
+ struct file *file;
+ int dfd;
+ unsigned int mask;
+ unsigned int flags;
+ const char __user *filename;
+ struct statx __user *buffer;
+};
+
+struct io_completion {
+ struct file *file;
+ struct list_head list;
+ int cflags;
+};
+
struct io_async_connect {
struct sockaddr_storage address;
};
struct iovec *iov;
ssize_t nr_segs;
ssize_t size;
+ struct wait_page_queue wpq;
};
struct io_async_ctx {
REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
REQ_F_LINK_HEAD_BIT,
- REQ_F_LINK_NEXT_BIT,
REQ_F_FAIL_LINK_BIT,
REQ_F_INFLIGHT_BIT,
REQ_F_CUR_POS_BIT,
REQ_F_NOWAIT_BIT,
- REQ_F_IOPOLL_COMPLETED_BIT,
REQ_F_LINK_TIMEOUT_BIT,
- REQ_F_TIMEOUT_BIT,
REQ_F_ISREG_BIT,
- REQ_F_MUST_PUNT_BIT,
- REQ_F_TIMEOUT_NOSEQ_BIT,
REQ_F_COMP_LOCKED_BIT,
REQ_F_NEED_CLEANUP_BIT,
REQ_F_OVERFLOW_BIT,
REQ_F_POLLED_BIT,
REQ_F_BUFFER_SELECTED_BIT,
REQ_F_NO_FILE_TABLE_BIT,
+ REQ_F_WORK_INITIALIZED_BIT,
+ REQ_F_TASK_PINNED_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
/* head of a link */
REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT),
- /* already grabbed next link */
- REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT),
/* fail rest of links */
REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT),
/* on inflight list */
REQ_F_CUR_POS = BIT(REQ_F_CUR_POS_BIT),
/* must not punt to workers */
REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT),
- /* polled IO has completed */
- REQ_F_IOPOLL_COMPLETED = BIT(REQ_F_IOPOLL_COMPLETED_BIT),
/* has linked timeout */
REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT),
- /* timeout request */
- REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT),
/* regular file */
REQ_F_ISREG = BIT(REQ_F_ISREG_BIT),
- /* must be punted even for NONBLOCK */
- REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT),
- /* no timeout sequence */
- REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
/* completion under lock */
REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
/* needs cleanup */
REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT),
/* doesn't need file table for this request */
REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT),
+ /* io_wq_work is initialized */
+ REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT),
+ /* req->task is refcounted */
+ REQ_F_TASK_PINNED = BIT(REQ_F_TASK_PINNED_BIT),
};
struct async_poll {
struct io_poll_iocb poll;
- struct io_wq_work work;
+ struct io_poll_iocb *double_poll;
};
/*
struct io_epoll epoll;
struct io_splice splice;
struct io_provide_buf pbuf;
+ struct io_statx statx;
+ /* use only after cleaning per-op data, see io_clean_op() */
+ struct io_completion compl;
};
struct io_async_ctx *io;
- int cflags;
- bool needs_fixed_file;
u8 opcode;
+ /* polled IO has completed */
+ u8 iopoll_completed;
u16 buf_index;
+ u32 result;
- struct io_ring_ctx *ctx;
- struct list_head list;
- unsigned int flags;
- refcount_t refs;
- struct task_struct *task;
- unsigned long fsize;
- u64 user_data;
- u32 result;
- u32 sequence;
-
- struct list_head link_list;
+ struct io_ring_ctx *ctx;
+ unsigned int flags;
+ refcount_t refs;
+ struct task_struct *task;
+ u64 user_data;
- struct list_head inflight_entry;
+ struct list_head link_list;
- struct percpu_ref *fixed_file_refs;
+ /*
+ * 1. used with ctx->iopoll_list with reads/writes
+ * 2. to track reqs with ->files (see io_op_def::file_table)
+ */
+ struct list_head inflight_entry;
+
+ struct percpu_ref *fixed_file_refs;
+ struct callback_head task_work;
+ /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
+ struct hlist_node hash_node;
+ struct async_poll *apoll;
+ struct io_wq_work work;
+};
- union {
- /*
- * Only commands that never go async can use the below fields,
- * obviously. Right now only IORING_OP_POLL_ADD uses them, and
- * async armed poll handlers for regular commands. The latter
- * restore the work, if needed.
- */
- struct {
- struct callback_head task_work;
- struct hlist_node hash_node;
- struct async_poll *apoll;
- };
- struct io_wq_work work;
- };
+struct io_defer_entry {
+ struct list_head list;
+ struct io_kiocb *req;
+ u32 seq;
};
-#define IO_PLUG_THRESHOLD 2
#define IO_IOPOLL_BATCH 8
+struct io_comp_state {
+ unsigned int nr;
+ struct list_head list;
+ struct io_ring_ctx *ctx;
+};
+
struct io_submit_state {
struct blk_plug plug;
void *reqs[IO_IOPOLL_BATCH];
unsigned int free_reqs;
+ /*
+ * Batch completion logic
+ */
+ struct io_comp_state comp;
+
/*
* File reference cache
*/
struct file *file;
unsigned int fd;
unsigned int has_refs;
- unsigned int used_refs;
unsigned int ios_left;
};
unsigned needs_mm : 1;
/* needs req->file assigned */
unsigned needs_file : 1;
+ /* don't fail if file grab fails */
+ unsigned needs_file_no_error : 1;
/* hash wq insertion if file is a regular file */
unsigned hash_reg_file : 1;
/* unbound wq insertion if file is a non-regular file */
unsigned pollout : 1;
/* op supports buffer selection */
unsigned buffer_select : 1;
+ unsigned needs_fsize : 1;
};
static const struct io_op_def io_op_defs[] = {
.hash_reg_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
+ .needs_fsize = 1,
},
[IORING_OP_FSYNC] = {
.needs_file = 1,
.hash_reg_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
+ .needs_fsize = 1,
},
[IORING_OP_POLL_ADD] = {
.needs_file = 1,
},
[IORING_OP_FALLOCATE] = {
.needs_file = 1,
+ .needs_fsize = 1,
},
[IORING_OP_OPENAT] = {
.file_table = 1,
},
[IORING_OP_CLOSE] = {
.needs_file = 1,
+ .needs_file_no_error = 1,
.file_table = 1,
},
[IORING_OP_FILES_UPDATE] = {
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
+ .needs_fsize = 1,
},
[IORING_OP_FADVISE] = {
.needs_file = 1,
},
[IORING_OP_PROVIDE_BUFFERS] = {},
[IORING_OP_REMOVE_BUFFERS] = {},
+ [IORING_OP_TEE] = {
+ .needs_file = 1,
+ .hash_reg_file = 1,
+ .unbound_nonreg_file = 1,
+ },
+};
+
+enum io_mem_account {
+ ACCT_LOCKED,
+ ACCT_PINNED,
};
-static void io_wq_submit_work(struct io_wq_work **workptr);
+static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
+ struct io_comp_state *cs);
static void io_cqring_fill_event(struct io_kiocb *req, long res);
static void io_put_req(struct io_kiocb *req);
+static void io_double_put_req(struct io_kiocb *req);
static void __io_double_put_req(struct io_kiocb *req);
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
+static void __io_queue_linked_timeout(struct io_kiocb *req);
static void io_queue_linked_timeout(struct io_kiocb *req);
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_files_update *ip,
unsigned nr_args);
-static int io_grab_files(struct io_kiocb *req);
-static void io_cleanup_req(struct io_kiocb *req);
+static int io_prep_work_files(struct io_kiocb *req);
+static void __io_clean_op(struct io_kiocb *req);
static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
int fd, struct file **out_file, bool fixed);
static void __io_queue_sqe(struct io_kiocb *req,
- const struct io_uring_sqe *sqe);
+ const struct io_uring_sqe *sqe,
+ struct io_comp_state *cs);
+static void io_file_put_work(struct work_struct *work);
+
+static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
+ struct iovec **iovec, struct iov_iter *iter,
+ bool needs_lock);
+static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
+ struct iovec *iovec, struct iovec *fast_iov,
+ struct iov_iter *iter);
static struct kmem_cache *req_cachep;
}
EXPORT_SYMBOL(io_uring_get_socket);
+static void io_get_req_task(struct io_kiocb *req)
+{
+ if (req->flags & REQ_F_TASK_PINNED)
+ return;
+ get_task_struct(req->task);
+ req->flags |= REQ_F_TASK_PINNED;
+}
+
+static inline void io_clean_op(struct io_kiocb *req)
+{
+ if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
+ __io_clean_op(req);
+}
+
+/* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */
+static void __io_put_req_task(struct io_kiocb *req)
+{
+ if (req->flags & REQ_F_TASK_PINNED)
+ put_task_struct(req->task);
+}
+
+static void io_sq_thread_drop_mm(void)
+{
+ struct mm_struct *mm = current->mm;
+
+ if (mm) {
+ kthread_unuse_mm(mm);
+ mmput(mm);
+ }
+}
+
+static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
+{
+ if (!current->mm) {
+ if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) ||
+ !mmget_not_zero(ctx->sqo_mm)))
+ return -EFAULT;
+ kthread_use_mm(ctx->sqo_mm);
+ }
+
+ return 0;
+}
+
+static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
+ struct io_kiocb *req)
+{
+ if (!io_op_defs[req->opcode].needs_mm)
+ return 0;
+ return __io_sq_thread_acquire_mm(ctx);
+}
+
+static inline void req_set_fail_links(struct io_kiocb *req)
+{
+ if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
+ req->flags |= REQ_F_FAIL_LINK;
+}
+
+/*
+ * Note: must call io_req_init_async() for the first time you
+ * touch any members of io_wq_work.
+ */
+static inline void io_req_init_async(struct io_kiocb *req)
+{
+ if (req->flags & REQ_F_WORK_INITIALIZED)
+ return;
+
+ memset(&req->work, 0, sizeof(req->work));
+ req->flags |= REQ_F_WORK_INITIALIZED;
+}
+
+static inline bool io_async_submit(struct io_ring_ctx *ctx)
+{
+ return ctx->flags & IORING_SETUP_SQPOLL;
+}
+
static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
- complete(&ctx->completions[0]);
+ complete(&ctx->ref_comp);
+}
+
+static inline bool io_is_timeout_noseq(struct io_kiocb *req)
+{
+ return !req->timeout.off;
}
static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
if (!ctx->fallback_req)
goto err;
- ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
- if (!ctx->completions)
- goto err;
-
/*
* Use 5 bits less than the max cq entries, that should give us around
* 32 entries per hash list if totally full and uniformly spread.
init_waitqueue_head(&ctx->sqo_wait);
init_waitqueue_head(&ctx->cq_wait);
INIT_LIST_HEAD(&ctx->cq_overflow_list);
- init_completion(&ctx->completions[0]);
- init_completion(&ctx->completions[1]);
+ init_completion(&ctx->ref_comp);
+ init_completion(&ctx->sq_thread_comp);
idr_init(&ctx->io_buffer_idr);
idr_init(&ctx->personality_idr);
mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait);
spin_lock_init(&ctx->completion_lock);
- INIT_LIST_HEAD(&ctx->poll_list);
+ INIT_LIST_HEAD(&ctx->iopoll_list);
INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list);
init_waitqueue_head(&ctx->inflight_wait);
spin_lock_init(&ctx->inflight_lock);
INIT_LIST_HEAD(&ctx->inflight_list);
+ INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
+ init_llist_head(&ctx->file_put_llist);
return ctx;
err:
if (ctx->fallback_req)
kmem_cache_free(req_cachep, ctx->fallback_req);
- kfree(ctx->completions);
kfree(ctx->cancel_hash);
kfree(ctx);
return NULL;
}
-static inline bool __req_need_defer(struct io_kiocb *req)
+static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
- struct io_ring_ctx *ctx = req->ctx;
+ if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
+ struct io_ring_ctx *ctx = req->ctx;
- return req->sequence != ctx->cached_cq_tail
+ return seq != ctx->cached_cq_tail
+ atomic_read(&ctx->cached_cq_overflow);
-}
-
-static inline bool req_need_defer(struct io_kiocb *req)
-{
- if (unlikely(req->flags & REQ_F_IO_DRAIN))
- return __req_need_defer(req);
-
- return false;
-}
-
-static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
-{
- struct io_kiocb *req;
-
- req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
- if (req && !req_need_defer(req)) {
- list_del_init(&req->list);
- return req;
- }
-
- return NULL;
-}
-
-static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
-{
- struct io_kiocb *req;
-
- req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
- if (req) {
- if (req->flags & REQ_F_TIMEOUT_NOSEQ)
- return NULL;
- if (!__req_need_defer(req)) {
- list_del_init(&req->list);
- return req;
- }
}
- return NULL;
+ return false;
}
static void __io_commit_cqring(struct io_ring_ctx *ctx)
}
}
-static inline void io_req_work_grab_env(struct io_kiocb *req,
- const struct io_op_def *def)
+/*
+ * Returns true if we need to defer file table putting. This can only happen
+ * from the error path with REQ_F_COMP_LOCKED set.
+ */
+static bool io_req_clean_work(struct io_kiocb *req)
{
- if (!req->work.mm && def->needs_mm) {
- mmgrab(current->mm);
- req->work.mm = current->mm;
- }
- if (!req->work.creds)
- req->work.creds = get_current_cred();
- if (!req->work.fs && def->needs_fs) {
- spin_lock(&current->fs->lock);
- if (!current->fs->in_exec) {
- req->work.fs = current->fs;
- req->work.fs->users++;
- } else {
- req->work.flags |= IO_WQ_WORK_CANCEL;
- }
- spin_unlock(&current->fs->lock);
- }
- if (!req->work.task_pid)
- req->work.task_pid = task_pid_vnr(current);
-}
+ if (!(req->flags & REQ_F_WORK_INITIALIZED))
+ return false;
+
+ req->flags &= ~REQ_F_WORK_INITIALIZED;
-static inline void io_req_work_drop_env(struct io_kiocb *req)
-{
if (req->work.mm) {
mmdrop(req->work.mm);
req->work.mm = NULL;
if (req->work.fs) {
struct fs_struct *fs = req->work.fs;
+ if (req->flags & REQ_F_COMP_LOCKED)
+ return true;
+
spin_lock(&req->work.fs->lock);
if (--fs->users)
fs = NULL;
spin_unlock(&req->work.fs->lock);
if (fs)
free_fs_struct(fs);
+ req->work.fs = NULL;
}
+
+ return false;
}
-static inline void io_prep_async_work(struct io_kiocb *req,
- struct io_kiocb **link)
+static void io_prep_async_work(struct io_kiocb *req)
{
const struct io_op_def *def = &io_op_defs[req->opcode];
+ io_req_init_async(req);
+
if (req->flags & REQ_F_ISREG) {
if (def->hash_reg_file)
io_wq_hash_work(&req->work, file_inode(req->file));
if (def->unbound_nonreg_file)
req->work.flags |= IO_WQ_WORK_UNBOUND;
}
+ if (!req->work.mm && def->needs_mm) {
+ mmgrab(current->mm);
+ req->work.mm = current->mm;
+ }
+ if (!req->work.creds)
+ req->work.creds = get_current_cred();
+ if (!req->work.fs && def->needs_fs) {
+ spin_lock(&current->fs->lock);
+ if (!current->fs->in_exec) {
+ req->work.fs = current->fs;
+ req->work.fs->users++;
+ } else {
+ req->work.flags |= IO_WQ_WORK_CANCEL;
+ }
+ spin_unlock(&current->fs->lock);
+ }
+ if (def->needs_fsize)
+ req->work.fsize = rlimit(RLIMIT_FSIZE);
+ else
+ req->work.fsize = RLIM_INFINITY;
+}
- io_req_work_grab_env(req, def);
+static void io_prep_async_link(struct io_kiocb *req)
+{
+ struct io_kiocb *cur;
- *link = io_prep_linked_timeout(req);
+ io_prep_async_work(req);
+ if (req->flags & REQ_F_LINK_HEAD)
+ list_for_each_entry(cur, &req->link_list, link_list)
+ io_prep_async_work(cur);
}
-static inline void io_queue_async_work(struct io_kiocb *req)
+static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *link;
-
- io_prep_async_work(req, &link);
+ struct io_kiocb *link = io_prep_linked_timeout(req);
trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
&req->work, req->flags);
io_wq_enqueue(ctx->io_wq, &req->work);
+ return link;
+}
+
+static void io_queue_async_work(struct io_kiocb *req)
+{
+ struct io_kiocb *link;
+
+ /* init ->work of the whole link before punting */
+ io_prep_async_link(req);
+ link = __io_queue_async_work(req);
if (link)
io_queue_linked_timeout(link);
ret = hrtimer_try_to_cancel(&req->io->timeout.timer);
if (ret != -1) {
- atomic_inc(&req->ctx->cq_timeouts);
- list_del_init(&req->list);
+ atomic_set(&req->ctx->cq_timeouts,
+ atomic_read(&req->ctx->cq_timeouts) + 1);
+ list_del_init(&req->timeout.list);
req->flags |= REQ_F_COMP_LOCKED;
io_cqring_fill_event(req, 0);
io_put_req(req);
struct io_kiocb *req, *tmp;
spin_lock_irq(&ctx->completion_lock);
- list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
+ list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list)
io_kill_timeout(req);
spin_unlock_irq(&ctx->completion_lock);
}
-static void io_commit_cqring(struct io_ring_ctx *ctx)
+static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
- struct io_kiocb *req;
+ do {
+ struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
+ struct io_defer_entry, list);
+ struct io_kiocb *link;
+
+ if (req_need_defer(de->req, de->seq))
+ break;
+ list_del_init(&de->list);
+ /* punt-init is done before queueing for defer */
+ link = __io_queue_async_work(de->req);
+ if (link) {
+ __io_queue_linked_timeout(link);
+ /* drop submission reference */
+ link->flags |= REQ_F_COMP_LOCKED;
+ io_put_req(link);
+ }
+ kfree(de);
+ } while (!list_empty(&ctx->defer_list));
+}
+
+static void io_flush_timeouts(struct io_ring_ctx *ctx)
+{
+ while (!list_empty(&ctx->timeout_list)) {
+ struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
+ struct io_kiocb, timeout.list);
+
+ if (io_is_timeout_noseq(req))
+ break;
+ if (req->timeout.target_seq != ctx->cached_cq_tail
+ - atomic_read(&ctx->cq_timeouts))
+ break;
- while ((req = io_get_timeout_req(ctx)) != NULL)
+ list_del_init(&req->timeout.list);
io_kill_timeout(req);
+ }
+}
+static void io_commit_cqring(struct io_ring_ctx *ctx)
+{
+ io_flush_timeouts(ctx);
__io_commit_cqring(ctx);
- while ((req = io_get_deferred_req(ctx)) != NULL)
- io_queue_async_work(req);
+ if (unlikely(!list_empty(&ctx->defer_list)))
+ __io_queue_deferred(ctx);
}
static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
if (!ctx->cq_ev_fd)
return false;
+ if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
+ return false;
if (!ctx->eventfd_async)
return true;
return io_wq_current_is_worker();
eventfd_signal(ctx->cq_ev_fd, 1);
}
+static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
+{
+ if (list_empty(&ctx->cq_overflow_list)) {
+ clear_bit(0, &ctx->sq_check_overflow);
+ clear_bit(0, &ctx->cq_check_overflow);
+ ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
+ }
+}
+
/* Returns true if there are no backlogged entries after the flush */
static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
break;
req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
- list);
- list_move(&req->list, &list);
+ compl.list);
+ list_move(&req->compl.list, &list);
req->flags &= ~REQ_F_OVERFLOW;
if (cqe) {
WRITE_ONCE(cqe->user_data, req->user_data);
WRITE_ONCE(cqe->res, req->result);
- WRITE_ONCE(cqe->flags, req->cflags);
+ WRITE_ONCE(cqe->flags, req->compl.cflags);
} else {
WRITE_ONCE(ctx->rings->cq_overflow,
atomic_inc_return(&ctx->cached_cq_overflow));
}
io_commit_cqring(ctx);
- if (cqe) {
- clear_bit(0, &ctx->sq_check_overflow);
- clear_bit(0, &ctx->cq_check_overflow);
- }
+ io_cqring_mark_overflow(ctx);
+
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
while (!list_empty(&list)) {
- req = list_first_entry(&list, struct io_kiocb, list);
- list_del(&req->list);
+ req = list_first_entry(&list, struct io_kiocb, compl.list);
+ list_del(&req->compl.list);
io_put_req(req);
}
if (list_empty(&ctx->cq_overflow_list)) {
set_bit(0, &ctx->sq_check_overflow);
set_bit(0, &ctx->cq_check_overflow);
+ ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
}
+ io_clean_op(req);
req->flags |= REQ_F_OVERFLOW;
- refcount_inc(&req->refs);
req->result = res;
- req->cflags = cflags;
- list_add_tail(&req->list, &ctx->cq_overflow_list);
+ req->compl.cflags = cflags;
+ refcount_inc(&req->refs);
+ list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
}
}
__io_cqring_fill_event(req, res, 0);
}
-static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
+static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
io_cqring_ev_posted(ctx);
}
-static void io_cqring_add_event(struct io_kiocb *req, long res)
+static void io_submit_flush_completions(struct io_comp_state *cs)
+{
+ struct io_ring_ctx *ctx = cs->ctx;
+
+ spin_lock_irq(&ctx->completion_lock);
+ while (!list_empty(&cs->list)) {
+ struct io_kiocb *req;
+
+ req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
+ list_del(&req->compl.list);
+ __io_cqring_fill_event(req, req->result, req->compl.cflags);
+ if (!(req->flags & REQ_F_LINK_HEAD)) {
+ req->flags |= REQ_F_COMP_LOCKED;
+ io_put_req(req);
+ } else {
+ spin_unlock_irq(&ctx->completion_lock);
+ io_put_req(req);
+ spin_lock_irq(&ctx->completion_lock);
+ }
+ }
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
+
+ io_cqring_ev_posted(ctx);
+ cs->nr = 0;
+}
+
+static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
+ struct io_comp_state *cs)
+{
+ if (!cs) {
+ io_cqring_add_event(req, res, cflags);
+ io_put_req(req);
+ } else {
+ io_clean_op(req);
+ req->result = res;
+ req->compl.cflags = cflags;
+ list_add_tail(&req->compl.list, &cs->list);
+ if (++cs->nr >= 32)
+ io_submit_flush_completions(cs);
+ }
+}
+
+static void io_req_complete(struct io_kiocb *req, long res)
{
- __io_cqring_add_event(req, res, 0);
+ __io_req_complete(req, res, 0, NULL);
}
static inline bool io_is_fallback_req(struct io_kiocb *req)
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct io_kiocb *req;
- if (!state) {
- req = kmem_cache_alloc(req_cachep, gfp);
- if (unlikely(!req))
- goto fallback;
- } else if (!state->free_reqs) {
+ if (!state->free_reqs) {
size_t sz;
int ret;
fput(file);
}
-static void __io_req_aux_free(struct io_kiocb *req)
+static bool io_dismantle_req(struct io_kiocb *req)
{
- if (req->flags & REQ_F_NEED_CLEANUP)
- io_cleanup_req(req);
+ io_clean_op(req);
- kfree(req->io);
+ if (req->io)
+ kfree(req->io);
if (req->file)
io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
- if (req->task)
- put_task_struct(req->task);
-
- io_req_work_drop_env(req);
-}
-
-static void __io_free_req(struct io_kiocb *req)
-{
- __io_req_aux_free(req);
if (req->flags & REQ_F_INFLIGHT) {
struct io_ring_ctx *ctx = req->ctx;
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
}
- percpu_ref_put(&req->ctx->refs);
+ return io_req_clean_work(req);
+}
+
+static void __io_free_req_finish(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ __io_put_req_task(req);
if (likely(!io_is_fallback_req(req)))
kmem_cache_free(req_cachep, req);
else
- clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req);
+ clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
+ percpu_ref_put(&ctx->refs);
}
-struct req_batch {
- void *reqs[IO_IOPOLL_BATCH];
- int to_free;
- int need_iter;
-};
-
-static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
+static void io_req_task_file_table_put(struct callback_head *cb)
{
- if (!rb->to_free)
- return;
- if (rb->need_iter) {
- int i, inflight = 0;
- unsigned long flags;
+ struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ struct fs_struct *fs = req->work.fs;
- for (i = 0; i < rb->to_free; i++) {
- struct io_kiocb *req = rb->reqs[i];
+ spin_lock(&req->work.fs->lock);
+ if (--fs->users)
+ fs = NULL;
+ spin_unlock(&req->work.fs->lock);
+ if (fs)
+ free_fs_struct(fs);
+ req->work.fs = NULL;
+ __io_free_req_finish(req);
+}
- if (req->flags & REQ_F_INFLIGHT)
- inflight++;
- __io_req_aux_free(req);
- }
- if (!inflight)
- goto do_free;
+static void __io_free_req(struct io_kiocb *req)
+{
+ if (!io_dismantle_req(req)) {
+ __io_free_req_finish(req);
+ } else {
+ int ret;
- spin_lock_irqsave(&ctx->inflight_lock, flags);
- for (i = 0; i < rb->to_free; i++) {
- struct io_kiocb *req = rb->reqs[i];
+ init_task_work(&req->task_work, io_req_task_file_table_put);
+ ret = task_work_add(req->task, &req->task_work, TWA_RESUME);
+ if (unlikely(ret)) {
+ struct task_struct *tsk;
- if (req->flags & REQ_F_INFLIGHT) {
- list_del(&req->inflight_entry);
- if (!--inflight)
- break;
- }
+ tsk = io_wq_get_task(req->ctx->io_wq);
+ task_work_add(tsk, &req->task_work, 0);
}
- spin_unlock_irqrestore(&ctx->inflight_lock, flags);
-
- if (waitqueue_active(&ctx->inflight_wait))
- wake_up(&ctx->inflight_wait);
}
-do_free:
- kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
- percpu_ref_put_many(&ctx->refs, rb->to_free);
- rb->to_free = rb->need_iter = 0;
}
static bool io_link_cancel_timeout(struct io_kiocb *req)
return false;
}
-static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
+static bool __io_kill_linked_timeout(struct io_kiocb *req)
+{
+ struct io_kiocb *link;
+ bool wake_ev;
+
+ if (list_empty(&req->link_list))
+ return false;
+ link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
+ if (link->opcode != IORING_OP_LINK_TIMEOUT)
+ return false;
+
+ list_del_init(&link->link_list);
+ link->flags |= REQ_F_COMP_LOCKED;
+ wake_ev = io_link_cancel_timeout(link);
+ req->flags &= ~REQ_F_LINK_TIMEOUT;
+ return wake_ev;
+}
+
+static void io_kill_linked_timeout(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- bool wake_ev = false;
+ bool wake_ev;
- /* Already got next link */
- if (req->flags & REQ_F_LINK_NEXT)
- return;
+ if (!(req->flags & REQ_F_COMP_LOCKED)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ wake_ev = __io_kill_linked_timeout(req);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ } else {
+ wake_ev = __io_kill_linked_timeout(req);
+ }
+
+ if (wake_ev)
+ io_cqring_ev_posted(ctx);
+}
+
+static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
+{
+ struct io_kiocb *nxt;
/*
* The list should never be empty when we are called here. But could
* potentially happen if the chain is messed up, check to be on the
* safe side.
*/
- while (!list_empty(&req->link_list)) {
- struct io_kiocb *nxt = list_first_entry(&req->link_list,
- struct io_kiocb, link_list);
-
- if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) &&
- (nxt->flags & REQ_F_TIMEOUT))) {
- list_del_init(&nxt->link_list);
- wake_ev |= io_link_cancel_timeout(nxt);
- req->flags &= ~REQ_F_LINK_TIMEOUT;
- continue;
- }
-
- list_del_init(&req->link_list);
- if (!list_empty(&nxt->link_list))
- nxt->flags |= REQ_F_LINK_HEAD;
- *nxtptr = nxt;
- break;
- }
+ if (unlikely(list_empty(&req->link_list)))
+ return NULL;
- req->flags |= REQ_F_LINK_NEXT;
- if (wake_ev)
- io_cqring_ev_posted(ctx);
+ nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
+ list_del_init(&req->link_list);
+ if (!list_empty(&nxt->link_list))
+ nxt->flags |= REQ_F_LINK_HEAD;
+ return nxt;
}
/*
* Called if REQ_F_LINK_HEAD is set, and we fail the head request
*/
-static void io_fail_links(struct io_kiocb *req)
+static void __io_fail_links(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&ctx->completion_lock, flags);
while (!list_empty(&req->link_list)) {
struct io_kiocb *link = list_first_entry(&req->link_list,
list_del_init(&link->link_list);
trace_io_uring_fail_link(req, link);
- if ((req->flags & REQ_F_LINK_TIMEOUT) &&
- link->opcode == IORING_OP_LINK_TIMEOUT) {
- io_link_cancel_timeout(link);
- } else {
- io_cqring_fill_event(link, -ECANCELED);
- __io_double_put_req(link);
- }
+ io_cqring_fill_event(link, -ECANCELED);
+ link->flags |= REQ_F_COMP_LOCKED;
+ __io_double_put_req(link);
req->flags &= ~REQ_F_LINK_TIMEOUT;
}
io_commit_cqring(ctx);
- spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
}
-static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
+static void io_fail_links(struct io_kiocb *req)
{
- if (likely(!(req->flags & REQ_F_LINK_HEAD)))
- return;
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (!(req->flags & REQ_F_COMP_LOCKED)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ __io_fail_links(req);
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ } else {
+ __io_fail_links(req);
+ }
+
+ io_cqring_ev_posted(ctx);
+}
+
+static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
+{
+ req->flags &= ~REQ_F_LINK_HEAD;
+ if (req->flags & REQ_F_LINK_TIMEOUT)
+ io_kill_linked_timeout(req);
/*
* If LINK is set, we have dependent requests in this chain. If we
* dependencies to the next request. In case of failure, fail the rest
* of the chain.
*/
- if (req->flags & REQ_F_FAIL_LINK) {
- io_fail_links(req);
- } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
- REQ_F_LINK_TIMEOUT) {
- struct io_ring_ctx *ctx = req->ctx;
- unsigned long flags;
+ if (likely(!(req->flags & REQ_F_FAIL_LINK)))
+ return io_req_link_next(req);
+ io_fail_links(req);
+ return NULL;
+}
- /*
- * If this is a timeout link, we could be racing with the
- * timeout timer. Grab the completion lock for this case to
- * protect against that.
- */
- spin_lock_irqsave(&ctx->completion_lock, flags);
- io_req_link_next(req, nxt);
- spin_unlock_irqrestore(&ctx->completion_lock, flags);
+static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
+{
+ if (likely(!(req->flags & REQ_F_LINK_HEAD)))
+ return NULL;
+ return __io_req_find_next(req);
+}
+
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+{
+ struct task_struct *tsk = req->task;
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret, notify;
+
+ /*
+ * SQPOLL kernel thread doesn't need notification, just a wakeup. For
+ * all other cases, use TWA_SIGNAL unconditionally to ensure we're
+ * processing task_work. There's no reliable way to tell if TWA_RESUME
+ * will do the job.
+ */
+ notify = 0;
+ if (!(ctx->flags & IORING_SETUP_SQPOLL))
+ notify = TWA_SIGNAL;
+
+ ret = task_work_add(tsk, cb, notify);
+ if (!ret)
+ wake_up_process(tsk);
+
+ return ret;
+}
+
+static void __io_req_task_cancel(struct io_kiocb *req, int error)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock_irq(&ctx->completion_lock);
+ io_cqring_fill_event(req, error);
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
+
+ io_cqring_ev_posted(ctx);
+ req_set_fail_links(req);
+ io_double_put_req(req);
+}
+
+static void io_req_task_cancel(struct callback_head *cb)
+{
+ struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+
+ __io_req_task_cancel(req, -ECANCELED);
+}
+
+static void __io_req_task_submit(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (!__io_sq_thread_acquire_mm(ctx)) {
+ mutex_lock(&ctx->uring_lock);
+ __io_queue_sqe(req, NULL, NULL);
+ mutex_unlock(&ctx->uring_lock);
} else {
- io_req_link_next(req, nxt);
+ __io_req_task_cancel(req, -EFAULT);
}
}
-static void io_free_req(struct io_kiocb *req)
+static void io_req_task_submit(struct callback_head *cb)
{
- struct io_kiocb *nxt = NULL;
+ struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ struct io_ring_ctx *ctx = req->ctx;
- io_req_find_next(req, &nxt);
- __io_free_req(req);
+ __io_req_task_submit(req);
+ percpu_ref_put(&ctx->refs);
+}
+
+static void io_req_task_queue(struct io_kiocb *req)
+{
+ int ret;
+
+ init_task_work(&req->task_work, io_req_task_submit);
+ percpu_ref_get(&req->ctx->refs);
+
+ ret = io_req_task_work_add(req, &req->task_work);
+ if (unlikely(ret)) {
+ struct task_struct *tsk;
+
+ init_task_work(&req->task_work, io_req_task_cancel);
+ tsk = io_wq_get_task(req->ctx->io_wq);
+ task_work_add(tsk, &req->task_work, 0);
+ wake_up_process(tsk);
+ }
+}
+
+static void io_queue_next(struct io_kiocb *req)
+{
+ struct io_kiocb *nxt = io_req_find_next(req);
if (nxt)
- io_queue_async_work(nxt);
+ io_req_task_queue(nxt);
}
-static void io_link_work_cb(struct io_wq_work **workptr)
+static void io_free_req(struct io_kiocb *req)
{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct io_kiocb *link;
+ io_queue_next(req);
+ __io_free_req(req);
+}
- link = list_first_entry(&req->link_list, struct io_kiocb, link_list);
- io_queue_linked_timeout(link);
- io_wq_submit_work(workptr);
+struct req_batch {
+ void *reqs[IO_IOPOLL_BATCH];
+ int to_free;
+
+ struct task_struct *task;
+ int task_refs;
+};
+
+static inline void io_init_req_batch(struct req_batch *rb)
+{
+ rb->to_free = 0;
+ rb->task_refs = 0;
+ rb->task = NULL;
}
-static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt)
+static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
+ struct req_batch *rb)
{
- struct io_kiocb *link;
- const struct io_op_def *def = &io_op_defs[nxt->opcode];
+ kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
+ percpu_ref_put_many(&ctx->refs, rb->to_free);
+ rb->to_free = 0;
+}
- if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file)
- io_wq_hash_work(&nxt->work, file_inode(nxt->file));
+static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
+ struct req_batch *rb)
+{
+ if (rb->to_free)
+ __io_req_free_batch_flush(ctx, rb);
+ if (rb->task) {
+ put_task_struct_many(rb->task, rb->task_refs);
+ rb->task = NULL;
+ }
+}
- *workptr = &nxt->work;
- link = io_prep_linked_timeout(nxt);
- if (link)
- nxt->work.func = io_link_work_cb;
+static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
+{
+ if (unlikely(io_is_fallback_req(req))) {
+ io_free_req(req);
+ return;
+ }
+ if (req->flags & REQ_F_LINK_HEAD)
+ io_queue_next(req);
+
+ if (req->flags & REQ_F_TASK_PINNED) {
+ if (req->task != rb->task) {
+ if (rb->task)
+ put_task_struct_many(rb->task, rb->task_refs);
+ rb->task = req->task;
+ rb->task_refs = 0;
+ }
+ rb->task_refs++;
+ req->flags &= ~REQ_F_TASK_PINNED;
+ }
+
+ WARN_ON_ONCE(io_dismantle_req(req));
+ rb->reqs[rb->to_free++] = req;
+ if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
+ __io_req_free_batch_flush(req->ctx, rb);
}
/*
* Drop reference to request, return next in chain (if there is one) if this
* was the last reference to this request.
*/
-__attribute__((nonnull))
-static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
+static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
{
+ struct io_kiocb *nxt = NULL;
+
if (refcount_dec_and_test(&req->refs)) {
- io_req_find_next(req, nxtptr);
+ nxt = io_req_find_next(req);
__io_free_req(req);
}
+ return nxt;
}
static void io_put_req(struct io_kiocb *req)
io_free_req(req);
}
-static void io_steal_work(struct io_kiocb *req,
- struct io_wq_work **workptr)
+static struct io_wq_work *io_steal_work(struct io_kiocb *req)
{
+ struct io_kiocb *nxt;
+
/*
- * It's in an io-wq worker, so there always should be at least
- * one reference, which will be dropped in io_put_work() just
- * after the current handler returns.
- *
- * It also means, that if the counter dropped to 1, then there is
- * no asynchronous users left, so it's safe to steal the next work.
+ * A ref is owned by io-wq in which context we're. So, if that's the
+ * last one, it's safe to steal next work. False negatives are Ok,
+ * it just will be re-punted async in io_put_work()
*/
- if (refcount_read(&req->refs) == 1) {
- struct io_kiocb *nxt = NULL;
+ if (refcount_read(&req->refs) != 1)
+ return NULL;
- io_req_find_next(req, &nxt);
- if (nxt)
- io_wq_assign_next(workptr, nxt);
- }
+ nxt = io_req_find_next(req);
+ return nxt ? &nxt->work : NULL;
}
/*
return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
-static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
+static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
{
- if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req))
- return false;
-
- if (req->file || req->io)
- rb->need_iter++;
+ unsigned int cflags;
- rb->reqs[rb->to_free++] = req;
- if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
- io_free_req_many(req->ctx, rb);
- return true;
+ cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
+ cflags |= IORING_CQE_F_BUFFER;
+ req->flags &= ~REQ_F_BUFFER_SELECTED;
+ kfree(kbuf);
+ return cflags;
}
-static int io_put_kbuf(struct io_kiocb *req)
+static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
{
struct io_buffer *kbuf;
- int cflags;
kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
- cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
- cflags |= IORING_CQE_F_BUFFER;
- req->rw.addr = 0;
- kfree(kbuf);
- return cflags;
+ return io_put_kbuf(req, kbuf);
+}
+
+static inline bool io_run_task_work(void)
+{
+ if (current->task_works) {
+ __set_current_state(TASK_RUNNING);
+ task_work_run();
+ return true;
+ }
+
+ return false;
+}
+
+static void io_iopoll_queue(struct list_head *again)
+{
+ struct io_kiocb *req;
+
+ do {
+ req = list_first_entry(again, struct io_kiocb, inflight_entry);
+ list_del(&req->inflight_entry);
+ __io_complete_rw(req, -EAGAIN, 0, NULL);
+ } while (!list_empty(again));
}
/*
{
struct req_batch rb;
struct io_kiocb *req;
+ LIST_HEAD(again);
+
+ /* order with ->result store in io_complete_rw_iopoll() */
+ smp_rmb();
- rb.to_free = rb.need_iter = 0;
+ io_init_req_batch(&rb);
while (!list_empty(done)) {
int cflags = 0;
- req = list_first_entry(done, struct io_kiocb, list);
- list_del(&req->list);
+ req = list_first_entry(done, struct io_kiocb, inflight_entry);
+ if (READ_ONCE(req->result) == -EAGAIN) {
+ req->iopoll_completed = 0;
+ list_move_tail(&req->inflight_entry, &again);
+ continue;
+ }
+ list_del(&req->inflight_entry);
if (req->flags & REQ_F_BUFFER_SELECTED)
- cflags = io_put_kbuf(req);
+ cflags = io_put_rw_kbuf(req);
__io_cqring_fill_event(req, req->result, cflags);
(*nr_events)++;
- if (refcount_dec_and_test(&req->refs) &&
- !io_req_multi_free(&rb, req))
- io_free_req(req);
+ if (refcount_dec_and_test(&req->refs))
+ io_req_free_batch(&rb, req);
}
io_commit_cqring(ctx);
if (ctx->flags & IORING_SETUP_SQPOLL)
io_cqring_ev_posted(ctx);
- io_free_req_many(ctx, &rb);
-}
-
-static void io_iopoll_queue(struct list_head *again)
-{
- struct io_kiocb *req;
+ io_req_free_batch_finish(ctx, &rb);
- do {
- req = list_first_entry(again, struct io_kiocb, list);
- list_del(&req->list);
- refcount_inc(&req->refs);
- io_queue_async_work(req);
- } while (!list_empty(again));
+ if (!list_empty(&again))
+ io_iopoll_queue(&again);
}
static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
{
struct io_kiocb *req, *tmp;
LIST_HEAD(done);
- LIST_HEAD(again);
bool spin;
int ret;
spin = !ctx->poll_multi_file && *nr_events < min;
ret = 0;
- list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
+ list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
struct kiocb *kiocb = &req->rw.kiocb;
/*
* If we find a request that requires polling, break out
* and complete those lists first, if we have entries there.
*/
- if (req->flags & REQ_F_IOPOLL_COMPLETED) {
- list_move_tail(&req->list, &done);
+ if (READ_ONCE(req->iopoll_completed)) {
+ list_move_tail(&req->inflight_entry, &done);
continue;
}
if (!list_empty(&done))
break;
- if (req->result == -EAGAIN) {
- list_move_tail(&req->list, &again);
- continue;
- }
- if (!list_empty(&again))
- break;
-
ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
if (ret < 0)
break;
+ /* iopoll may have completed current req */
+ if (READ_ONCE(req->iopoll_completed))
+ list_move_tail(&req->inflight_entry, &done);
+
if (ret && spin)
spin = false;
ret = 0;
if (!list_empty(&done))
io_iopoll_complete(ctx, nr_events, &done);
- if (!list_empty(&again))
- io_iopoll_queue(&again);
-
return ret;
}
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
long min)
{
- while (!list_empty(&ctx->poll_list) && !need_resched()) {
+ while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
int ret;
ret = io_do_iopoll(ctx, nr_events, min);
if (ret < 0)
return ret;
- if (!min || *nr_events >= min)
+ if (*nr_events >= min)
return 0;
}
* We can't just wait for polled events to come to us, we have to actively
* find and complete them.
*/
-static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
+static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
{
if (!(ctx->flags & IORING_SETUP_IOPOLL))
return;
mutex_lock(&ctx->uring_lock);
- while (!list_empty(&ctx->poll_list)) {
+ while (!list_empty(&ctx->iopoll_list)) {
unsigned int nr_events = 0;
- io_iopoll_getevents(ctx, &nr_events, 1);
+ io_do_iopoll(ctx, &nr_events, 0);
+ /* let it sleep and repeat later if can't complete a request */
+ if (nr_events == 0)
+ break;
/*
* Ensure we allow local-to-the-cpu processing to take place,
* in this case we need to ensure that we reap all events.
+ * Also let task_work, etc. to progress by releasing the mutex
*/
- cond_resched();
+ if (need_resched()) {
+ mutex_unlock(&ctx->uring_lock);
+ cond_resched();
+ mutex_lock(&ctx->uring_lock);
+ }
}
mutex_unlock(&ctx->uring_lock);
}
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
- long min)
+static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
+ unsigned int nr_events = 0;
int iters = 0, ret = 0;
/*
*/
mutex_lock(&ctx->uring_lock);
do {
- int tmin = 0;
-
/*
* Don't enter poll loop if we already have events pending.
* If we do, we can potentially be spinning for commands that
*/
if (!(++iters & 7)) {
mutex_unlock(&ctx->uring_lock);
+ io_run_task_work();
mutex_lock(&ctx->uring_lock);
}
- if (*nr_events < min)
- tmin = min - *nr_events;
-
- ret = io_iopoll_getevents(ctx, nr_events, tmin);
+ ret = io_iopoll_getevents(ctx, &nr_events, min);
if (ret <= 0)
break;
ret = 0;
- } while (min && !*nr_events && !need_resched());
+ } while (min && !nr_events && !need_resched());
mutex_unlock(&ctx->uring_lock);
return ret;
file_end_write(req->file);
}
-static inline void req_set_fail_links(struct io_kiocb *req)
+static void io_complete_rw_common(struct kiocb *kiocb, long res,
+ struct io_comp_state *cs)
{
- if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
- req->flags |= REQ_F_FAIL_LINK;
+ struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
+ int cflags = 0;
+
+ if (kiocb->ki_flags & IOCB_WRITE)
+ kiocb_end_write(req);
+
+ if (res != req->result)
+ req_set_fail_links(req);
+ if (req->flags & REQ_F_BUFFER_SELECTED)
+ cflags = io_put_rw_kbuf(req);
+ __io_req_complete(req, res, cflags, cs);
+}
+
+#ifdef CONFIG_BLOCK
+static bool io_resubmit_prep(struct io_kiocb *req, int error)
+{
+ struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
+ ssize_t ret = -ECANCELED;
+ struct iov_iter iter;
+ int rw;
+
+ if (error) {
+ ret = error;
+ goto end_req;
+ }
+
+ switch (req->opcode) {
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_READ:
+ rw = READ;
+ break;
+ case IORING_OP_WRITEV:
+ case IORING_OP_WRITE_FIXED:
+ case IORING_OP_WRITE:
+ rw = WRITE;
+ break;
+ default:
+ printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n",
+ req->opcode);
+ goto end_req;
+ }
+
+ ret = io_import_iovec(rw, req, &iovec, &iter, false);
+ if (ret < 0)
+ goto end_req;
+ ret = io_setup_async_rw(req, ret, iovec, inline_vecs, &iter);
+ if (!ret)
+ return true;
+ kfree(iovec);
+end_req:
+ req_set_fail_links(req);
+ io_req_complete(req, ret);
+ return false;
+}
+
+static void io_rw_resubmit(struct callback_head *cb)
+{
+ struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ struct io_ring_ctx *ctx = req->ctx;
+ int err;
+
+ err = io_sq_thread_acquire_mm(ctx, req);
+
+ if (io_resubmit_prep(req, err)) {
+ refcount_inc(&req->refs);
+ io_queue_async_work(req);
+ }
+
+ percpu_ref_put(&ctx->refs);
+}
+#endif
+
+static bool io_rw_reissue(struct io_kiocb *req, long res)
+{
+#ifdef CONFIG_BLOCK
+ int ret;
+
+ if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
+ return false;
+
+ init_task_work(&req->task_work, io_rw_resubmit);
+ percpu_ref_get(&req->ctx->refs);
+
+ ret = io_req_task_work_add(req, &req->task_work);
+ if (!ret)
+ return true;
+#endif
+ return false;
}
-static void io_complete_rw_common(struct kiocb *kiocb, long res)
+static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
+ struct io_comp_state *cs)
{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
- int cflags = 0;
-
- if (kiocb->ki_flags & IOCB_WRITE)
- kiocb_end_write(req);
-
- if (res != req->result)
- req_set_fail_links(req);
- if (req->flags & REQ_F_BUFFER_SELECTED)
- cflags = io_put_kbuf(req);
- __io_cqring_add_event(req, res, cflags);
+ if (!io_rw_reissue(req, res))
+ io_complete_rw_common(&req->rw.kiocb, res, cs);
}
static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
- io_complete_rw_common(kiocb, res);
- io_put_req(req);
+ __io_complete_rw(req, res, res2, NULL);
}
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
if (kiocb->ki_flags & IOCB_WRITE)
kiocb_end_write(req);
- if (res != req->result)
+ if (res != -EAGAIN && res != req->result)
req_set_fail_links(req);
- req->result = res;
- if (res != -EAGAIN)
- req->flags |= REQ_F_IOPOLL_COMPLETED;
+
+ WRITE_ONCE(req->result, res);
+ /* order with io_poll_complete() checking ->result */
+ smp_wmb();
+ WRITE_ONCE(req->iopoll_completed, 1);
}
/*
* how we do polling eventually, not spinning if we're on potentially
* different devices.
*/
- if (list_empty(&ctx->poll_list)) {
+ if (list_empty(&ctx->iopoll_list)) {
ctx->poll_multi_file = false;
} else if (!ctx->poll_multi_file) {
struct io_kiocb *list_req;
- list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
- list);
+ list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
+ inflight_entry);
if (list_req->file != req->file)
ctx->poll_multi_file = true;
}
* For fast devices, IO may have already completed. If it has, add
* it to the front so we find it first.
*/
- if (req->flags & REQ_F_IOPOLL_COMPLETED)
- list_add(&req->list, &ctx->poll_list);
+ if (READ_ONCE(req->iopoll_completed))
+ list_add(&req->inflight_entry, &ctx->iopoll_list);
else
- list_add_tail(&req->list, &ctx->poll_list);
+ list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
if ((ctx->flags & IORING_SETUP_SQPOLL) &&
wq_has_sleeper(&ctx->sqo_wait))
wake_up(&ctx->sqo_wait);
}
-static void io_file_put(struct io_submit_state *state)
+static void __io_state_file_put(struct io_submit_state *state)
{
- if (state->file) {
- int diff = state->has_refs - state->used_refs;
+ if (state->has_refs)
+ fput_many(state->file, state->has_refs);
+ state->file = NULL;
+}
- if (diff)
- fput_many(state->file, diff);
- state->file = NULL;
- }
+static inline void io_state_file_put(struct io_submit_state *state)
+{
+ if (state->file)
+ __io_state_file_put(state);
}
/*
if (state->file) {
if (state->fd == fd) {
- state->used_refs++;
+ state->has_refs--;
state->ios_left--;
return state->file;
}
- io_file_put(state);
+ __io_state_file_put(state);
}
state->file = fget_many(fd, state->ios_left);
if (!state->file)
return NULL;
state->fd = fd;
- state->has_refs = state->ios_left;
- state->used_refs = 1;
state->ios_left--;
+ state->has_refs = state->ios_left;
return state->file;
}
+static bool io_bdev_nowait(struct block_device *bdev)
+{
+#ifdef CONFIG_BLOCK
+ return !bdev || queue_is_mq(bdev_get_queue(bdev));
+#else
+ return true;
+#endif
+}
+
/*
* If we tracked the file through the SCM inflight mechanism, we could support
* any file. For now, just ensure that anything potentially problematic is done
{
umode_t mode = file_inode(file)->i_mode;
- if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode))
+ if (S_ISBLK(mode)) {
+ if (io_bdev_nowait(file->f_inode->i_bdev))
+ return true;
+ return false;
+ }
+ if (S_ISCHR(mode) || S_ISSOCK(mode))
return true;
- if (S_ISREG(mode) && file->f_op != &io_uring_fops)
+ if (S_ISREG(mode)) {
+ if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
+ file->f_op != &io_uring_fops)
+ return true;
+ return false;
+ }
+
+ /* any ->read/write should understand O_NONBLOCK */
+ if (file->f_flags & O_NONBLOCK)
return true;
if (!(file->f_mode & FMODE_NOWAIT))
kiocb->ki_ioprio = get_current_ioprio();
/* don't allow async punt if RWF_NOWAIT was requested */
- if ((kiocb->ki_flags & IOCB_NOWAIT) ||
- (req->file->f_flags & O_NONBLOCK))
+ if (kiocb->ki_flags & IOCB_NOWAIT)
req->flags |= REQ_F_NOWAIT;
+ if (kiocb->ki_flags & IOCB_DIRECT)
+ io_get_req_task(req);
+
if (force_nonblock)
kiocb->ki_flags |= IOCB_NOWAIT;
kiocb->ki_flags |= IOCB_HIPRI;
kiocb->ki_complete = io_complete_rw_iopoll;
- req->result = 0;
+ req->iopoll_completed = 0;
+ io_get_req_task(req);
} else {
if (kiocb->ki_flags & IOCB_HIPRI)
return -EINVAL;
}
}
-static void kiocb_done(struct kiocb *kiocb, ssize_t ret)
+static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
+ struct io_comp_state *cs)
{
struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = kiocb->ki_pos;
if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
- io_complete_rw(kiocb, ret, 0);
+ __io_complete_rw(req, ret, 0, cs);
else
io_rw_done(kiocb, ret);
}
static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
bool needs_lock)
{
- if (req->flags & REQ_F_BUFFER_SELECTED)
+ if (req->flags & REQ_F_BUFFER_SELECTED) {
+ struct io_buffer *kbuf;
+
+ kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
+ iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
+ iov[0].iov_len = kbuf->len;
return 0;
+ }
if (!req->rw.len)
return 0;
else if (req->rw.len > 1)
if (req->io) {
struct io_async_rw *iorw = &req->io->rw;
- *iovec = iorw->iov;
- iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size);
- if (iorw->iov == iorw->fast_iov)
- *iovec = NULL;
+ iov_iter_init(iter, rw, iorw->iov, iorw->nr_segs, iorw->size);
+ *iovec = NULL;
return iorw->size;
}
struct iovec *iovec, struct iovec *fast_iov,
struct iov_iter *iter)
{
- req->io->rw.nr_segs = iter->nr_segs;
- req->io->rw.size = io_size;
- req->io->rw.iov = iovec;
- if (!req->io->rw.iov) {
- req->io->rw.iov = req->io->rw.fast_iov;
- if (req->io->rw.iov != fast_iov)
- memcpy(req->io->rw.iov, fast_iov,
+ struct io_async_rw *rw = &req->io->rw;
+
+ rw->nr_segs = iter->nr_segs;
+ rw->size = io_size;
+ if (!iovec) {
+ rw->iov = rw->fast_iov;
+ if (rw->iov != fast_iov)
+ memcpy(rw->iov, fast_iov,
sizeof(struct iovec) * iter->nr_segs);
} else {
+ rw->iov = iovec;
req->flags |= REQ_F_NEED_CLEANUP;
}
}
return 0;
}
+static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
+ bool force_nonblock)
+{
+ struct io_async_ctx *io = req->io;
+ struct iov_iter iter;
+ ssize_t ret;
+
+ io->rw.iov = io->rw.fast_iov;
+ req->io = NULL;
+ ret = io_import_iovec(rw, req, &io->rw.iov, &iter, !force_nonblock);
+ req->io = io;
+ if (unlikely(ret < 0))
+ return ret;
+
+ io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
+ return 0;
+}
+
static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
bool force_nonblock)
{
- struct io_async_ctx *io;
- struct iov_iter iter;
ssize_t ret;
ret = io_prep_rw(req, sqe, force_nonblock);
/* either don't need iovec imported or already have it */
if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
return 0;
+ return io_rw_prep_async(req, READ, force_nonblock);
+}
- io = req->io;
- io->rw.iov = io->rw.fast_iov;
- req->io = NULL;
- ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock);
- req->io = io;
- if (ret < 0)
- return ret;
+/*
+ * This is our waitqueue callback handler, registered through lock_page_async()
+ * when we initially tried to do the IO with the iocb armed our waitqueue.
+ * This gets called when the page is unlocked, and we generally expect that to
+ * happen when the page IO is completed and the page is now uptodate. This will
+ * queue a task_work based retry of the operation, attempting to copy the data
+ * again. If the latter fails because the page was NOT uptodate, then we will
+ * do a thread based blocking retry of the operation. That's the unexpected
+ * slow path.
+ */
+static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
+ int sync, void *arg)
+{
+ struct wait_page_queue *wpq;
+ struct io_kiocb *req = wait->private;
+ struct wait_page_key *key = arg;
+ int ret;
- io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
- return 0;
+ wpq = container_of(wait, struct wait_page_queue, wait);
+
+ if (!wake_page_match(wpq, key))
+ return 0;
+
+ list_del_init(&wait->entry);
+
+ init_task_work(&req->task_work, io_req_task_submit);
+ percpu_ref_get(&req->ctx->refs);
+
+ /* submit ref gets dropped, acquire a new one */
+ refcount_inc(&req->refs);
+ ret = io_req_task_work_add(req, &req->task_work);
+ if (unlikely(ret)) {
+ struct task_struct *tsk;
+
+ /* queue just for cancelation */
+ init_task_work(&req->task_work, io_req_task_cancel);
+ tsk = io_wq_get_task(req->ctx->io_wq);
+ task_work_add(tsk, &req->task_work, 0);
+ wake_up_process(tsk);
+ }
+ return 1;
+}
+
+static inline int kiocb_wait_page_queue_init(struct kiocb *kiocb,
+ struct wait_page_queue *wait,
+ wait_queue_func_t func,
+ void *data)
+{
+ /* Can't support async wakeup with polled IO */
+ if (kiocb->ki_flags & IOCB_HIPRI)
+ return -EINVAL;
+ if (kiocb->ki_filp->f_mode & FMODE_BUF_RASYNC) {
+ wait->wait.func = func;
+ wait->wait.private = data;
+ wait->wait.flags = 0;
+ INIT_LIST_HEAD(&wait->wait.entry);
+ kiocb->ki_flags |= IOCB_WAITQ;
+ kiocb->ki_waitq = wait;
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/*
+ * This controls whether a given IO request should be armed for async page
+ * based retry. If we return false here, the request is handed to the async
+ * worker threads for retry. If we're doing buffered reads on a regular file,
+ * we prepare a private wait_page_queue entry and retry the operation. This
+ * will either succeed because the page is now uptodate and unlocked, or it
+ * will register a callback when the page is unlocked at IO completion. Through
+ * that callback, io_uring uses task_work to setup a retry of the operation.
+ * That retry will attempt the buffered read again. The retry will generally
+ * succeed, or in rare cases where it fails, we then fall back to using the
+ * async worker threads for a blocking retry.
+ */
+static bool io_rw_should_retry(struct io_kiocb *req)
+{
+ struct kiocb *kiocb = &req->rw.kiocb;
+ int ret;
+
+ /* never retry for NOWAIT, we just complete with -EAGAIN */
+ if (req->flags & REQ_F_NOWAIT)
+ return false;
+
+ /* already tried, or we're doing O_DIRECT */
+ if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_WAITQ))
+ return false;
+ /*
+ * just use poll if we can, and don't attempt if the fs doesn't
+ * support callback based unlocks
+ */
+ if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
+ return false;
+
+ /*
+ * If request type doesn't require req->io to defer in general,
+ * we need to allocate it here
+ */
+ if (!req->io && __io_alloc_async_ctx(req))
+ return false;
+
+ ret = kiocb_wait_page_queue_init(kiocb, &req->io->rw.wpq,
+ io_async_buf_func, req);
+ if (!ret) {
+ io_get_req_task(req);
+ return true;
+ }
+
+ return false;
+}
+
+static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
+{
+ if (req->file->f_op->read_iter)
+ return call_read_iter(req->file, &req->rw.kiocb, iter);
+ else if (req->file->f_op->read)
+ return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter);
+ else
+ return -EINVAL;
}
-static int io_read(struct io_kiocb *req, bool force_nonblock)
+static int io_read(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter iter;
size_t iov_count;
- ssize_t io_size, ret;
+ ssize_t io_size, ret, ret2;
+ unsigned long nr_segs;
ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock);
if (ret < 0)
return ret;
+ io_size = ret;
+ req->result = io_size;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
kiocb->ki_flags &= ~IOCB_NOWAIT;
- req->result = 0;
- io_size = ret;
- if (req->flags & REQ_F_LINK_HEAD)
- req->result = io_size;
-
- /*
- * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
- * we know to async punt it even if it was opened O_NONBLOCK
- */
+ /* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_async(req->file, READ))
goto copy_iov;
iov_count = iov_iter_count(&iter);
+ nr_segs = iter.nr_segs;
ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count);
- if (!ret) {
- ssize_t ret2;
+ if (unlikely(ret))
+ goto out_free;
- if (req->file->f_op->read_iter)
- ret2 = call_read_iter(req->file, kiocb, &iter);
- else
- ret2 = loop_rw_iter(READ, req->file, kiocb, &iter);
+ ret2 = io_iter_do_read(req, &iter);
- /* Catch -EAGAIN return for forced non-blocking submission */
- if (!force_nonblock || ret2 != -EAGAIN) {
- kiocb_done(kiocb, ret2);
- } else {
+ /* Catch -EAGAIN return for forced non-blocking submission */
+ if (!force_nonblock || (ret2 != -EAGAIN && ret2 != -EIO)) {
+ kiocb_done(kiocb, ret2, cs);
+ } else {
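+ /* restore iter state in case the failed attempt advanced it */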
+ iter.count = iov_count;
+ iter.nr_segs = nr_segs;
copy_iov:
- ret = io_setup_async_rw(req, io_size, iovec,
- inline_vecs, &iter);
- if (ret)
+ ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
+ &iter);
+ if (ret)
+ goto out_free;
+ /* it's copied and will be cleaned up together with ->io */
+ iovec = NULL;
+ /* if we can retry, do so with the callbacks armed */
+ if (io_rw_should_retry(req)) {
+ ret2 = io_iter_do_read(req, &iter);
+ if (ret2 == -EIOCBQUEUED) {
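+ /* retry armed, the waitqueue callback will resubmit on unlock */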
goto out_free;
- /* any defer here is final, must blocking retry */
- if (!(req->flags & REQ_F_NOWAIT) &&
- !file_can_poll(req->file))
- req->flags |= REQ_F_MUST_PUNT;
- return -EAGAIN;
+ } else if (ret2 != -EAGAIN) {
+ kiocb_done(kiocb, ret2, cs);
+ goto out_free;
+ }
}
+ kiocb->ki_flags &= ~IOCB_WAITQ;
+ return -EAGAIN;
}
out_free:
- kfree(iovec);
- req->flags &= ~REQ_F_NEED_CLEANUP;
+ if (iovec)
+ kfree(iovec);
return ret;
}
static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
bool force_nonblock)
{
- struct io_async_ctx *io;
- struct iov_iter iter;
ssize_t ret;
ret = io_prep_rw(req, sqe, force_nonblock);
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
return -EBADF;
- req->fsize = rlimit(RLIMIT_FSIZE);
-
/* either don't need iovec imported or already have it */
if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
return 0;
-
- io = req->io;
- io->rw.iov = io->rw.fast_iov;
- req->io = NULL;
- ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock);
- req->io = io;
- if (ret < 0)
- return ret;
-
- io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter);
- return 0;
+ return io_rw_prep_async(req, WRITE, force_nonblock);
}
-static int io_write(struct io_kiocb *req, bool force_nonblock)
+static int io_write(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct kiocb *kiocb = &req->rw.kiocb;
struct iov_iter iter;
size_t iov_count;
- ssize_t ret, io_size;
+ ssize_t ret, ret2, io_size;
+ unsigned long nr_segs;
ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock);
if (ret < 0)
return ret;
+ io_size = ret;
+ req->result = io_size;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT;
- req->result = 0;
- io_size = ret;
- if (req->flags & REQ_F_LINK_HEAD)
- req->result = io_size;
-
- /*
- * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
- * we know to async punt it even if it was opened O_NONBLOCK
- */
+ /* If the file doesn't support async, just async punt */
if (force_nonblock && !io_file_supports_async(req->file, WRITE))
goto copy_iov;
iov_count = iov_iter_count(&iter);
+ nr_segs = iter.nr_segs;
ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count);
- if (!ret) {
- ssize_t ret2;
-
- /*
- * Open-code file_start_write here to grab freeze protection,
- * which will be released by another thread in
- * io_complete_rw(). Fool lockdep by telling it the lock got
- * released so that it doesn't complain about the held lock when
- * we return to userspace.
- */
- if (req->flags & REQ_F_ISREG) {
- __sb_start_write(file_inode(req->file)->i_sb,
- SB_FREEZE_WRITE, true);
- __sb_writers_release(file_inode(req->file)->i_sb,
- SB_FREEZE_WRITE);
- }
- kiocb->ki_flags |= IOCB_WRITE;
-
- if (!force_nonblock)
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
+ if (unlikely(ret))
+ goto out_free;
- if (req->file->f_op->write_iter)
- ret2 = call_write_iter(req->file, kiocb, &iter);
- else
- ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
+ /*
+ * Open-code file_start_write here to grab freeze protection,
+ * which will be released by another thread in
+ * io_complete_rw(). Fool lockdep by telling it the lock got
+ * released so that it doesn't complain about the held lock when
+ * we return to userspace.
+ */
+ if (req->flags & REQ_F_ISREG) {
+ __sb_start_write(file_inode(req->file)->i_sb,
+ SB_FREEZE_WRITE, true);
+ __sb_writers_release(file_inode(req->file)->i_sb,
+ SB_FREEZE_WRITE);
+ }
+ kiocb->ki_flags |= IOCB_WRITE;
- if (!force_nonblock)
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+ if (req->file->f_op->write_iter)
+ ret2 = call_write_iter(req->file, kiocb, &iter);
+ else if (req->file->f_op->write)
+ ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
+ else
+ ret2 = -EINVAL;
- /*
- * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
- * retry them without IOCB_NOWAIT.
- */
- if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
- ret2 = -EAGAIN;
- if (!force_nonblock || ret2 != -EAGAIN) {
- kiocb_done(kiocb, ret2);
- } else {
+ /*
+ * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
+ * retry them without IOCB_NOWAIT.
+ */
+ if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
+ ret2 = -EAGAIN;
+ if (!force_nonblock || ret2 != -EAGAIN) {
+ kiocb_done(kiocb, ret2, cs);
+ } else {
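+ /* restore iter state before setting up the async copy */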
+ iter.count = iov_count;
+ iter.nr_segs = nr_segs;
copy_iov:
- ret = io_setup_async_rw(req, io_size, iovec,
- inline_vecs, &iter);
- if (ret)
- goto out_free;
- /* any defer here is final, must blocking retry */
- if (!file_can_poll(req->file))
- req->flags |= REQ_F_MUST_PUNT;
- return -EAGAIN;
- }
+ ret = io_setup_async_rw(req, io_size, iovec, inline_vecs,
+ &iter);
+ if (ret)
+ goto out_free;
+ /* it's copied and will be cleaned up together with ->io */
+ iovec = NULL;
+ return -EAGAIN;
}
out_free:
- req->flags &= ~REQ_F_NEED_CLEANUP;
- kfree(iovec);
+ if (iovec)
+ kfree(iovec);
return ret;
}
-static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int __io_splice_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
{
struct io_splice *sp = &req->splice;
unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
sp->file_in = NULL;
- sp->off_in = READ_ONCE(sqe->splice_off_in);
- sp->off_out = READ_ONCE(sqe->off);
sp->len = READ_ONCE(sqe->len);
sp->flags = READ_ONCE(sqe->splice_flags);
return ret;
req->flags |= REQ_F_NEED_CLEANUP;
- if (!S_ISREG(file_inode(sp->file_in)->i_mode))
+ if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
+ /*
+ * Splice operations will be punted async, and we need to modify
+ * io_wq_work.flags here, so initialize io_wq_work first.
+ */
+ io_req_init_async(req);
req->work.flags |= IO_WQ_WORK_UNBOUND;
+ }
+
+ return 0;
+}
+
+static int io_tee_prep(struct io_kiocb *req,
+ const struct io_uring_sqe *sqe)
+{
+ if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
+ return -EINVAL;
+ return __io_splice_prep(req, sqe);
+}
+
+static int io_tee(struct io_kiocb *req, bool force_nonblock)
+{
+ struct io_splice *sp = &req->splice;
+ struct file *in = sp->file_in;
+ struct file *out = sp->file_out;
+ unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
+ long ret = 0;
+
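+ /* do_tee() may block, so always punt to async context */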
+ if (force_nonblock)
+ return -EAGAIN;
+ if (sp->len)
+ ret = do_tee(in, out, sp->len, flags);
+
+ io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ if (ret != sp->len)
+ req_set_fail_links(req);
+ io_req_complete(req, ret);
return 0;
}
+static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ struct io_splice *sp = &req->splice;
+
+ sp->off_in = READ_ONCE(sqe->splice_off_in);
+ sp->off_out = READ_ONCE(sqe->off);
+ return __io_splice_prep(req, sqe);
+}
+
static int io_splice(struct io_kiocb *req, bool force_nonblock)
{
struct io_splice *sp = &req->splice;
io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
req->flags &= ~REQ_F_NEED_CLEANUP;
- io_cqring_add_event(req, ret);
if (ret != sp->len)
req_set_fail_links(req);
- io_put_req(req);
+ io_req_complete(req, ret);
return 0;
}
/*
* IORING_OP_NOP just posts a completion event, nothing else.
*/
-static int io_nop(struct io_kiocb *req)
+static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
{
struct io_ring_ctx *ctx = req->ctx;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- io_cqring_add_event(req, 0);
- io_put_req(req);
+ __io_req_complete(req, 0, 0, cs);
return 0;
}
return -EINVAL;
req->sync.flags = READ_ONCE(sqe->fsync_flags);
- if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
- return -EINVAL;
-
- req->sync.off = READ_ONCE(sqe->off);
- req->sync.len = READ_ONCE(sqe->len);
- return 0;
-}
-
-static bool io_req_cancelled(struct io_kiocb *req)
-{
- if (req->work.flags & IO_WQ_WORK_CANCEL) {
- req_set_fail_links(req);
- io_cqring_add_event(req, -ECANCELED);
- io_put_req(req);
- return true;
- }
-
- return false;
-}
-
-static void __io_fsync(struct io_kiocb *req)
-{
- loff_t end = req->sync.off + req->sync.len;
- int ret;
-
- ret = vfs_fsync_range(req->file, req->sync.off,
- end > 0 ? end : LLONG_MAX,
- req->sync.flags & IORING_FSYNC_DATASYNC);
- if (ret < 0)
- req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
-}
-
-static void io_fsync_finish(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
+ if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
+ return -EINVAL;
- if (io_req_cancelled(req))
- return;
- __io_fsync(req);
- io_steal_work(req, workptr);
+ req->sync.off = READ_ONCE(sqe->off);
+ req->sync.len = READ_ONCE(sqe->len);
+ return 0;
}
static int io_fsync(struct io_kiocb *req, bool force_nonblock)
{
+ loff_t end = req->sync.off + req->sync.len;
+ int ret;
+
/* fsync always requires a blocking context */
- if (force_nonblock) {
- req->work.func = io_fsync_finish;
+ if (force_nonblock)
return -EAGAIN;
- }
- __io_fsync(req);
- return 0;
-}
-
-static void __io_fallocate(struct io_kiocb *req)
-{
- int ret;
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;
- ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
- req->sync.len);
- current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
+ ret = vfs_fsync_range(req->file, req->sync.off,
+ end > 0 ? end : LLONG_MAX,
+ req->sync.flags & IORING_FSYNC_DATASYNC);
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
-}
-
-static void io_fallocate_finish(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
- if (io_req_cancelled(req))
- return;
- __io_fallocate(req);
- io_steal_work(req, workptr);
+ io_req_complete(req, ret);
+ return 0;
}
static int io_fallocate_prep(struct io_kiocb *req,
{
if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
req->sync.off = READ_ONCE(sqe->off);
req->sync.len = READ_ONCE(sqe->addr);
req->sync.mode = READ_ONCE(sqe->len);
- req->fsize = rlimit(RLIMIT_FSIZE);
return 0;
}
static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
{
+ int ret;
+
/* fallocate always requires a blocking context */
- if (force_nonblock) {
- req->work.func = io_fallocate_finish;
+ if (force_nonblock)
return -EAGAIN;
- }
-
- __io_fallocate(req);
+ ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
+ req->sync.len);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_req_complete(req, ret);
return 0;
}
-static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
const char __user *fname;
int ret;
- if (sqe->ioprio || sqe->buf_index)
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
return -EINVAL;
- if (req->flags & REQ_F_FIXED_FILE)
+ if (unlikely(sqe->ioprio || sqe->buf_index))
+ return -EINVAL;
+ if (unlikely(req->flags & REQ_F_FIXED_FILE))
return -EBADF;
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
- req->open.dfd = READ_ONCE(sqe->fd);
- req->open.how.mode = READ_ONCE(sqe->len);
- fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
- req->open.how.flags = READ_ONCE(sqe->open_flags);
- if (force_o_largefile())
+ /* open.how should already be initialised */
+ if (!(req->open.how.flags & O_PATH) && force_o_largefile())
req->open.how.flags |= O_LARGEFILE;
+ req->open.dfd = READ_ONCE(sqe->fd);
+ fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
req->open.filename = getname(fname);
if (IS_ERR(req->open.filename)) {
ret = PTR_ERR(req->open.filename);
req->open.filename = NULL;
return ret;
}
-
req->open.nofile = rlimit(RLIMIT_NOFILE);
req->flags |= REQ_F_NEED_CLEANUP;
return 0;
}
+static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+ u64 flags, mode;
+
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ return 0;
+ mode = READ_ONCE(sqe->len);
+ flags = READ_ONCE(sqe->open_flags);
+ req->open.how = build_open_how(flags, mode);
+ return __io_openat_prep(req, sqe);
+}
+
static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct open_how __user *how;
- const char __user *fname;
size_t len;
int ret;
- if (sqe->ioprio || sqe->buf_index)
- return -EINVAL;
- if (req->flags & REQ_F_FIXED_FILE)
- return -EBADF;
if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
-
- req->open.dfd = READ_ONCE(sqe->fd);
- fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
len = READ_ONCE(sqe->len);
-
if (len < OPEN_HOW_SIZE_VER0)
return -EINVAL;
if (ret)
return ret;
- if (!(req->open.how.flags & O_PATH) && force_o_largefile())
- req->open.how.flags |= O_LARGEFILE;
-
- req->open.filename = getname(fname);
- if (IS_ERR(req->open.filename)) {
- ret = PTR_ERR(req->open.filename);
- req->open.filename = NULL;
- return ret;
- }
-
- req->open.nofile = rlimit(RLIMIT_NOFILE);
- req->flags |= REQ_F_NEED_CLEANUP;
- return 0;
+ return __io_openat_prep(req, sqe);
}
static int io_openat2(struct io_kiocb *req, bool force_nonblock)
req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
+ io_req_complete(req, ret);
return 0;
}
static int io_openat(struct io_kiocb *req, bool force_nonblock)
{
- req->open.how = build_open_how(req->open.how.flags, req->open.how.mode);
return io_openat2(req, force_nonblock);
}
return i;
}
-static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock)
+static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
struct io_provide_buf *p = &req->pbuf;
struct io_ring_ctx *ctx = req->ctx;
io_ring_submit_lock(ctx, !force_nonblock);
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
+ __io_req_complete(req, ret, 0, cs);
return 0;
}
p->addr = READ_ONCE(sqe->addr);
p->len = READ_ONCE(sqe->len);
- if (!access_ok(u64_to_user_ptr(p->addr), p->len))
+ if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
return -EFAULT;
p->bgid = READ_ONCE(sqe->buf_group);
return i ? i : -ENOMEM;
}
-static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock)
+static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
struct io_provide_buf *p = &req->pbuf;
struct io_ring_ctx *ctx = req->ctx;
io_ring_submit_unlock(ctx, !force_nonblock);
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
+ __io_req_complete(req, ret, 0, cs);
return 0;
}
#if defined(CONFIG_EPOLL)
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
req->epoll.epfd = READ_ONCE(sqe->fd);
req->epoll.op = READ_ONCE(sqe->len);
#endif
}
-static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock)
+static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
#if defined(CONFIG_EPOLL)
struct io_epoll *ie = &req->epoll;
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
+ __io_req_complete(req, ret, 0, cs);
return 0;
#else
return -EOPNOTSUPP;
#if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
if (sqe->ioprio || sqe->buf_index || sqe->off)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
req->madvise.addr = READ_ONCE(sqe->addr);
req->madvise.len = READ_ONCE(sqe->len);
ret = do_madvise(ma->addr, ma->len, ma->advice);
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
+ io_req_complete(req, ret);
return 0;
#else
return -EOPNOTSUPP;
{
if (sqe->ioprio || sqe->buf_index || sqe->addr)
return -EINVAL;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
req->fadvise.offset = READ_ONCE(sqe->off);
req->fadvise.len = READ_ONCE(sqe->len);
ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
+ io_req_complete(req, ret);
return 0;
}
static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
- const char __user *fname;
- unsigned lookup_flags;
- int ret;
-
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
if (req->flags & REQ_F_FIXED_FILE)
return -EBADF;
- if (req->flags & REQ_F_NEED_CLEANUP)
- return 0;
-
- req->open.dfd = READ_ONCE(sqe->fd);
- req->open.mask = READ_ONCE(sqe->len);
- fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
- req->open.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
- req->open.how.flags = READ_ONCE(sqe->statx_flags);
-
- if (vfs_stat_set_lookup_flags(&lookup_flags, req->open.how.flags))
- return -EINVAL;
- req->open.filename = getname_flags(fname, lookup_flags, NULL);
- if (IS_ERR(req->open.filename)) {
- ret = PTR_ERR(req->open.filename);
- req->open.filename = NULL;
- return ret;
- }
+ req->statx.dfd = READ_ONCE(sqe->fd);
+ req->statx.mask = READ_ONCE(sqe->len);
+ req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
+ req->statx.flags = READ_ONCE(sqe->statx_flags);
- req->flags |= REQ_F_NEED_CLEANUP;
return 0;
}
static int io_statx(struct io_kiocb *req, bool force_nonblock)
{
- struct io_open *ctx = &req->open;
- unsigned lookup_flags;
- struct path path;
- struct kstat stat;
+ struct io_statx *ctx = &req->statx;
int ret;
if (force_nonblock) {
return -EAGAIN;
}
- if (vfs_stat_set_lookup_flags(&lookup_flags, ctx->how.flags))
- return -EINVAL;
-
-retry:
- /* filename_lookup() drops it, keep a reference */
- ctx->filename->refcnt++;
-
- ret = filename_lookup(ctx->dfd, ctx->filename, lookup_flags, &path,
- NULL);
- if (ret)
- goto err;
+ ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
+ ctx->buffer);
- ret = vfs_getattr(&path, &stat, ctx->mask, ctx->how.flags);
- path_put(&path);
- if (retry_estale(ret, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
- goto retry;
- }
- if (!ret)
- ret = cp_statx(&stat, ctx->buffer);
-err:
- putname(ctx->filename);
- req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
+ io_req_complete(req, ret);
return 0;
}
{
/*
* If we queue this for async, it must not be cancellable. That would
- * leave the 'file' in an undeterminate state.
+ * leave the 'file' in an indeterminate state, and we need to modify
+ * io_wq_work.flags here, so initialize io_wq_work first.
*/
+ io_req_init_async(req);
req->work.flags |= IO_WQ_WORK_NO_CANCEL;
+ if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
+ return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
sqe->rw_flags || sqe->buf_index)
return -EINVAL;
return -EBADF;
req->close.fd = READ_ONCE(sqe->fd);
- if (req->file->f_op == &io_uring_fops ||
+ if ((req->file && req->file->f_op == &io_uring_fops) ||
req->close.fd == req->ctx->ring_fd)
return -EBADF;
+ req->close.put_file = NULL;
return 0;
}
-/* only called when __close_fd_get_file() is done */
-static void __io_close_finish(struct io_kiocb *req)
-{
- int ret;
-
- ret = filp_close(req->close.put_file, req->work.files);
- if (ret < 0)
- req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- fput(req->close.put_file);
- io_put_req(req);
-}
-
-static void io_close_finish(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
- /* not cancellable, don't do io_req_cancelled() */
- __io_close_finish(req);
- io_steal_work(req, workptr);
-}
-
-static int io_close(struct io_kiocb *req, bool force_nonblock)
+static int io_close(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
+ struct io_close *close = &req->close;
int ret;
- req->close.put_file = NULL;
- ret = __close_fd_get_file(req->close.fd, &req->close.put_file);
- if (ret < 0)
- return ret;
+ /* might already have been done during nonblock submission */
+ if (!close->put_file) {
+ ret = __close_fd_get_file(close->fd, &close->put_file);
+ if (ret < 0)
+ return (ret == -ENOENT) ? -EBADF : ret;
+ }
/* if the file has a flush method, be safe and punt to async */
- if (req->close.put_file->f_op->flush && force_nonblock) {
- /* submission ref will be dropped, take it for async */
- refcount_inc(&req->refs);
-
- req->work.func = io_close_finish;
- /*
- * Do manual async queue here to avoid grabbing files - we don't
- * need the files, and it'll cause io_close_finish() to close
- * the file again and cause a double CQE entry for this request
- */
- io_queue_async_work(req);
- return 0;
+ if (close->put_file->f_op->flush && force_nonblock) {
+ /* was never set, but play it safe */
+ req->flags &= ~REQ_F_NOWAIT;
+ /* avoid grabbing files - we don't need the files */
+ req->flags |= REQ_F_NO_FILE_TABLE;
+ return -EAGAIN;
}
- /*
- * No ->flush(), safely close from here and just punt the
- * fput() to async context.
- */
- __io_close_finish(req);
+ /* No ->flush() or already async, safely close from here */
+ ret = filp_close(close->put_file, req->work.files);
+ if (ret < 0)
+ req_set_fail_links(req);
+ fput(close->put_file);
+ close->put_file = NULL;
+ __io_req_complete(req, ret, 0, cs);
return 0;
}
return 0;
}
-static void __io_sync_file_range(struct io_kiocb *req)
+static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
{
int ret;
+ /* sync_file_range always requires a blocking context */
+ if (force_nonblock)
+ return -EAGAIN;
+
ret = sync_file_range(req->file, req->sync.off, req->sync.len,
req->sync.flags);
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
-}
-
-
-static void io_sync_file_range_finish(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
- if (io_req_cancelled(req))
- return;
- __io_sync_file_range(req);
- io_steal_work(req, workptr);
-}
-
-static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
-{
- /* sync_file_range always requires a blocking context */
- if (force_nonblock) {
- req->work.func = io_sync_file_range_finish;
- return -EAGAIN;
- }
-
- __io_sync_file_range(req);
+ io_req_complete(req, ret);
return 0;
}
return -EAGAIN;
}
+static int io_sendmsg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg)
+{
+ iomsg->iov = iomsg->fast_iov;
+ iomsg->msg.msg_name = &iomsg->addr;
+ return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
+ req->sr_msg.msg_flags, &iomsg->iov);
+}
+
static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sr_msg *sr = &req->sr_msg;
struct io_async_ctx *io = req->io;
int ret;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+
sr->msg_flags = READ_ONCE(sqe->msg_flags);
- sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
#ifdef CONFIG_COMPAT
if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
- io->msg.iov = io->msg.fast_iov;
- ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
- &io->msg.iov);
+ ret = io_sendmsg_copy_hdr(req, &io->msg);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
return ret;
}
-static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
+static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
- struct io_async_msghdr *kmsg = NULL;
+ struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
+ unsigned flags;
int ret;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
sock = sock_from_file(req->file, &ret);
- if (sock) {
- struct io_async_ctx io;
- unsigned flags;
-
- if (req->io) {
- kmsg = &req->io->msg;
- kmsg->msg.msg_name = &req->io->msg.addr;
- /* if iov is set, it's allocated already */
- if (!kmsg->iov)
- kmsg->iov = kmsg->fast_iov;
- kmsg->msg.msg_iter.iov = kmsg->iov;
- } else {
- struct io_sr_msg *sr = &req->sr_msg;
-
- kmsg = &io.msg;
- kmsg->msg.msg_name = &io.msg.addr;
+ if (unlikely(!sock))
+ return ret;
- io.msg.iov = io.msg.fast_iov;
- ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
- sr->msg_flags, &io.msg.iov);
- if (ret)
- return ret;
- }
+ if (req->io) {
+ kmsg = &req->io->msg;
+ kmsg->msg.msg_name = &req->io->msg.addr;
+ /* if iov is set, it's allocated already */
+ if (!kmsg->iov)
+ kmsg->iov = kmsg->fast_iov;
+ kmsg->msg.msg_iter.iov = kmsg->iov;
+ } else {
+ ret = io_sendmsg_copy_hdr(req, &iomsg);
+ if (ret)
+ return ret;
+ kmsg = &iomsg;
+ }
- flags = req->sr_msg.msg_flags;
- if (flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
- flags |= MSG_DONTWAIT;
+ flags = req->sr_msg.msg_flags;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
- ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
- if (force_nonblock && ret == -EAGAIN)
- return io_setup_async_msg(req, kmsg);
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
- }
+ ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
+ if (force_nonblock && ret == -EAGAIN)
+ return io_setup_async_msg(req, kmsg);
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
- if (kmsg && kmsg->iov != kmsg->fast_iov)
+ if (kmsg->iov != kmsg->fast_iov)
kfree(kmsg->iov);
req->flags &= ~REQ_F_NEED_CLEANUP;
- io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req(req);
+ __io_req_complete(req, ret, 0, cs);
return 0;
}
-static int io_send(struct io_kiocb *req, bool force_nonblock)
+static int io_send(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
+ struct io_sr_msg *sr = &req->sr_msg;
+ struct msghdr msg;
+ struct iovec iov;
struct socket *sock;
+ unsigned flags;
int ret;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
sock = sock_from_file(req->file, &ret);
- if (sock) {
- struct io_sr_msg *sr = &req->sr_msg;
- struct msghdr msg;
- struct iovec iov;
- unsigned flags;
+ if (unlikely(!sock))
+ return ret;
- ret = import_single_range(WRITE, sr->buf, sr->len, &iov,
- &msg.msg_iter);
- if (ret)
- return ret;
+ ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
- msg.msg_name = NULL;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_namelen = 0;
+ msg.msg_name = NULL;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_namelen = 0;
- flags = req->sr_msg.msg_flags;
- if (flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
- flags |= MSG_DONTWAIT;
+ flags = req->sr_msg.msg_flags;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
- msg.msg_flags = flags;
- ret = sock_sendmsg(sock, &msg);
- if (force_nonblock && ret == -EAGAIN)
- return -EAGAIN;
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
- }
+ msg.msg_flags = flags;
+ ret = sock_sendmsg(sock, &msg);
+ if (force_nonblock && ret == -EAGAIN)
+ return -EAGAIN;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
- io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req(req);
+ __io_req_complete(req, ret, 0, cs);
return 0;
}
-static int __io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
+static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg)
{
struct io_sr_msg *sr = &req->sr_msg;
struct iovec __user *uiov;
size_t iov_len;
int ret;
- ret = __copy_msghdr_from_user(&io->msg.msg, sr->msg, &io->msg.uaddr,
- &uiov, &iov_len);
+ ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
+ &iomsg->uaddr, &uiov, &iov_len);
if (ret)
return ret;
if (req->flags & REQ_F_BUFFER_SELECT) {
if (iov_len > 1)
return -EINVAL;
- if (copy_from_user(io->msg.iov, uiov, sizeof(*uiov)))
+ if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
return -EFAULT;
- sr->len = io->msg.iov[0].iov_len;
- iov_iter_init(&io->msg.msg.msg_iter, READ, io->msg.iov, 1,
+ sr->len = iomsg->iov[0].iov_len;
+ iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
sr->len);
- io->msg.iov = NULL;
+ iomsg->iov = NULL;
} else {
ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
- &io->msg.iov, &io->msg.msg.msg_iter);
+ &iomsg->iov, &iomsg->msg.msg_iter);
if (ret > 0)
ret = 0;
}
#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_ctx *io)
+ struct io_async_msghdr *iomsg)
{
struct compat_msghdr __user *msg_compat;
struct io_sr_msg *sr = &req->sr_msg;
compat_size_t len;
int ret;
- msg_compat = (struct compat_msghdr __user *) sr->msg;
- ret = __get_compat_msghdr(&io->msg.msg, msg_compat, &io->msg.uaddr,
+ msg_compat = (struct compat_msghdr __user *) sr->umsg;
+ ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
&ptr, &len);
if (ret)
return ret;
return -EFAULT;
if (clen < 0)
return -EINVAL;
- sr->len = io->msg.iov[0].iov_len;
- io->msg.iov = NULL;
+ sr->len = iomsg->iov[0].iov_len;
+ iomsg->iov = NULL;
} else {
ret = compat_import_iovec(READ, uiov, len, UIO_FASTIOV,
- &io->msg.iov,
- &io->msg.msg.msg_iter);
+ &iomsg->iov,
+ &iomsg->msg.msg_iter);
if (ret < 0)
return ret;
}
}
#endif
-static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io)
+static int io_recvmsg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg)
{
- io->msg.iov = io->msg.fast_iov;
+ iomsg->msg.msg_name = &iomsg->addr;
+ iomsg->iov = iomsg->fast_iov;
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
- return __io_compat_recvmsg_copy_hdr(req, io);
+ return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif
- return __io_recvmsg_copy_hdr(req, io);
+ return __io_recvmsg_copy_hdr(req, iomsg);
}
static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
- int *cflags, bool needs_lock)
+ bool needs_lock)
{
struct io_sr_msg *sr = &req->sr_msg;
struct io_buffer *kbuf;
- if (!(req->flags & REQ_F_BUFFER_SELECT))
- return NULL;
-
kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
if (IS_ERR(kbuf))
return kbuf;
sr->kbuf = kbuf;
req->flags |= REQ_F_BUFFER_SELECTED;
-
- *cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
- *cflags |= IORING_CQE_F_BUFFER;
return kbuf;
}
+static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
+{
+ return io_put_kbuf(req, req->sr_msg.kbuf);
+}
+
static int io_recvmsg_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_async_ctx *io = req->io;
int ret;
+ if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
+ return -EINVAL;
+
sr->msg_flags = READ_ONCE(sqe->msg_flags);
- sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
+ sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
sr->bgid = READ_ONCE(sqe->buf_group);
if (req->flags & REQ_F_NEED_CLEANUP)
return 0;
- ret = io_recvmsg_copy_hdr(req, io);
+ ret = io_recvmsg_copy_hdr(req, &io->msg);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
return ret;
}
-static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
+static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
- struct io_async_msghdr *kmsg = NULL;
+ struct io_async_msghdr iomsg, *kmsg;
struct socket *sock;
+ struct io_buffer *kbuf;
+ unsigned flags;
int ret, cflags = 0;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
sock = sock_from_file(req->file, &ret);
- if (sock) {
- struct io_buffer *kbuf;
- struct io_async_ctx io;
- unsigned flags;
-
- if (req->io) {
- kmsg = &req->io->msg;
- kmsg->msg.msg_name = &req->io->msg.addr;
- /* if iov is set, it's allocated already */
- if (!kmsg->iov)
- kmsg->iov = kmsg->fast_iov;
- kmsg->msg.msg_iter.iov = kmsg->iov;
- } else {
- kmsg = &io.msg;
- kmsg->msg.msg_name = &io.msg.addr;
+ if (unlikely(!sock))
+ return ret;
- ret = io_recvmsg_copy_hdr(req, &io);
- if (ret)
- return ret;
- }
+ if (req->io) {
+ kmsg = &req->io->msg;
+ kmsg->msg.msg_name = &req->io->msg.addr;
+ /* if iov is set, it's allocated already */
+ if (!kmsg->iov)
+ kmsg->iov = kmsg->fast_iov;
+ kmsg->msg.msg_iter.iov = kmsg->iov;
+ } else {
+ ret = io_recvmsg_copy_hdr(req, &iomsg);
+ if (ret)
+ return ret;
+ kmsg = &iomsg;
+ }
- kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
- if (IS_ERR(kbuf)) {
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ kbuf = io_recv_buffer_select(req, !force_nonblock);
+ if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
- } else if (kbuf) {
- kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
- iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
- 1, req->sr_msg.len);
- }
+ kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
+ iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
+ 1, req->sr_msg.len);
+ }
- flags = req->sr_msg.msg_flags;
- if (flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
- flags |= MSG_DONTWAIT;
+ flags = req->sr_msg.msg_flags;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
- ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg,
- kmsg->uaddr, flags);
- if (force_nonblock && ret == -EAGAIN)
- return io_setup_async_msg(req, kmsg);
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
- }
+ ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
+ kmsg->uaddr, flags);
+ if (force_nonblock && ret == -EAGAIN)
+ return io_setup_async_msg(req, kmsg);
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
- if (kmsg && kmsg->iov != kmsg->fast_iov)
+ if (req->flags & REQ_F_BUFFER_SELECTED)
+ cflags = io_put_recv_kbuf(req);
+ if (kmsg->iov != kmsg->fast_iov)
kfree(kmsg->iov);
req->flags &= ~REQ_F_NEED_CLEANUP;
- __io_cqring_add_event(req, ret, cflags);
if (ret < 0)
req_set_fail_links(req);
- io_put_req(req);
+ __io_req_complete(req, ret, cflags, cs);
return 0;
}
-static int io_recv(struct io_kiocb *req, bool force_nonblock)
+static int io_recv(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
- struct io_buffer *kbuf = NULL;
+ struct io_buffer *kbuf;
+ struct io_sr_msg *sr = &req->sr_msg;
+ struct msghdr msg;
+ void __user *buf = sr->buf;
struct socket *sock;
+ struct iovec iov;
+ unsigned flags;
int ret, cflags = 0;
- if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
-
sock = sock_from_file(req->file, &ret);
- if (sock) {
- struct io_sr_msg *sr = &req->sr_msg;
- void __user *buf = sr->buf;
- struct msghdr msg;
- struct iovec iov;
- unsigned flags;
+ if (unlikely(!sock))
+ return ret;
- kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock);
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ kbuf = io_recv_buffer_select(req, !force_nonblock);
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
- else if (kbuf)
- buf = u64_to_user_ptr(kbuf->addr);
+ buf = u64_to_user_ptr(kbuf->addr);
+ }
- ret = import_single_range(READ, buf, sr->len, &iov,
- &msg.msg_iter);
- if (ret) {
- kfree(kbuf);
- return ret;
- }
+ ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
+ if (unlikely(ret))
+ goto out_free;
- req->flags |= REQ_F_NEED_CLEANUP;
- msg.msg_name = NULL;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_namelen = 0;
- msg.msg_iocb = NULL;
- msg.msg_flags = 0;
-
- flags = req->sr_msg.msg_flags;
- if (flags & MSG_DONTWAIT)
- req->flags |= REQ_F_NOWAIT;
- else if (force_nonblock)
- flags |= MSG_DONTWAIT;
-
- ret = sock_recvmsg(sock, &msg, flags);
- if (force_nonblock && ret == -EAGAIN)
- return -EAGAIN;
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
- }
+ msg.msg_name = NULL;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_namelen = 0;
+ msg.msg_iocb = NULL;
+ msg.msg_flags = 0;
- kfree(kbuf);
- req->flags &= ~REQ_F_NEED_CLEANUP;
- __io_cqring_add_event(req, ret, cflags);
+ flags = req->sr_msg.msg_flags;
+ if (flags & MSG_DONTWAIT)
+ req->flags |= REQ_F_NOWAIT;
+ else if (force_nonblock)
+ flags |= MSG_DONTWAIT;
+
+ ret = sock_recvmsg(sock, &msg, flags);
+ if (force_nonblock && ret == -EAGAIN)
+ return -EAGAIN;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+out_free:
+ if (req->flags & REQ_F_BUFFER_SELECTED)
+ cflags = io_put_recv_kbuf(req);
if (ret < 0)
req_set_fail_links(req);
- io_put_req(req);
+ __io_req_complete(req, ret, cflags, cs);
return 0;
}
return 0;
}
-static int __io_accept(struct io_kiocb *req, bool force_nonblock)
+static int io_accept(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
struct io_accept *accept = &req->accept;
- unsigned file_flags;
+ unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
int ret;
- file_flags = force_nonblock ? O_NONBLOCK : 0;
+ if (req->file->f_flags & O_NONBLOCK)
+ req->flags |= REQ_F_NOWAIT;
+
ret = __sys_accept4_file(req->file, file_flags, accept->addr,
accept->addr_len, accept->flags,
accept->nofile);
if (ret == -EAGAIN && force_nonblock)
return -EAGAIN;
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
- if (ret < 0)
+ if (ret < 0) {
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
- return 0;
-}
-
-static void io_accept_finish(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
-
- if (io_req_cancelled(req))
- return;
- __io_accept(req, false);
- io_steal_work(req, workptr);
-}
-
-static int io_accept(struct io_kiocb *req, bool force_nonblock)
-{
- int ret;
-
- ret = __io_accept(req, force_nonblock);
- if (ret == -EAGAIN && force_nonblock) {
- req->work.func = io_accept_finish;
- return -EAGAIN;
}
+ __io_req_complete(req, ret, 0, cs);
return 0;
}
&io->connect.address);
}
-static int io_connect(struct io_kiocb *req, bool force_nonblock)
+static int io_connect(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
struct io_async_ctx __io, *io;
unsigned file_flags;
out:
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
+ __io_req_complete(req, ret, 0, cs);
return 0;
}
#else /* !CONFIG_NET */
return -EOPNOTSUPP;
}
-static int io_sendmsg(struct io_kiocb *req, bool force_nonblock)
+static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
return -EOPNOTSUPP;
}
-static int io_send(struct io_kiocb *req, bool force_nonblock)
+static int io_send(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
-static int io_recvmsg(struct io_kiocb *req, bool force_nonblock)
+static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
return -EOPNOTSUPP;
}
-static int io_recv(struct io_kiocb *req, bool force_nonblock)
+static int io_recv(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
-static int io_accept(struct io_kiocb *req, bool force_nonblock)
+static int io_accept(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
-static int io_connect(struct io_kiocb *req, bool force_nonblock)
+static int io_connect(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
return -EOPNOTSUPP;
}
int error;
};
-static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
- struct wait_queue_head *head)
-{
- if (unlikely(poll->head)) {
- pt->error = -EINVAL;
- return;
- }
-
- pt->error = 0;
- poll->head = head;
- add_wait_queue(head, &poll->wait);
-}
-
-static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
- struct poll_table_struct *p)
-{
- struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
-
- __io_queue_proc(&pt->req->apoll->poll, pt, head);
-}
-
static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
__poll_t mask, task_work_func_t func)
{
- struct task_struct *tsk;
int ret;
/* for instances that support it check for an event match first: */
list_del_init(&poll->wait.entry);
- tsk = req->task;
req->result = mask;
init_task_work(&req->task_work, func);
+ percpu_ref_get(&req->ctx->refs);
+
/*
* If this fails, then the task is exiting. When a task exits, the
* work gets canceled, so just cancel this request as well instead
* of executing it. We can't safely execute it anyway, as we may not
 * have the state needed for it anyway.
*/
- ret = task_work_add(tsk, &req->task_work, true);
+ ret = io_req_task_work_add(req, &req->task_work);
if (unlikely(ret)) {
+ struct task_struct *tsk;
+
WRITE_ONCE(poll->canceled, true);
tsk = io_wq_get_task(req->ctx->io_wq);
- task_work_add(tsk, &req->task_work, true);
+ task_work_add(tsk, &req->task_work, 0);
+ wake_up_process(tsk);
}
- wake_up_process(tsk);
return 1;
}
return false;
}
+static void io_poll_remove_double(struct io_kiocb *req, void *data)
+{
+ struct io_poll_iocb *poll = data;
+
+ lockdep_assert_held(&req->ctx->completion_lock);
+
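+ /* detach the extra poll entry used for files with multiple waitqueues */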
+ if (poll && poll->head) {
+ struct wait_queue_head *head = poll->head;
+
+ spin_lock(&head->lock);
+ list_del_init(&poll->wait.entry);
+ if (poll->wait.private)
+ refcount_dec(&req->refs);
+ poll->head = NULL;
+ spin_unlock(&head->lock);
+ }
+}
+
+static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ io_poll_remove_double(req, req->io);
+ req->poll.done = true;
+ io_cqring_fill_event(req, error ? error : mangle_poll(mask));
+ io_commit_cqring(ctx);
+}
+
+static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ if (io_poll_rewait(req, &req->poll)) {
+ spin_unlock_irq(&ctx->completion_lock);
+ return;
+ }
+
+ hash_del(&req->hash_node);
+ io_poll_complete(req, req->result, 0);
+ req->flags |= REQ_F_COMP_LOCKED;
+ *nxt = io_put_req_find_next(req);
+ spin_unlock_irq(&ctx->completion_lock);
+
+ io_cqring_ev_posted(ctx);
+}
+
+static void io_poll_task_func(struct callback_head *cb)
+{
+ struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *nxt = NULL;
+
+ io_poll_task_handler(req, &nxt);
+ if (nxt)
+ __io_req_task_submit(nxt);
+ percpu_ref_put(&ctx->refs);
+}
+
+static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
+ int sync, void *key)
+{
+ struct io_kiocb *req = wait->private;
+ struct io_poll_iocb *poll = req->apoll->double_poll;
+ __poll_t mask = key_to_poll(key);
+
+ /* for instances that support it check for an event match first: */
+ if (mask && !(mask & poll->events))
+ return 0;
+
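+ /* detach this entry and queue the main poll completion via task_work */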
+ if (poll && poll->head) {
+ bool done;
+
+ spin_lock(&poll->head->lock);
+ done = list_empty(&poll->wait.entry);
+ if (!done)
+ list_del_init(&poll->wait.entry);
+ spin_unlock(&poll->head->lock);
+ if (!done)
+ __io_async_wake(req, poll, mask, io_poll_task_func);
+ }
+ refcount_dec(&req->refs);
+ return 1;
+}
+
+static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
+ wait_queue_func_t wake_func)
+{
+ poll->head = NULL;
+ poll->done = false;
+ poll->canceled = false;
+ poll->events = events;
+ INIT_LIST_HEAD(&poll->wait.entry);
+ init_waitqueue_func_entry(&poll->wait, wake_func);
+}
+
+static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
+ struct wait_queue_head *head,
+ struct io_poll_iocb **poll_ptr)
+{
+ struct io_kiocb *req = pt->req;
+
+ /*
+ * If poll->head is already set, it's because the file being polled
+ * uses multiple waitqueues for poll handling (e.g. one for read, one
+ * for write). Set up a separate io_poll_iocb if this happens.
+ */
+ if (unlikely(poll->head)) {
+ /* already have a 2nd entry, fail a third attempt */
+ if (*poll_ptr) {
+ pt->error = -EINVAL;
+ return;
+ }
+ poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
+ if (!poll) {
+ pt->error = -ENOMEM;
+ return;
+ }
+ io_init_poll_iocb(poll, req->poll.events, io_poll_double_wake);
+ refcount_inc(&req->refs);
+ poll->wait.private = req;
+ *poll_ptr = poll;
+ }
+
+ pt->error = 0;
+ poll->head = head;
+
+ if (poll->events & EPOLLEXCLUSIVE)
+ add_wait_queue_exclusive(head, &poll->wait);
+ else
+ add_wait_queue(head, &poll->wait);
+}
+
+static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
+ struct poll_table_struct *p)
+{
+ struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
+ struct async_poll *apoll = pt->req->apoll;
+
+ __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
+}
+
static void io_async_task_func(struct callback_head *cb)
{
struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
struct async_poll *apoll = req->apoll;
struct io_ring_ctx *ctx = req->ctx;
- bool canceled;
trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
if (io_poll_rewait(req, &apoll->poll)) {
spin_unlock_irq(&ctx->completion_lock);
+ percpu_ref_put(&ctx->refs);
return;
}
+ /* If req is still hashed, it cannot have been canceled. Don't check. */
if (hash_hashed(&req->hash_node))
hash_del(&req->hash_node);
- canceled = READ_ONCE(apoll->poll.canceled);
- if (canceled) {
- io_cqring_fill_event(req, -ECANCELED);
- io_commit_cqring(ctx);
- }
-
+ io_poll_remove_double(req, apoll->double_poll);
spin_unlock_irq(&ctx->completion_lock);
- /* restore ->work in case we need to retry again */
- memcpy(&req->work, &apoll->work, sizeof(req->work));
-
- if (canceled) {
- kfree(apoll);
- io_cqring_ev_posted(ctx);
- req_set_fail_links(req);
- io_double_put_req(req);
- return;
- }
-
- __set_current_state(TASK_RUNNING);
- mutex_lock(&ctx->uring_lock);
- __io_queue_sqe(req, NULL);
- mutex_unlock(&ctx->uring_lock);
+ if (!READ_ONCE(apoll->poll.canceled))
+ __io_req_task_submit(req);
+ else
+ __io_req_task_cancel(req, -ECANCELED);
+ percpu_ref_put(&ctx->refs);
+ kfree(apoll->double_poll);
kfree(apoll);
}
struct io_ring_ctx *ctx = req->ctx;
bool cancel = false;
+ io_init_poll_iocb(poll, mask, wake_func);
poll->file = req->file;
- poll->head = NULL;
- poll->done = poll->canceled = false;
- poll->events = mask;
+ poll->wait.private = req;
ipt->pt._key = mask;
ipt->req = req;
ipt->error = -EINVAL;
- INIT_LIST_HEAD(&poll->wait.entry);
- init_waitqueue_func_entry(&poll->wait, wake_func);
- poll->wait.private = req;
-
mask = vfs_poll(req->file, &ipt->pt) & poll->events;
spin_lock_irq(&ctx->completion_lock);
if (!req->file || !file_can_poll(req->file))
return false;
- if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED))
+ if (req->flags & REQ_F_POLLED)
return false;
if (!def->pollin && !def->pollout)
return false;
apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
if (unlikely(!apoll))
return false;
+ apoll->double_poll = NULL;
req->flags |= REQ_F_POLLED;
- memcpy(&apoll->work, &req->work, sizeof(req->work));
-
- get_task_struct(current);
- req->task = current;
+ io_get_req_task(req);
req->apoll = apoll;
INIT_HLIST_NODE(&req->hash_node);
ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
io_async_wake);
if (ret) {
- ipt.error = 0;
- apoll->poll.done = true;
+ io_poll_remove_double(req, apoll->double_poll);
spin_unlock_irq(&ctx->completion_lock);
- memcpy(&req->work, &apoll->work, sizeof(req->work));
+ kfree(apoll->double_poll);
kfree(apoll);
return false;
}
do_complete = true;
}
spin_unlock(&poll->head->lock);
+ hash_del(&req->hash_node);
return do_complete;
}
static bool io_poll_remove_one(struct io_kiocb *req)
{
- struct async_poll *apoll = NULL;
bool do_complete;
if (req->opcode == IORING_OP_POLL_ADD) {
+ io_poll_remove_double(req, req->io);
do_complete = __io_poll_remove_one(req, &req->poll);
} else {
- apoll = req->apoll;
- /* non-poll requests have submit ref still */
- do_complete = __io_poll_remove_one(req, &req->apoll->poll);
- if (do_complete)
- io_put_req(req);
- }
+ struct async_poll *apoll = req->apoll;
- hash_del(&req->hash_node);
+ io_poll_remove_double(req, apoll->double_poll);
- if (do_complete && apoll) {
- /*
- * restore ->work because we need to call io_req_work_drop_env.
- */
- memcpy(&req->work, &apoll->work, sizeof(req->work));
- kfree(apoll);
+ /* non-poll requests have submit ref still */
+ do_complete = __io_poll_remove_one(req, &apoll->poll);
+ if (do_complete) {
+ io_put_req(req);
+ kfree(apoll->double_poll);
+ kfree(apoll);
+ }
}
if (do_complete) {
ret = io_poll_cancel(ctx, addr);
spin_unlock_irq(&ctx->completion_lock);
- io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
- io_put_req(req);
+ io_req_complete(req, ret);
return 0;
}
-static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
-{
- struct io_ring_ctx *ctx = req->ctx;
-
- req->poll.done = true;
- io_cqring_fill_event(req, error ? error : mangle_poll(mask));
- io_commit_cqring(ctx);
-}
-
-static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct io_poll_iocb *poll = &req->poll;
-
- if (io_poll_rewait(req, poll)) {
- spin_unlock_irq(&ctx->completion_lock);
- return;
- }
-
- hash_del(&req->hash_node);
- io_poll_complete(req, req->result, 0);
- req->flags |= REQ_F_COMP_LOCKED;
- io_put_req_find_next(req, nxt);
- spin_unlock_irq(&ctx->completion_lock);
-
- io_cqring_ev_posted(ctx);
-}
-
-static void io_poll_task_func(struct callback_head *cb)
-{
- struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
- struct io_kiocb *nxt = NULL;
-
- io_poll_task_handler(req, &nxt);
- if (nxt) {
- struct io_ring_ctx *ctx = nxt->ctx;
-
- mutex_lock(&ctx->uring_lock);
- __io_queue_sqe(nxt, NULL);
- mutex_unlock(&ctx->uring_lock);
- }
-}
-
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
void *key)
{
{
struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
- __io_queue_proc(&pt->req->poll, pt, head);
+ __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->io);
}
static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_poll_iocb *poll = &req->poll;
- u16 events;
+ u32 events;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (!poll->file)
return -EBADF;
- events = READ_ONCE(sqe->poll_events);
- poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
+ events = READ_ONCE(sqe->poll32_events);
+#ifdef __BIG_ENDIAN
+ events = swahw32(events);
+#endif
+ poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
+ (events & EPOLLEXCLUSIVE);
- get_task_struct(current);
- req->task = current;
+ io_get_req_task(req);
return 0;
}
__poll_t mask;
INIT_HLIST_NODE(&req->hash_node);
- INIT_LIST_HEAD(&req->list);
ipt.pt._qproc = io_poll_queue_proc;
mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
- atomic_inc(&ctx->cq_timeouts);
-
spin_lock_irqsave(&ctx->completion_lock, flags);
+ atomic_set(&req->ctx->cq_timeouts,
+ atomic_read(&req->ctx->cq_timeouts) + 1);
+
/*
* We could be racing with timeout deletion. If the list is empty,
* then timeout lookup already found it and will be handling it.
*/
- if (!list_empty(&req->list)) {
- struct io_kiocb *prev;
-
- /*
- * Adjust the reqs sequence before the current one because it
- * will consume a slot in the cq_ring and the cq_tail
- * pointer will be increased, otherwise other timeout reqs may
- * return in advance without waiting for enough wait_nr.
- */
- prev = req;
- list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
- prev->sequence++;
- list_del_init(&req->list);
- }
+ if (!list_empty(&req->timeout.list))
+ list_del_init(&req->timeout.list);
io_cqring_fill_event(req, -ETIME);
io_commit_cqring(ctx);
struct io_kiocb *req;
int ret = -ENOENT;
- list_for_each_entry(req, &ctx->timeout_list, list) {
+ list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
if (user_data == req->user_data) {
- list_del_init(&req->list);
+ list_del_init(&req->timeout.list);
ret = 0;
break;
}
return -EALREADY;
req_set_fail_links(req);
+ req->flags |= REQ_F_COMP_LOCKED;
io_cqring_fill_event(req, -ECANCELED);
io_put_req(req);
return 0;
{
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
+ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->buf_index || sqe->len)
return -EINVAL;
req->timeout.addr = READ_ONCE(sqe->addr);
{
struct io_timeout_data *data;
unsigned flags;
+ u32 off = READ_ONCE(sqe->off);
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
return -EINVAL;
- if (sqe->off && is_timeout_link)
+ if (off && is_timeout_link)
return -EINVAL;
flags = READ_ONCE(sqe->timeout_flags);
if (flags & ~IORING_TIMEOUT_ABS)
return -EINVAL;
- req->timeout.count = READ_ONCE(sqe->off);
+ req->timeout.off = off;
if (!req->io && io_alloc_async_ctx(req))
return -ENOMEM;
data = &req->io->timeout;
data->req = req;
- req->flags |= REQ_F_TIMEOUT;
if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
static int io_timeout(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_timeout_data *data;
+ struct io_timeout_data *data = &req->io->timeout;
struct list_head *entry;
- unsigned span = 0;
- u32 count = req->timeout.count;
- u32 seq = req->sequence;
+ u32 tail, off = req->timeout.off;
- data = &req->io->timeout;
+ spin_lock_irq(&ctx->completion_lock);
/*
 * sqe->off holds how many events need to occur for this
 * timeout event to be satisfied. If it isn't set, then this is
 * a pure timeout request; sequence isn't used.
*/
- if (!count) {
- req->flags |= REQ_F_TIMEOUT_NOSEQ;
- spin_lock_irq(&ctx->completion_lock);
+ if (io_is_timeout_noseq(req)) {
entry = ctx->timeout_list.prev;
goto add;
}
- req->sequence = seq + count;
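+ /* fire once 'off' more non-timeout CQEs have been posted past 'tail' */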
+ tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+ req->timeout.target_seq = tail + off;
/*
* Insertion sort, ensuring the first entry in the list is always
* the one we need first.
*/
- spin_lock_irq(&ctx->completion_lock);
list_for_each_prev(entry, &ctx->timeout_list) {
- struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
- unsigned nxt_seq;
- long long tmp, tmp_nxt;
- u32 nxt_offset = nxt->timeout.count;
+ struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
+ timeout.list);
- if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
+ if (io_is_timeout_noseq(nxt))
continue;
-
- /*
- * Since seq + count can overflow, use type long
- * long to store it.
- */
- tmp = (long long)seq + count;
- nxt_seq = nxt->sequence - nxt_offset;
- tmp_nxt = (long long)nxt_seq + nxt_offset;
-
- /*
- * cached_sq_head may overflow, and it will never overflow twice
- * once there is some timeout req still be valid.
- */
- if (seq < nxt_seq)
- tmp += UINT_MAX;
-
- if (tmp > tmp_nxt)
+ /* nxt.seq is behind @tail, otherwise it would've been completed */
+ if (off >= nxt->timeout.target_seq - tail)
break;
-
- /*
- * Sequence of reqs after the insert one and itself should
- * be adjusted because each timeout req consumes a slot.
- */
- span++;
- nxt->sequence++;
}
- req->sequence -= span;
add:
- list_add(&req->list, entry);
+ list_add(&req->timeout.list, entry);
data->timer.function = io_timeout_fn;
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
spin_unlock_irq(&ctx->completion_lock);
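The rework above drops the old per-request sequence bookkeeping: a counted timeout now records an absolute target_seq (the CQ tail at arming time plus sqe->off), and the insertion sort compares distances from the current tail, which cannot wrap twice while a timeout is still pending. Userspace-visible semantics are unchanged; below is a minimal sketch, assuming liburing is available and with arbitrary user_data values, of a timeout that completes once two later CQEs have been posted, or with -ETIME after one second otherwise.

#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	/* armed first: completes with res=0 once two later CQEs are posted,
	 * or with -ETIME if the one second timer expires before that */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 2, 0);		/* sqe->off == 2 */
	sqe->user_data = 1;

	for (i = 0; i < 2; i++) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_nop(sqe);
		sqe->user_data = 2 + i;
	}

	io_uring_submit(&ring);

	for (i = 0; i < 3; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		printf("user_data=%llu res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}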
enum io_wq_cancel cancel_ret;
int ret = 0;
- cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
+ cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
switch (cancel_ret) {
case IO_WQ_CANCEL_OK:
ret = 0;
{
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
- if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
- sqe->cancel_flags)
+ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
return -EINVAL;
req->cancel.addr = READ_ONCE(sqe->addr);
static int io_files_update_prep(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
- if (sqe->flags || sqe->ioprio || sqe->rw_flags)
+ if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
+ return -EINVAL;
+ if (sqe->ioprio || sqe->rw_flags)
return -EINVAL;
req->files_update.offset = READ_ONCE(sqe->off);
return 0;
}
-static int io_files_update(struct io_kiocb *req, bool force_nonblock)
+static int io_files_update(struct io_kiocb *req, bool force_nonblock,
+ struct io_comp_state *cs)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_uring_files_update up;
if (ret < 0)
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
+ __io_req_complete(req, ret, 0, cs);
return 0;
}
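io_files_update() services registered-file table updates, the same underlying operation as IORING_REGISTER_FILES_UPDATE: individual slots are patched in place rather than re-registering the whole table, and an fd of -1 clears a slot. A rough sketch of the userspace side, assuming liburing's io_uring_register_files_update() wrapper and a ring that already has a file table registered:

#include <liburing.h>

/* replace registered-file slot 1 with new_fd and clear slot 2 */
static int swap_registered_files(struct io_uring *ring, int new_fd)
{
	int update[2] = { new_fd, -1 };		/* -1 empties a slot */

	/* returns the number of slots updated, or -errno */
	return io_uring_register_files_update(ring, 1, update, 2);
}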
if (!sqe)
return 0;
- if (io_op_defs[req->opcode].file_table) {
- ret = io_grab_files(req);
- if (unlikely(ret))
- return ret;
- }
-
- io_req_work_grab_env(req, &io_op_defs[req->opcode]);
+ if (io_alloc_async_ctx(req))
+ return -EAGAIN;
+ ret = io_prep_work_files(req);
+ if (unlikely(ret))
+ return ret;
switch (req->opcode) {
case IORING_OP_NOP:
case IORING_OP_REMOVE_BUFFERS:
ret = io_remove_buffers_prep(req, sqe);
break;
+ case IORING_OP_TEE:
+ ret = io_tee_prep(req, sqe);
+ break;
default:
printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
req->opcode);
return ret;
}
+static u32 io_get_sequence(struct io_kiocb *req)
+{
+ struct io_kiocb *pos;
+ struct io_ring_ctx *ctx = req->ctx;
+ u32 total_submitted, nr_reqs = 1;
+
+ if (req->flags & REQ_F_LINK_HEAD)
+ list_for_each_entry(pos, &req->link_list, link_list)
+ nr_reqs++;
+
+ total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
+ return total_submitted - nr_reqs;
+}
+
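io_get_sequence() computes how many requests were submitted ahead of this one (each member of a linked chain counts), which is the point the drain logic below must wait for. The userspace-visible behaviour is the familiar IOSQE_IO_DRAIN ordering; here is a sketch of a drained fsync, assuming liburing and a throwaway file name:

#include <liburing.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static char buf[4096];
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;
	fd = open("drain-demo.tmp", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return 1;
	memset(buf, 'x', sizeof(buf));

	for (i = 0; i < 2; i++) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_write(sqe, fd, buf, sizeof(buf), i * sizeof(buf));
	}

	/* drained: not started until both writes above have completed */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fsync(sqe, fd, 0);
	io_uring_sqe_set_flags(sqe, IOSQE_IO_DRAIN);

	io_uring_submit(&ring);
	for (i = 0; i < 3; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) < 0)
			break;
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}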
static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
+ struct io_defer_entry *de;
int ret;
+ u32 seq;
/* Still need defer if there is pending req in defer list. */
- if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
+ if (likely(list_empty_careful(&ctx->defer_list) &&
+ !(req->flags & REQ_F_IO_DRAIN)))
+ return 0;
+
+ seq = io_get_sequence(req);
+ /* Still a chance to pass the sequence check */
+ if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
return 0;
if (!req->io) {
- if (io_alloc_async_ctx(req))
- return -EAGAIN;
ret = io_req_defer_prep(req, sqe);
- if (ret < 0)
+ if (ret)
return ret;
}
+ io_prep_async_link(req);
+ de = kmalloc(sizeof(*de), GFP_KERNEL);
+ if (!de)
+ return -ENOMEM;
spin_lock_irq(&ctx->completion_lock);
- if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
+ if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
spin_unlock_irq(&ctx->completion_lock);
- return 0;
+ kfree(de);
+ io_queue_async_work(req);
+ return -EIOCBQUEUED;
}
trace_io_uring_defer(ctx, req, req->user_data);
- list_add_tail(&req->list, &ctx->defer_list);
+ de->req = req;
+ de->seq = seq;
+ list_add_tail(&de->list, &ctx->defer_list);
spin_unlock_irq(&ctx->completion_lock);
return -EIOCBQUEUED;
}
-static void io_cleanup_req(struct io_kiocb *req)
+static void __io_clean_op(struct io_kiocb *req)
{
struct io_async_ctx *io = req->io;
- switch (req->opcode) {
- case IORING_OP_READV:
- case IORING_OP_READ_FIXED:
- case IORING_OP_READ:
- if (req->flags & REQ_F_BUFFER_SELECTED)
+ if (req->flags & REQ_F_BUFFER_SELECTED) {
+ switch (req->opcode) {
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_READ:
kfree((void *)(unsigned long)req->rw.addr);
- /* fallthrough */
- case IORING_OP_WRITEV:
- case IORING_OP_WRITE_FIXED:
- case IORING_OP_WRITE:
- if (io->rw.iov != io->rw.fast_iov)
- kfree(io->rw.iov);
- break;
- case IORING_OP_RECVMSG:
- if (req->flags & REQ_F_BUFFER_SELECTED)
- kfree(req->sr_msg.kbuf);
- /* fallthrough */
- case IORING_OP_SENDMSG:
- if (io->msg.iov != io->msg.fast_iov)
- kfree(io->msg.iov);
- break;
- case IORING_OP_RECV:
- if (req->flags & REQ_F_BUFFER_SELECTED)
+ break;
+ case IORING_OP_RECVMSG:
+ case IORING_OP_RECV:
kfree(req->sr_msg.kbuf);
- break;
- case IORING_OP_OPENAT:
- case IORING_OP_OPENAT2:
- case IORING_OP_STATX:
- putname(req->open.filename);
- break;
- case IORING_OP_SPLICE:
- io_put_file(req, req->splice.file_in,
- (req->splice.flags & SPLICE_F_FD_IN_FIXED));
- break;
+ break;
+ }
+ req->flags &= ~REQ_F_BUFFER_SELECTED;
+ }
+
+ if (req->flags & REQ_F_NEED_CLEANUP) {
+ switch (req->opcode) {
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_READ:
+ case IORING_OP_WRITEV:
+ case IORING_OP_WRITE_FIXED:
+ case IORING_OP_WRITE:
+ if (io->rw.iov != io->rw.fast_iov)
+ kfree(io->rw.iov);
+ break;
+ case IORING_OP_RECVMSG:
+ case IORING_OP_SENDMSG:
+ if (io->msg.iov != io->msg.fast_iov)
+ kfree(io->msg.iov);
+ break;
+ case IORING_OP_SPLICE:
+ case IORING_OP_TEE:
+ io_put_file(req, req->splice.file_in,
+ (req->splice.flags & SPLICE_F_FD_IN_FIXED));
+ break;
+ }
+ req->flags &= ~REQ_F_NEED_CLEANUP;
}
-
- req->flags &= ~REQ_F_NEED_CLEANUP;
}
static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- bool force_nonblock)
+ bool force_nonblock, struct io_comp_state *cs)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
switch (req->opcode) {
case IORING_OP_NOP:
- ret = io_nop(req);
+ ret = io_nop(req, cs);
break;
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
if (ret < 0)
break;
}
- ret = io_read(req, force_nonblock);
+ ret = io_read(req, force_nonblock, cs);
break;
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
if (ret < 0)
break;
}
- ret = io_write(req, force_nonblock);
+ ret = io_write(req, force_nonblock, cs);
break;
case IORING_OP_FSYNC:
if (sqe) {
break;
}
if (req->opcode == IORING_OP_SENDMSG)
- ret = io_sendmsg(req, force_nonblock);
+ ret = io_sendmsg(req, force_nonblock, cs);
else
- ret = io_send(req, force_nonblock);
+ ret = io_send(req, force_nonblock, cs);
break;
case IORING_OP_RECVMSG:
case IORING_OP_RECV:
break;
}
if (req->opcode == IORING_OP_RECVMSG)
- ret = io_recvmsg(req, force_nonblock);
+ ret = io_recvmsg(req, force_nonblock, cs);
else
- ret = io_recv(req, force_nonblock);
+ ret = io_recv(req, force_nonblock, cs);
break;
case IORING_OP_TIMEOUT:
if (sqe) {
if (ret)
break;
}
- ret = io_accept(req, force_nonblock);
+ ret = io_accept(req, force_nonblock, cs);
break;
case IORING_OP_CONNECT:
if (sqe) {
if (ret)
break;
}
- ret = io_connect(req, force_nonblock);
+ ret = io_connect(req, force_nonblock, cs);
break;
case IORING_OP_ASYNC_CANCEL:
if (sqe) {
if (ret)
break;
}
- ret = io_close(req, force_nonblock);
+ ret = io_close(req, force_nonblock, cs);
break;
case IORING_OP_FILES_UPDATE:
if (sqe) {
if (ret)
break;
}
- ret = io_files_update(req, force_nonblock);
+ ret = io_files_update(req, force_nonblock, cs);
break;
case IORING_OP_STATX:
if (sqe) {
if (ret)
break;
}
- ret = io_epoll_ctl(req, force_nonblock);
+ ret = io_epoll_ctl(req, force_nonblock, cs);
break;
case IORING_OP_SPLICE:
if (sqe) {
if (ret)
break;
}
- ret = io_provide_buffers(req, force_nonblock);
+ ret = io_provide_buffers(req, force_nonblock, cs);
break;
case IORING_OP_REMOVE_BUFFERS:
if (sqe) {
if (ret)
break;
}
- ret = io_remove_buffers(req, force_nonblock);
+ ret = io_remove_buffers(req, force_nonblock, cs);
+ break;
+ case IORING_OP_TEE:
+ if (sqe) {
+ ret = io_tee_prep(req, sqe);
+ if (ret < 0)
+ break;
+ }
+ ret = io_tee(req, force_nonblock);
break;
default:
ret = -EINVAL;
if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
const bool in_async = io_wq_current_is_worker();
- if (req->result == -EAGAIN)
- return -EAGAIN;
-
/* workqueue context doesn't hold uring_lock, grab it now */
if (in_async)
mutex_lock(&ctx->uring_lock);
return 0;
}
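The new IORING_OP_TEE case wires up an async tee(2): duplicate bytes from one pipe into another without consuming the source. A usage sketch, assuming a liburing new enough to provide io_uring_prep_tee():

#include <liburing.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int in[2], out[2];

	if (pipe(in) || pipe(out) || io_uring_queue_init(4, &ring, 0) < 0)
		return 1;
	if (write(in[1], "hello", 5) != 5)
		return 1;

	/* duplicate up to 4096 bytes from in[] to out[], leaving in[] intact */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_tee(sqe, in[0], out[1], 4096, 0);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		/* cqe->res is the number of bytes duplicated (5 here) */
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}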
-static void io_wq_submit_work(struct io_wq_work **workptr)
+static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
{
- struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct io_kiocb *timeout;
int ret = 0;
+ timeout = io_prep_linked_timeout(req);
+ if (timeout)
+ io_queue_linked_timeout(timeout);
+
/* if NO_CANCEL is set, we must still run the work */
if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
IO_WQ_WORK_CANCEL) {
if (!ret) {
do {
- ret = io_issue_sqe(req, NULL, false);
+ ret = io_issue_sqe(req, NULL, false, NULL);
/*
* We can get EAGAIN for polled IO even though we're
* forcing a sync submission from here, since we can't
if (ret) {
req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- io_put_req(req);
+ io_req_complete(req, ret);
}
- io_steal_work(req, workptr);
+ return io_steal_work(req);
}
static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
struct fixed_file_table *table;
table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
- return table->files[index & IORING_FILE_TABLE_MASK];;
+ return table->files[index & IORING_FILE_TABLE_MASK];
}
static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
return -EBADF;
fd = array_index_nospec(fd, ctx->nr_user_files);
file = io_file_from_index(ctx, fd);
- if (!file)
- return -EBADF;
- req->fixed_file_refs = ctx->file_data->cur_refs;
- percpu_ref_get(req->fixed_file_refs);
+ if (file) {
+ req->fixed_file_refs = ctx->file_data->cur_refs;
+ percpu_ref_get(req->fixed_file_refs);
+ }
} else {
trace_io_uring_file_get(ctx, fd);
file = __io_file_get(state, fd);
- if (unlikely(!file))
- return -EBADF;
}
- *out_file = file;
- return 0;
+ if (file || io_op_defs[req->opcode].needs_file_no_error) {
+ *out_file = file;
+ return 0;
+ }
+ return -EBADF;
}
static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
bool fixed;
fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
- if (unlikely(!fixed && req->needs_fixed_file))
+ if (unlikely(!fixed && io_async_submit(req->ctx)))
return -EBADF;
return io_file_get(state, req, fd, &req->file, fixed);
int ret = -EBADF;
struct io_ring_ctx *ctx = req->ctx;
+ io_req_init_async(req);
+
if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE))
return 0;
if (!ctx->ring_file)
return ret;
}
+static inline int io_prep_work_files(struct io_kiocb *req)
+{
+ if (!io_op_defs[req->opcode].file_table)
+ return 0;
+ return io_grab_files(req);
+}
+
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
struct io_timeout_data *data = container_of(timer,
io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
io_put_req(prev);
} else {
- io_cqring_add_event(req, -ETIME);
- io_put_req(req);
+ io_req_complete(req, -ETIME);
}
return HRTIMER_NORESTART;
}
-static void io_queue_linked_timeout(struct io_kiocb *req)
+static void __io_queue_linked_timeout(struct io_kiocb *req)
{
- struct io_ring_ctx *ctx = req->ctx;
-
/*
* If the list is now empty, then our linked request finished before
* we got a chance to setup the timer
*/
- spin_lock_irq(&ctx->completion_lock);
if (!list_empty(&req->link_list)) {
struct io_timeout_data *data = &req->io->timeout;
hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
data->mode);
}
+}
+
+static void io_queue_linked_timeout(struct io_kiocb *req)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock_irq(&ctx->completion_lock);
+ __io_queue_linked_timeout(req);
spin_unlock_irq(&ctx->completion_lock);
/* drop submission reference */
if (!(req->flags & REQ_F_LINK_HEAD))
return NULL;
- /* for polled retry, if flag is set, we already went through here */
- if (req->flags & REQ_F_POLLED)
+ if (req->flags & REQ_F_LINK_TIMEOUT)
return NULL;
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
return nxt;
}
-static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_comp_state *cs)
{
struct io_kiocb *linked_timeout;
struct io_kiocb *nxt;
again:
linked_timeout = io_prep_linked_timeout(req);
- if (req->work.creds && req->work.creds != current_cred()) {
+ if ((req->flags & REQ_F_WORK_INITIALIZED) && req->work.creds &&
+ req->work.creds != current_cred()) {
if (old_creds)
revert_creds(old_creds);
if (old_creds == req->work.creds)
old_creds = override_creds(req->work.creds);
}
- ret = io_issue_sqe(req, sqe, true);
+ ret = io_issue_sqe(req, sqe, true, cs);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
* doesn't support non-blocking read/write attempts
*/
- if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
- (req->flags & REQ_F_MUST_PUNT))) {
- if (io_arm_poll_handler(req)) {
- if (linked_timeout)
- io_queue_linked_timeout(linked_timeout);
- goto exit;
- }
+ if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+ if (!io_arm_poll_handler(req)) {
punt:
- if (io_op_defs[req->opcode].file_table) {
- ret = io_grab_files(req);
- if (ret)
+ ret = io_prep_work_files(req);
+ if (unlikely(ret))
goto err;
+ /*
+ * Queued up for async execution, worker will release
+ * submit reference when the iocb is actually submitted.
+ */
+ io_queue_async_work(req);
}
- /*
- * Queued up for async execution, worker will release
- * submit reference when the iocb is actually submitted.
- */
- io_queue_async_work(req);
+ if (linked_timeout)
+ io_queue_linked_timeout(linked_timeout);
goto exit;
}
+ if (unlikely(ret)) {
err:
- nxt = NULL;
- /* drop submission reference */
- io_put_req_find_next(req, &nxt);
-
- if (linked_timeout) {
- if (!ret)
- io_queue_linked_timeout(linked_timeout);
- else
- io_put_req(linked_timeout);
- }
-
- /* and drop final reference, if we failed */
- if (ret) {
- io_cqring_add_event(req, ret);
+ /* un-prep timeout, so it'll be killed as any other linked */
+ req->flags &= ~REQ_F_LINK_TIMEOUT;
req_set_fail_links(req);
io_put_req(req);
+ io_req_complete(req, ret);
+ goto exit;
}
+
+ /* drop submission reference */
+ nxt = io_put_req_find_next(req);
+ if (linked_timeout)
+ io_queue_linked_timeout(linked_timeout);
+
if (nxt) {
req = nxt;
revert_creds(old_creds);
}
-static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ struct io_comp_state *cs)
{
int ret;
if (ret) {
if (ret != -EIOCBQUEUED) {
fail_req:
- io_cqring_add_event(req, ret);
req_set_fail_links(req);
- io_double_put_req(req);
+ io_put_req(req);
+ io_req_complete(req, ret);
}
} else if (req->flags & REQ_F_FORCE_ASYNC) {
if (!req->io) {
- ret = -EAGAIN;
- if (io_alloc_async_ctx(req))
- goto fail_req;
ret = io_req_defer_prep(req, sqe);
- if (unlikely(ret < 0))
+ if (unlikely(ret))

goto fail_req;
}
* Never try inline submit if IOSQE_ASYNC is set, go straight
* to async execution.
*/
+ io_req_init_async(req);
req->work.flags |= IO_WQ_WORK_CONCURRENT;
io_queue_async_work(req);
} else {
- __io_queue_sqe(req, sqe);
+ __io_queue_sqe(req, sqe, cs);
}
}
-static inline void io_queue_link_head(struct io_kiocb *req)
+static inline void io_queue_link_head(struct io_kiocb *req,
+ struct io_comp_state *cs)
{
if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
- io_cqring_add_event(req, -ECANCELED);
- io_double_put_req(req);
+ io_put_req(req);
+ io_req_complete(req, -ECANCELED);
} else
- io_queue_sqe(req, NULL);
+ io_queue_sqe(req, NULL, cs);
}
static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
- struct io_submit_state *state, struct io_kiocb **link)
+ struct io_kiocb **link, struct io_comp_state *cs)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
head->flags |= REQ_F_IO_DRAIN;
ctx->drain_next = 1;
}
- if (io_alloc_async_ctx(req))
- return -EAGAIN;
-
ret = io_req_defer_prep(req, sqe);
- if (ret) {
+ if (unlikely(ret)) {
/* fail even hard links since we don't submit */
head->flags |= REQ_F_FAIL_LINK;
return ret;
}
trace_io_uring_link(ctx, req, head);
+ io_get_req_task(req);
list_add_tail(&req->link_list, &head->link_list);
/* last request of a link, enqueue the link */
if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
- io_queue_link_head(head);
+ io_queue_link_head(head, cs);
*link = NULL;
}
} else {
req->flags |= REQ_F_LINK_HEAD;
INIT_LIST_HEAD(&req->link_list);
- if (io_alloc_async_ctx(req))
- return -EAGAIN;
-
ret = io_req_defer_prep(req, sqe);
- if (ret)
+ if (unlikely(ret))
req->flags |= REQ_F_FAIL_LINK;
*link = req;
} else {
- io_queue_sqe(req, sqe);
+ io_queue_sqe(req, sqe, cs);
}
}
*/
static void io_submit_state_end(struct io_submit_state *state)
{
+ if (!list_empty(&state->comp.list))
+ io_submit_flush_completions(&state->comp);
blk_finish_plug(&state->plug);
- io_file_put(state);
+ io_state_file_put(state);
if (state->free_reqs)
kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
}
* Start submission side cache.
*/
static void io_submit_state_start(struct io_submit_state *state,
- unsigned int max_ios)
+ struct io_ring_ctx *ctx, unsigned int max_ios)
{
blk_start_plug(&state->plug);
+#ifdef CONFIG_BLOCK
+ state->plug.nowait = true;
+#endif
+ state->comp.nr = 0;
+ INIT_LIST_HEAD(&state->comp.list);
+ state->comp.ctx = ctx;
state->free_reqs = 0;
state->file = NULL;
state->ios_left = max_ios;
static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe,
- struct io_submit_state *state, bool async)
+ struct io_submit_state *state)
{
unsigned int sqe_flags;
int id;
- /*
- * All io need record the previous position, if LINK vs DARIN,
- * it can be used to mark the position of the first IO in the
- * link list.
- */
- req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped;
req->opcode = READ_ONCE(sqe->opcode);
req->user_data = READ_ONCE(sqe->user_data);
req->io = NULL;
req->flags = 0;
/* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2);
- req->task = NULL;
+ req->task = current;
req->result = 0;
- req->needs_fixed_file = async;
- INIT_IO_WORK(&req->work, io_wq_submit_work);
if (unlikely(req->opcode >= IORING_OP_LAST))
return -EINVAL;
- if (io_op_defs[req->opcode].needs_mm && !current->mm) {
- if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
- return -EFAULT;
- use_mm(ctx->sqo_mm);
- }
+ if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
+ return -EFAULT;
sqe_flags = READ_ONCE(sqe->flags);
/* enforce forwards compatibility on users */
id = READ_ONCE(sqe->personality);
if (id) {
+ io_req_init_async(req);
req->work.creds = idr_find(&ctx->personality_idr, id);
if (unlikely(!req->work.creds))
return -EINVAL;
}
/* same numerical values with corresponding REQ_F_*, safe to copy */
- req->flags |= sqe_flags & (IOSQE_IO_DRAIN | IOSQE_IO_HARDLINK |
- IOSQE_ASYNC | IOSQE_FIXED_FILE |
- IOSQE_BUFFER_SELECT | IOSQE_IO_LINK);
+ req->flags |= sqe_flags;
if (!io_op_defs[req->opcode].needs_file)
return 0;
}
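The personality id in the SQE selects credentials previously registered with IORING_REGISTER_PERSONALITY; the idr lookup above resolves that id to the creds the request will be issued with. A sketch of the userspace side, assuming liburing's io_uring_register_personality() helper:

#include <liburing.h>

/* issue a request with previously-registered credentials */
static int submit_nop_with_personality(struct io_uring *ring)
{
	struct io_uring_sqe *sqe;
	int id;

	id = io_uring_register_personality(ring);	/* > 0 on success */
	if (id < 0)
		return id;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_nop(sqe);
	sqe->personality = id;
	return io_uring_submit(ring);
}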
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
- struct file *ring_file, int ring_fd, bool async)
+ struct file *ring_file, int ring_fd)
{
- struct io_submit_state state, *statep = NULL;
+ struct io_submit_state state;
struct io_kiocb *link = NULL;
int i, submitted = 0;
if (!percpu_ref_tryget_many(&ctx->refs, nr))
return -EAGAIN;
- if (nr > IO_PLUG_THRESHOLD) {
- io_submit_state_start(&state, nr);
- statep = &state;
- }
+ io_submit_state_start(&state, ctx, nr);
ctx->ring_fd = ring_fd;
ctx->ring_file = ring_file;
io_consume_sqe(ctx);
break;
}
- req = io_alloc_req(ctx, statep);
+ req = io_alloc_req(ctx, &state);
if (unlikely(!req)) {
if (!submitted)
submitted = -EAGAIN;
break;
}
- err = io_init_req(ctx, req, sqe, statep, async);
+ err = io_init_req(ctx, req, sqe, &state);
io_consume_sqe(ctx);
/* will complete beyond this point, count as submitted */
submitted++;
if (unlikely(err)) {
fail_req:
- io_cqring_add_event(req, err);
- io_double_put_req(req);
+ io_put_req(req);
+ io_req_complete(req, err);
break;
}
trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
- true, async);
- err = io_submit_sqe(req, sqe, statep, &link);
+ true, io_async_submit(ctx));
+ err = io_submit_sqe(req, sqe, &link, &state.comp);
if (err)
goto fail_req;
}
percpu_ref_put_many(&ctx->refs, nr - ref_used);
}
if (link)
- io_queue_link_head(link);
- if (statep)
- io_submit_state_end(&state);
+ io_queue_link_head(link, &state.comp);
+ io_submit_state_end(&state);
/* Commit SQ ring head once we've consumed and submitted all SQEs */
io_commit_sqring(ctx);
return submitted;
}
-static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
+static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
{
- struct mm_struct *mm = current->mm;
+ /* Tell userspace we may need a wakeup call */
+ spin_lock_irq(&ctx->completion_lock);
+ ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
+ spin_unlock_irq(&ctx->completion_lock);
+}
- if (mm) {
- unuse_mm(mm);
- mmput(mm);
- }
+static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
+{
+ spin_lock_irq(&ctx->completion_lock);
+ ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+ spin_unlock_irq(&ctx->completion_lock);
}
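These helpers move the IORING_SQ_NEED_WAKEUP updates under the completion lock; the userspace contract is unchanged: with IORING_SETUP_SQPOLL, check the SQ ring flags after publishing new tail entries and call io_uring_enter(2) with IORING_ENTER_SQ_WAKEUP if the poller thread has gone to sleep. A sketch of that check, assuming ring_fd and sq_flags come from an earlier io_uring_setup()/mmap() (sq_flags points at offset params.sq_off.flags), that the headers define __NR_io_uring_enter, and that the caller already issued a barrier after writing the SQ tail:

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static long sys_io_uring_enter(int fd, unsigned to_submit,
			       unsigned min_complete, unsigned flags)
{
	return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
		       flags, NULL, 0);
}

/* call after publishing new SQ tail entries on an SQPOLL ring */
static void sqpoll_kick_if_needed(int ring_fd, const unsigned *sq_flags)
{
	unsigned flags = __atomic_load_n(sq_flags, __ATOMIC_ACQUIRE);

	if (flags & IORING_SQ_NEED_WAKEUP)
		sys_io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP);
}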
static int io_sq_thread(void *data)
{
struct io_ring_ctx *ctx = data;
const struct cred *old_cred;
- mm_segment_t old_fs;
DEFINE_WAIT(wait);
unsigned long timeout;
int ret = 0;
- complete(&ctx->completions[1]);
+ complete(&ctx->sq_thread_comp);
- old_fs = get_fs();
- set_fs(USER_DS);
old_cred = override_creds(ctx->creds);
timeout = jiffies + ctx->sq_thread_idle;
while (!kthread_should_park()) {
unsigned int to_submit;
- if (!list_empty(&ctx->poll_list)) {
+ if (!list_empty(&ctx->iopoll_list)) {
unsigned nr_events = 0;
mutex_lock(&ctx->uring_lock);
- if (!list_empty(&ctx->poll_list))
- io_iopoll_getevents(ctx, &nr_events, 0);
+ if (!list_empty(&ctx->iopoll_list) && !need_resched())
+ io_do_iopoll(ctx, &nr_events, 0);
else
timeout = jiffies + ctx->sq_thread_idle;
mutex_unlock(&ctx->uring_lock);
* If submit got -EBUSY, flag us as needing the application
* to enter the kernel to reap and flush events.
*/
- if (!to_submit || ret == -EBUSY) {
+ if (!to_submit || ret == -EBUSY || need_resched()) {
/*
* Drop cur_mm before scheduling, we can't hold it for
* long periods (or over schedule()). Do this before
* adding ourselves to the waitqueue, as the unuse/drop
* may sleep.
*/
- io_sq_thread_drop_mm(ctx);
+ io_sq_thread_drop_mm();
/*
* We're polling. If we're within the defined idle
* more IO, we should wait for the application to
* reap events and wake us up.
*/
- if (!list_empty(&ctx->poll_list) ||
+ if (!list_empty(&ctx->iopoll_list) || need_resched() ||
(!time_after(jiffies, timeout) && ret != -EBUSY &&
!percpu_ref_is_dying(&ctx->refs))) {
- if (current->task_works)
- task_work_run();
+ io_run_task_work();
cond_resched();
continue;
}
/*
* While doing polled IO, before going to sleep, we need
- * to check if there are new reqs added to poll_list, it
- * is because reqs may have been punted to io worker and
- * will be added to poll_list later, hence check the
- * poll_list again.
+ * to check if there are new reqs added to iopoll_list,
+ * it is because reqs may have been punted to io worker
+ * and will be added to iopoll_list later, hence check
+ * the iopoll_list again.
*/
if ((ctx->flags & IORING_SETUP_IOPOLL) &&
- !list_empty_careful(&ctx->poll_list)) {
+ !list_empty_careful(&ctx->iopoll_list)) {
finish_wait(&ctx->sqo_wait, &wait);
continue;
}
- /* Tell userspace we may need a wakeup call */
- ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
- /* make sure to read SQ tail after writing flags */
- smp_mb();
+ io_ring_set_wakeup_flag(ctx);
to_submit = io_sqring_entries(ctx);
if (!to_submit || ret == -EBUSY) {
finish_wait(&ctx->sqo_wait, &wait);
break;
}
- if (current->task_works) {
- task_work_run();
+ if (io_run_task_work()) {
finish_wait(&ctx->sqo_wait, &wait);
+ io_ring_clear_wakeup_flag(ctx);
continue;
}
if (signal_pending(current))
schedule();
finish_wait(&ctx->sqo_wait, &wait);
- ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+ io_ring_clear_wakeup_flag(ctx);
ret = 0;
continue;
}
finish_wait(&ctx->sqo_wait, &wait);
- ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+ io_ring_clear_wakeup_flag(ctx);
}
mutex_lock(&ctx->uring_lock);
- ret = io_submit_sqes(ctx, to_submit, NULL, -1, true);
+ if (likely(!percpu_ref_is_dying(&ctx->refs)))
+ ret = io_submit_sqes(ctx, to_submit, NULL, -1);
mutex_unlock(&ctx->uring_lock);
timeout = jiffies + ctx->sq_thread_idle;
}
- if (current->task_works)
- task_work_run();
+ io_run_task_work();
- set_fs(old_fs);
- io_sq_thread_drop_mm(ctx);
+ io_sq_thread_drop_mm();
revert_creds(old_cred);
kthread_parkme();
do {
if (io_cqring_events(ctx, false) >= min_events)
return 0;
- if (!current->task_works)
+ if (!io_run_task_work())
break;
- task_work_run();
} while (1);
if (sig) {
do {
prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
TASK_INTERRUPTIBLE);
- if (current->task_works)
- task_work_run();
- if (io_should_wake(&iowq, false))
- break;
- schedule();
+ /* make sure we run task_work before checking for signals */
+ if (io_run_task_work())
+ continue;
if (signal_pending(current)) {
+ if (current->jobctl & JOBCTL_TASK_WORK) {
+ spin_lock_irq(&current->sighand->siglock);
+ current->jobctl &= ~JOBCTL_TASK_WORK;
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+ continue;
+ }
ret = -EINTR;
break;
}
+ if (io_should_wake(&iowq, false))
+ break;
+ schedule();
} while (1);
finish_wait(&ctx->wait, &iowq.wq);
struct fixed_file_data *data = ctx->file_data;
struct fixed_file_ref_node *ref_node = NULL;
unsigned nr_tables, i;
- unsigned long flags;
if (!data)
return -ENXIO;
- spin_lock_irqsave(&data->lock, flags);
+ spin_lock(&data->lock);
if (!list_empty(&data->ref_list))
ref_node = list_first_entry(&data->ref_list,
struct fixed_file_ref_node, node);
- spin_unlock_irqrestore(&data->lock, flags);
+ spin_unlock(&data->lock);
if (ref_node)
percpu_ref_kill(&ref_node->refs);
percpu_ref_kill(&data->refs);
/* wait for all refs nodes to complete */
+ flush_delayed_work(&ctx->file_put_work);
wait_for_completion(&data->done);
__io_sqe_files_unregister(ctx);
static void io_sq_thread_stop(struct io_ring_ctx *ctx)
{
if (ctx->sqo_thread) {
- wait_for_completion(&ctx->completions[1]);
+ wait_for_completion(&ctx->sq_thread_comp);
/*
* The park is a bit of a work-around, without it we get
* warning spews on shutdown with SQPOLL set and affinity
struct file *file;
};
-static void io_file_put_work(struct work_struct *work)
+static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
{
- struct fixed_file_ref_node *ref_node;
- struct fixed_file_data *file_data;
- struct io_ring_ctx *ctx;
+ struct fixed_file_data *file_data = ref_node->file_data;
+ struct io_ring_ctx *ctx = file_data->ctx;
struct io_file_put *pfile, *tmp;
- unsigned long flags;
-
- ref_node = container_of(work, struct fixed_file_ref_node, work);
- file_data = ref_node->file_data;
- ctx = file_data->ctx;
list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
- list_del_init(&pfile->list);
+ list_del(&pfile->list);
io_ring_file_put(ctx, pfile->file);
kfree(pfile);
}
- spin_lock_irqsave(&file_data->lock, flags);
- list_del_init(&ref_node->node);
- spin_unlock_irqrestore(&file_data->lock, flags);
+ spin_lock(&file_data->lock);
+ list_del(&ref_node->node);
+ spin_unlock(&file_data->lock);
percpu_ref_exit(&ref_node->refs);
kfree(ref_node);
percpu_ref_put(&file_data->refs);
}
+static void io_file_put_work(struct work_struct *work)
+{
+ struct io_ring_ctx *ctx;
+ struct llist_node *node;
+
+ ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
+ node = llist_del_all(&ctx->file_put_llist);
+
+ while (node) {
+ struct fixed_file_ref_node *ref_node;
+ struct llist_node *next = node->next;
+
+ ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
+ __io_file_put_work(ref_node);
+ node = next;
+ }
+}
+
static void io_file_data_ref_zero(struct percpu_ref *ref)
{
struct fixed_file_ref_node *ref_node;
+ struct io_ring_ctx *ctx;
+ bool first_add;
+ int delay = HZ;
ref_node = container_of(ref, struct fixed_file_ref_node, refs);
+ ctx = ref_node->file_data->ctx;
- queue_work(system_wq, &ref_node->work);
+ if (percpu_ref_is_dying(&ctx->file_data->refs))
+ delay = 0;
+
+ first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
+ if (!delay)
+ mod_delayed_work(system_wq, &ctx->file_put_work, 0);
+ else if (first_add)
+ queue_delayed_work(system_wq, &ctx->file_put_work, delay);
}
static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
}
INIT_LIST_HEAD(&ref_node->node);
INIT_LIST_HEAD(&ref_node->file_list);
- INIT_WORK(&ref_node->work, io_file_put_work);
ref_node->file_data = ctx->file_data;
return ref_node;
-
}
static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
int fd, ret = 0;
unsigned i;
struct fixed_file_ref_node *ref_node;
- unsigned long flags;
if (ctx->file_data)
return -EBUSY;
for (i = 0; i < nr_tables; i++)
kfree(ctx->file_data->table[i].files);
+ percpu_ref_exit(&ctx->file_data->refs);
kfree(ctx->file_data->table);
kfree(ctx->file_data);
ctx->file_data = NULL;
}
ctx->file_data->cur_refs = &ref_node->refs;
- spin_lock_irqsave(&ctx->file_data->lock, flags);
+ spin_lock(&ctx->file_data->lock);
list_add(&ref_node->node, &ctx->file_data->ref_list);
- spin_unlock_irqrestore(&ctx->file_data->lock, flags);
+ spin_unlock(&ctx->file_data->lock);
percpu_ref_get(&ctx->file_data->refs);
return ret;
}
__s32 __user *fds;
int fd, i, err;
__u32 done;
- unsigned long flags;
bool needs_switch = false;
if (check_add_overflow(up->offset, nr_args, &done))
}
table->files[index] = file;
err = io_sqe_file_register(ctx, file, i);
- if (err)
+ if (err) {
+ fput(file);
break;
+ }
}
nr_args--;
done++;
if (needs_switch) {
percpu_ref_kill(data->cur_refs);
- spin_lock_irqsave(&data->lock, flags);
+ spin_lock(&data->lock);
list_add(&ref_node->node, &data->ref_list);
data->cur_refs = &ref_node->refs;
- spin_unlock_irqrestore(&data->lock, flags);
+ spin_unlock(&data->lock);
percpu_ref_get(&ctx->file_data->refs);
} else
destroy_fixed_file_ref_node(ref_node);
data.user = ctx->user;
data.free_work = io_free_work;
+ data.do_work = io_wq_submit_work;
if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
/* Do QD, or 4 * CPUS, whatever is smallest */
return 0;
err:
io_finish_async(ctx);
- mmdrop(ctx->sqo_mm);
- ctx->sqo_mm = NULL;
+ if (ctx->sqo_mm) {
+ mmdrop(ctx->sqo_mm);
+ ctx->sqo_mm = NULL;
+ }
return ret;
}
-static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
+static inline void __io_unaccount_mem(struct user_struct *user,
+ unsigned long nr_pages)
{
atomic_long_sub(nr_pages, &user->locked_vm);
}
-static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
+static inline int __io_account_mem(struct user_struct *user,
+ unsigned long nr_pages)
{
unsigned long page_limit, cur_pages, new_pages;
return 0;
}
+static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
+ enum io_mem_account acct)
+{
+ if (ctx->limit_mem)
+ __io_unaccount_mem(ctx->user, nr_pages);
+
+ if (ctx->sqo_mm) {
+ if (acct == ACCT_LOCKED)
+ ctx->sqo_mm->locked_vm -= nr_pages;
+ else if (acct == ACCT_PINNED)
+ atomic64_sub(nr_pages, &ctx->sqo_mm->pinned_vm);
+ }
+}
+
+static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
+ enum io_mem_account acct)
+{
+ int ret;
+
+ if (ctx->limit_mem) {
+ ret = __io_account_mem(ctx->user, nr_pages);
+ if (ret)
+ return ret;
+ }
+
+ if (ctx->sqo_mm) {
+ if (acct == ACCT_LOCKED)
+ ctx->sqo_mm->locked_vm += nr_pages;
+ else if (acct == ACCT_PINNED)
+ atomic64_add(nr_pages, &ctx->sqo_mm->pinned_vm);
+ }
+
+ return 0;
+}
+
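The accounting split above charges ring memory to ->locked_vm and registered buffers to ->pinned_vm of the sqo_mm, while the per-user RLIMIT_MEMLOCK charge now applies only when the creator lacks CAP_IPC_LOCK (limit_mem). Practically, ring setup and buffer registration can still fail with ENOMEM under a small memlock limit; a sketch of raising the soft limit first:

#include <sys/resource.h>

/* raise the memlock soft limit to the hard limit before ring setup */
static int bump_memlock_limit(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) < 0)
		return -1;
	rl.rlim_cur = rl.rlim_max;
	return setrlimit(RLIMIT_MEMLOCK, &rl);
}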
static void io_mem_free(void *ptr)
{
struct page *page;
return SIZE_MAX;
#endif
+ if (sq_offset)
+ *sq_offset = off;
+
sq_array_size = array_size(sizeof(u32), sq_entries);
if (sq_array_size == SIZE_MAX)
return SIZE_MAX;
if (check_add_overflow(off, sq_array_size, &off))
return SIZE_MAX;
- if (sq_offset)
- *sq_offset = off;
-
return off;
}
for (j = 0; j < imu->nr_bvecs; j++)
unpin_user_page(imu->bvec[j].bv_page);
- if (ctx->account_mem)
- io_unaccount_mem(ctx->user, imu->nr_bvecs);
+ io_unaccount_mem(ctx, imu->nr_bvecs, ACCT_PINNED);
kvfree(imu->bvec);
imu->nr_bvecs = 0;
}
start = ubuf >> PAGE_SHIFT;
nr_pages = end - start;
- if (ctx->account_mem) {
- ret = io_account_mem(ctx->user, nr_pages);
- if (ret)
- goto err;
- }
+ ret = io_account_mem(ctx, nr_pages, ACCT_PINNED);
+ if (ret)
+ goto err;
ret = 0;
if (!pages || nr_pages > got_pages) {
- kfree(vmas);
- kfree(pages);
+ kvfree(vmas);
+ kvfree(pages);
pages = kvmalloc_array(nr_pages, sizeof(struct page *),
GFP_KERNEL);
vmas = kvmalloc_array(nr_pages,
GFP_KERNEL);
if (!pages || !vmas) {
ret = -ENOMEM;
- if (ctx->account_mem)
- io_unaccount_mem(ctx->user, nr_pages);
+ io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
goto err;
}
got_pages = nr_pages;
GFP_KERNEL);
ret = -ENOMEM;
if (!imu->bvec) {
- if (ctx->account_mem)
- io_unaccount_mem(ctx->user, nr_pages);
+ io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
goto err;
}
ret = 0;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
pret = pin_user_pages(ubuf, nr_pages,
FOLL_WRITE | FOLL_LONGTERM,
pages, vmas);
} else {
ret = pret < 0 ? pret : -EFAULT;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (ret) {
/*
* if we did partial map, or found file backed vmas,
*/
if (pret > 0)
unpin_user_pages(pages, pret);
- if (ctx->account_mem)
- io_unaccount_mem(ctx->user, nr_pages);
+ io_unaccount_mem(ctx, nr_pages, ACCT_PINNED);
kvfree(imu->bvec);
goto err;
}
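This is the IORING_REGISTER_BUFFERS pin path: user buffers are pinned once up front, accounted as ACCT_PINNED, and then referenced by index from READ_FIXED/WRITE_FIXED requests instead of being mapped per I/O. A userspace sketch, assuming liburing and a placeholder input file:

#include <liburing.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int fd;

	if (io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	iov.iov_len = 65536;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base || io_uring_register_buffers(&ring, &iov, 1) < 0)
		return 1;

	fd = open("input.dat", O_RDONLY);
	if (fd < 0)
		return 1;

	/* the final 0 is the index into the registered buffer table */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		/* cqe->res is the number of bytes read */
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}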
static void io_ring_ctx_free(struct io_ring_ctx *ctx)
{
io_finish_async(ctx);
- if (ctx->sqo_mm)
+ io_sqe_buffer_unregister(ctx);
+ if (ctx->sqo_mm) {
mmdrop(ctx->sqo_mm);
+ ctx->sqo_mm = NULL;
+ }
- io_iopoll_reap_events(ctx);
- io_sqe_buffer_unregister(ctx);
io_sqe_files_unregister(ctx);
io_eventfd_unregister(ctx);
io_destroy_buffers(ctx);
io_mem_free(ctx->sq_sqes);
percpu_ref_exit(&ctx->refs);
- if (ctx->account_mem)
- io_unaccount_mem(ctx->user,
- ring_pages(ctx->sq_entries, ctx->cq_entries));
free_uid(ctx->user);
put_cred(ctx->creds);
- kfree(ctx->completions);
kfree(ctx->cancel_hash);
kmem_cache_free(req_cachep, ctx->fallback_req);
kfree(ctx);
static void io_ring_exit_work(struct work_struct *work)
{
- struct io_ring_ctx *ctx;
+ struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
+ exit_work);
- ctx = container_of(work, struct io_ring_ctx, exit_work);
- if (ctx->rings)
- io_cqring_overflow_flush(ctx, true);
-
- wait_for_completion(&ctx->completions[0]);
+ /*
+ * If we're doing polled IO and end up having requests being
+ * submitted async (out-of-line), then completions can come in while
+ * we're waiting for refs to drop. We need to reap these manually,
+ * as nobody else will be looking for them.
+ */
+ do {
+ if (ctx->rings)
+ io_cqring_overflow_flush(ctx, true);
+ io_iopoll_try_reap_events(ctx);
+ } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
io_ring_ctx_free(ctx);
}
percpu_ref_kill(&ctx->refs);
mutex_unlock(&ctx->uring_lock);
- /*
- * Wait for sq thread to idle, if we have one. It won't spin on new
- * work after we've killed the ctx ref above. This is important to do
- * before we cancel existing commands, as the thread could otherwise
- * be queueing new work post that. If that's work we need to cancel,
- * it could cause shutdown to hang.
- */
- while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
- cond_resched();
-
io_kill_timeouts(ctx);
io_poll_remove_all(ctx);
if (ctx->io_wq)
io_wq_cancel_all(ctx->io_wq);
- io_iopoll_reap_events(ctx);
/* if we failed setting up the ctx, we might not have any rings */
if (ctx->rings)
io_cqring_overflow_flush(ctx, true);
+ io_iopoll_try_reap_events(ctx);
idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
+
+ /*
+ * Do this upfront, so we won't have a grace period where the ring
+ * is closed but resources aren't reaped yet. This can cause
+ * spurious failure in setting up a new ring.
+ */
+ io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
+ ACCT_LOCKED);
+
INIT_WORK(&ctx->exit_work, io_ring_exit_work);
queue_work(system_wq, &ctx->exit_work);
}
return 0;
}
+static bool io_wq_files_match(struct io_wq_work *work, void *data)
+{
+ struct files_struct *files = data;
+
+ return work->files == files;
+}
+
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct files_struct *files)
{
+ if (list_empty_careful(&ctx->inflight_list))
+ return;
+
+ /* cancel all at once, should be faster than doing it one by one*/
+ io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
+
while (!list_empty_careful(&ctx->inflight_list)) {
struct io_kiocb *cancel_req = NULL, *req;
DEFINE_WAIT(wait);
if (cancel_req->flags & REQ_F_OVERFLOW) {
spin_lock_irq(&ctx->completion_lock);
- list_del(&cancel_req->list);
+ list_del(&cancel_req->compl.list);
cancel_req->flags &= ~REQ_F_OVERFLOW;
- if (list_empty(&ctx->cq_overflow_list)) {
- clear_bit(0, &ctx->sq_check_overflow);
- clear_bit(0, &ctx->cq_check_overflow);
- }
- spin_unlock_irq(&ctx->completion_lock);
+ io_cqring_mark_overflow(ctx);
WRITE_ONCE(ctx->rings->cq_overflow,
atomic_inc_return(&ctx->cached_cq_overflow));
+ io_commit_cqring(ctx);
+ spin_unlock_irq(&ctx->completion_lock);
/*
* Put inflight ref and overflow ref. If that's
* all we had, then we're done with this request.
*/
if (refcount_sub_and_test(2, &cancel_req->refs)) {
- io_put_req(cancel_req);
+ io_free_req(cancel_req);
finish_wait(&ctx->inflight_wait, &wait);
continue;
}
+ } else {
+ io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
+ io_put_req(cancel_req);
}
- io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
- io_put_req(cancel_req);
schedule();
finish_wait(&ctx->inflight_wait, &wait);
}
}
+static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
+{
+ struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+ struct task_struct *task = data;
+
+ return req->task == task;
+}
+
static int io_uring_flush(struct file *file, void *data)
{
struct io_ring_ctx *ctx = file->private_data;
* If the task is going away, cancel work it may have pending
*/
if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
- io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
+ io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, current, true);
return 0;
}
int submitted = 0;
struct fd f;
- if (current->task_works)
- task_work_run();
+ io_run_task_work();
if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
return -EINVAL;
submitted = to_submit;
} else if (to_submit) {
mutex_lock(&ctx->uring_lock);
- submitted = io_submit_sqes(ctx, to_submit, f.file, fd, false);
+ submitted = io_submit_sqes(ctx, to_submit, f.file, fd);
mutex_unlock(&ctx->uring_lock);
if (submitted != to_submit)
goto out;
}
if (flags & IORING_ENTER_GETEVENTS) {
- unsigned nr_events = 0;
-
min_complete = min(min_complete, ctx->cq_entries);
/*
*/
if (ctx->flags & IORING_SETUP_IOPOLL &&
!(ctx->flags & IORING_SETUP_SQPOLL)) {
- ret = io_iopoll_check(ctx, &nr_events, min_complete);
+ ret = io_iopoll_check(ctx, min_complete);
} else {
ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
}
struct io_rings *rings;
size_t size, sq_array_offset;
+ /* make sure these are sane, as we already accounted them */
+ ctx->sq_entries = p->sq_entries;
+ ctx->cq_entries = p->cq_entries;
+
size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
if (size == SIZE_MAX)
return -EOVERFLOW;
rings->cq_ring_entries = p->cq_entries;
ctx->sq_mask = rings->sq_ring_mask;
ctx->cq_mask = rings->cq_ring_mask;
- ctx->sq_entries = rings->sq_ring_entries;
- ctx->cq_entries = rings->cq_ring_entries;
size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
if (size == SIZE_MAX) {
{
struct user_struct *user = NULL;
struct io_ring_ctx *ctx;
- bool account_mem;
+ bool limit_mem;
int ret;
if (!entries)
}
user = get_uid(current_user());
- account_mem = !capable(CAP_IPC_LOCK);
+ limit_mem = !capable(CAP_IPC_LOCK);
- if (account_mem) {
- ret = io_account_mem(user,
+ if (limit_mem) {
+ ret = __io_account_mem(user,
ring_pages(p->sq_entries, p->cq_entries));
if (ret) {
free_uid(user);
ctx = io_ring_ctx_alloc(p);
if (!ctx) {
- if (account_mem)
- io_unaccount_mem(user, ring_pages(p->sq_entries,
+ if (limit_mem)
+ __io_unaccount_mem(user, ring_pages(p->sq_entries,
p->cq_entries));
free_uid(user);
return -ENOMEM;
}
ctx->compat = in_compat_syscall();
- ctx->account_mem = account_mem;
ctx->user = user;
ctx->creds = get_current_cred();
+ /*
+ * Account memory _before_ installing the file descriptor. Once
+ * the descriptor is installed, it can get closed at any time. Also
+ * do this before hitting the general error path, as ring freeing
+ * will un-account as well.
+ */
+ io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
+ ACCT_LOCKED);
+ ctx->limit_mem = limit_mem;
+
ret = io_allocate_scq_urings(ctx, p);
if (ret)
goto err;
p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
p->cq_off.cqes = offsetof(struct io_rings, cqes);
+ p->cq_off.flags = offsetof(struct io_rings, cq_flags);
p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
- IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL;
+ IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
+ IORING_FEAT_POLL_32BITS;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
goto err;
}
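IORING_FEAT_POLL_32BITS advertises that poll requests take a full 32-bit event mask (the new poll32_events field), and cq_off.flags exposes the CQ ring flags word. Feature bits come back in the io_uring_params filled by io_uring_setup(2); a probe sketch using the raw syscall, assuming uapi headers new enough to define the flag:

#include <linux/io_uring.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct io_uring_params p;
	int fd;

	memset(&p, 0, sizeof(p));
	fd = syscall(__NR_io_uring_setup, 4, &p);
	if (fd < 0)
		return 1;

	if (p.features & IORING_FEAT_POLL_32BITS) {
		/* poll SQEs may carry a full 32-bit (e)poll mask */
	}
	close(fd);
	return 0;
}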
+
/*
* Install ring fd as the very last thing, so we don't risk someone
* having closed it before we finish setup
* after we've killed the percpu ref.
*/
mutex_unlock(&ctx->uring_lock);
- ret = wait_for_completion_interruptible(&ctx->completions[0]);
+ ret = wait_for_completion_interruptible(&ctx->ref_comp);
mutex_lock(&ctx->uring_lock);
if (ret) {
percpu_ref_resurrect(&ctx->refs);
/* bring the ctx back to life */
percpu_ref_reinit(&ctx->refs);
out:
- reinit_completion(&ctx->completions[0]);
+ reinit_completion(&ctx->ref_comp);
}
return ret;
}
BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags);
BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags);
- BUILD_BUG_SQE_ELEM(28, __u16, poll_events);
+ BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events);
+ BUILD_BUG_SQE_ELEM(28, __u32, poll32_events);
BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags);
BUILD_BUG_SQE_ELEM(28, __u32, msg_flags);
BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags);