// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"

static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;
	data.free_work = io_wq_free_work;
	data.do_work = io_wq_submit_work;

	/* Do QD, or 4 * CPUS, whatever is smallest */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}

void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;

	WARN_ON_ONCE(!xa_empty(&tctx->xa));
	WARN_ON_ONCE(tctx->io_wq);
	WARN_ON_ONCE(tctx->cached_refs);

	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}

__cold int io_uring_alloc_task_context(struct task_struct *task,
				       struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;
	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}
	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}
	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_idle, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	task->io_uring = tctx;
	init_llist_head(&tctx->task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

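/*
 * With IORING_SETUP_SINGLE_ISSUER, only one task is allowed to submit on a
 * given ring. Record the first task that registers as the submitter; any
 * other task that tries to attach as a submitter is rejected.
 */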
static int io_register_submitter(struct io_ring_ctx *ctx)
{
	int ret = 0;

	mutex_lock(&ctx->uring_lock);
	if (!ctx->submitter_task)
		ctx->submitter_task = get_task_struct(current);
	else if (ctx->submitter_task != current)
		ret = -EEXIST;
	mutex_unlock(&ctx->uring_lock);

	return ret;
}

int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;
	int ret;

	if ((ctx->flags & IORING_SETUP_SINGLE_ISSUER) && submitter) {
		ret = io_register_submitter(ctx);
		if (ret)
			return ret;
	}

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current, ctx);
		if (unlikely(ret))
			return ret;

		tctx = current->io_uring;
		if (ctx->iowq_limits_set) {
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				return ret;
		}
	}
	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;
		node->ctx = ctx;
		node->task = current;

		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
					node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}

		mutex_lock(&ctx->uring_lock);
		list_add(&node->ctx_node, &ctx->tctx_list);
		mutex_unlock(&ctx->uring_lock);
	}
	if (submitter)
		tctx->last = ctx;
	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->uring_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->uring_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);
}

__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}

/* Find a free slot in [start, end) and pin the ring file there. */
static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
				     int start, int end)
{
	struct file *file;
	int offset;

	for (offset = start; offset < end; offset++) {
		offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[offset])
			continue;

		file = fget(fd);
		if (!file) {
			return -EBADF;
		} else if (!io_is_uring_fops(file)) {
			fput(file);
			return -EOPNOTSUPP;
		}
		tctx->registered_rings[offset] = file;
		return offset;
	}

	return -EBUSY;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. User passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, application may set ->offset == -1U
 * and we'll find an available index. Returns number of entries
 * successfully processed, or < 0 on error if none were processed.
 */
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_rsrc_update reg;
	struct io_uring_task *tctx;
	int ret, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;

	mutex_unlock(&ctx->uring_lock);
	ret = __io_uring_add_tctx_node(ctx, false);
	mutex_lock(&ctx->uring_lock);
	if (ret)
		return ret;

	tctx = current->io_uring;
	for (i = 0; i < nr_args; i++) {
		int start, end;

		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv) {
			ret = -EINVAL;
			break;
		}

		if (reg.offset == -1U) {
			start = 0;
			end = IO_RINGFD_REG_MAX;
		} else {
			if (reg.offset >= IO_RINGFD_REG_MAX) {
				ret = -EINVAL;
				break;
			}
			start = reg.offset;
			end = start + 1;
		}

		ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
		if (ret < 0)
			break;

		reg.offset = ret;
		if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
			ret = -EFAULT;
			break;
		}
	}

	return i ? i : ret;
}

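/*
 * Illustrative userspace sketch (not part of this file's build): one way an
 * application might register its ring fd and then use the registered index
 * with io_uring_enter(). The UAPI names (struct io_uring_rsrc_update,
 * IORING_REGISTER_RING_FDS, IORING_ENTER_REGISTERED_RING) are real; the raw
 * syscall() calls are only there to keep the example self-contained.
 *
 *	struct io_uring_rsrc_update upd = {
 *		.offset	= -1U,		// let the kernel pick a free slot
 *		.data	= ring_fd,	// fd returned by io_uring_setup()
 *	};
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_RING_FDS, &upd, 1);
 *	if (ret != 1)
 *		return -1;
 *
 *	// upd.offset now holds the registered index; pass it in place of the
 *	// ring fd and set IORING_ENTER_REGISTERED_RING to skip the per-call
 *	// fdget()/fdput().
 *	syscall(__NR_io_uring_enter, upd.offset, to_submit, min_complete,
 *		IORING_ENTER_REGISTERED_RING, NULL, 0);
 */
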
int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_task *tctx = current->io_uring;
	struct io_uring_rsrc_update reg;
	int ret = 0, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;
	if (!tctx)
		return 0;

	for (i = 0; i < nr_args; i++) {
		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
			ret = -EINVAL;
			break;
		}

		reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[reg.offset]) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
		}
	}

	return i ? i : ret;
}