// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring_types.h"
#include "io_uring.h"
#include "tctx.h"

static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;
	data.free_work = io_wq_free_work;
	data.do_work = io_wq_submit_work;

	/* Do QD, or 4 * CPUS, whatever is smallest */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}

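/*
 * Worked example for the concurrency cap above (illustrative numbers, not
 * from the source): a ring created with sq_entries == 256 on a machine with
 * 8 CPUs online gets min(256, 4 * 8) == 32 as its io-wq concurrency.
 */
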
void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;

	WARN_ON_ONCE(!xa_empty(&tctx->xa));
	WARN_ON_ONCE(tctx->io_wq);
	WARN_ON_ONCE(tctx->cached_refs);

	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}

__cold int io_uring_alloc_task_context(struct task_struct *task,
				       struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}

	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_idle, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	task->io_uring = tctx;
	spin_lock_init(&tctx->task_lock);
	INIT_WQ_LIST(&tctx->task_list);
	INIT_WQ_LIST(&tctx->prio_task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;
	int ret;

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current, ctx);
		if (unlikely(ret))
			return ret;

		tctx = current->io_uring;
		if (ctx->iowq_limits_set) {
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				return ret;
		}
	}
	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;
		node->ctx = ctx;
		node->task = current;

		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
					node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}

		mutex_lock(&ctx->uring_lock);
		list_add(&node->ctx_node, &ctx->tctx_list);
		mutex_unlock(&ctx->uring_lock);
	}
	tctx->last = ctx;
	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->uring_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->uring_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);
}

__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}

static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
				     int start, int end)
{
	struct file *file;
	int offset;

	for (offset = start; offset < end; offset++) {
		offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[offset])
			continue;

		file = fget(fd);
		if (!file) {
			return -EBADF;
		} else if (!io_is_uring_fops(file)) {
			fput(file);
			return -EOPNOTSUPP;
		}
		tctx->registered_rings[offset] = file;
		return offset;
	}

	return -EBUSY;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. User passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, application may set ->offset == -1U
 * and we'll find an available index. Returns number of entries
 * successfully processed, or < 0 on error if none were processed.
 */
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_rsrc_update reg;
	struct io_uring_task *tctx;
	int ret, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;

	mutex_unlock(&ctx->uring_lock);
	ret = io_uring_add_tctx_node(ctx);
	mutex_lock(&ctx->uring_lock);
	if (ret)
		return ret;

	tctx = current->io_uring;
	for (i = 0; i < nr_args; i++) {
		int start, end;

		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}

		if (reg.resv) {
			ret = -EINVAL;
			break;
		}

		if (reg.offset == -1U) {
			start = 0;
			end = IO_RINGFD_REG_MAX;
		} else {
			if (reg.offset >= IO_RINGFD_REG_MAX) {
				ret = -EINVAL;
				break;
			}
			start = reg.offset;
			end = start + 1;
		}

		ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
		if (ret < 0)
			break;

		reg.offset = ret;
		if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
			ret = -EFAULT;
			break;
		}
	}

	return i ? i : ret;
}

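/*
 * Hedged userspace sketch (not part of this file): registering a ring fd
 * through the raw io_uring_register() syscall, using the UAPI names
 * IORING_REGISTER_RING_FDS and struct io_uring_rsrc_update; error handling
 * is elided for brevity.
 *
 *	struct io_uring_rsrc_update upd = {
 *		.offset = -1U,		// let the kernel pick a free slot
 *		.data	= ring_fd,	// the io_uring fd to register
 *	};
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_RING_FDS, &upd, 1);
 *	// On success ret is the number of entries processed (here 1) and
 *	// upd.offset holds the allocated index; that index is then passed
 *	// as the fd argument to io_uring_enter() with
 *	// IORING_ENTER_REGISTERED_RING set in the flags.
 */
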
int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_task *tctx = current->io_uring;
	struct io_uring_rsrc_update reg;
	int ret = 0, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;
	if (!tctx)
		return 0;

	for (i = 0; i < nr_args; i++) {
		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
			ret = -EINVAL;
			break;
		}

		reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[reg.offset]) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
		}
	}

	return i ? i : ret;
}
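
/*
 * Matching hedged userspace sketch for unregistering (again illustrative,
 * not from this file): ->offset names the slot to drop, while ->data and
 * ->resv must be zero.
 *
 *	struct io_uring_rsrc_update upd = {
 *		.offset = registered_idx,	// slot returned at register time
 *	};
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_UNREGISTER_RING_FDS, &upd, 1);
 */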