#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT
	 * are set to indicate to the poll runner that multishot should be
	 * removed and the result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

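/*
 * Sketch of how an opcode handler hands these codes back to the core;
 * io_foo() and do_foo() are hypothetical, not io_uring functions:
 *
 *	static int io_foo(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int res = do_foo(req);
 *
 *		if (res == -EIOCBQUEUED)
 *			return IOU_ISSUE_SKIP_COMPLETE;	// completes later
 *		io_req_set_res(req, res, 0);
 *		return IOU_OK;				// core posts the CQE
 *	}
 */
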
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

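/*
 * Pin the user pages backing [ubuf, ubuf + len); on success the pinned
 * page array is returned and *npages is set to its length.
 */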
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

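/*
 * Resolve the file a request operates on: io_file_get_normal() looks up
 * a plain fd in the task's file table, io_file_get_fixed() resolves an
 * index into the ring's registered (fixed) file table.
 */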
struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

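/*
 * Example (sketch) of walking a request's link chain; the REQ_F_INFLIGHT
 * check is an arbitrary illustration:
 *
 *	struct io_kiocb *cur;
 *
 *	io_for_each_link(cur, head) {
 *		if (cur->flags & REQ_F_INFLIGHT)
 *			return true;
 *	}
 */
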
static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx);
}

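/*
 * Note on the double increment above: with IORING_SETUP_CQE32 each CQE
 * is 32 bytes, i.e. two default-sized ring slots, so cqe_cached advances
 * by two while cached_cq_tail still counts one logical completion.
 */
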
static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

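/*
 * When no CQ slot is available, io_req_cqe_overflow() stashes the
 * completion on the ring's overflow list, so userspace still observes
 * it once the CQ ring has been drained.
 */
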
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

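/*
 * Example (sketch, hypothetical handler) of how an opcode handler
 * brackets ctx state access so the same code works both inline and from
 * an io-wq worker:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... touch ctx state under uring_lock ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */
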
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

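/*
 * The release store above pairs with the acquire load userspace performs
 * on cq.tail before reading CQEs. Minimal sketch of that userspace side
 * (hand-rolled for illustration, not liburing code):
 *
 *	unsigned head = *cq_head;
 *
 *	while (head != smp_load_acquire(cq_tail)) {
 *		struct io_uring_cqe *cqe = &cqes[head++ & cq_ring_mask];
 *		... consume cqe ...
 *	}
 *	smp_store_release(cq_head, head);
 */
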
static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

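/*
 * The acquire load of sq.tail pairs with the release store userspace
 * does after writing new SQEs, so the kernel never reads SQE contents
 * that haven't been published yet.
 */
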
static inline int io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
		!wq_list_empty(&ctx->work_llist);
}

static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
	int ret = 0;
	int ret2;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		ret = io_run_local_work(ctx);

	/* want to run this after in case more is added */
	ret2 = io_run_task_work();

	/*
	 * Prefer propagating an error over the count of tasks run, but
	 * still make sure both get a chance to run when requested.
	 */
	if (ret >= 0)
		ret += ret2;

	return ret;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

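/*
 * Requests queued above sit on ctx->submit_state.compl_reqs and are
 * completed in one batch when the submit path (or task work) flushes
 * that list, amortising the completion lock and CQ ring updates.
 */
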
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

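/*
 * Task references are batched: the common current-task case above only
 * adjusts tctx->cached_refs, and io_get_task_refs() below draws from the
 * same cache, refilling via io_task_refs_refill() only when it runs dry.
 */
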
static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node;

	node = wq_stack_extract(&ctx->submit_state.free_list);
	return container_of(node, struct io_kiocb, comp_list);
}

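/*
 * Typical allocation pattern (sketch, hypothetical caller): refill the
 * request cache first, since io_alloc_req() assumes it is non-empty:
 *
 *	if (unlikely(!io_alloc_req_refill(ctx)))
 *		return -ENOMEM;
 *	req = io_alloc_req(ctx);
 */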