#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <uapi/linux/io_uring.h>

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

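/*
 * Illustrative sketch (not part of the original header): appending to the
 * intrusive list above, in the spirit of the wq_list_* helpers that live in
 * io_uring/slist.h. The caller is assumed to hold whatever lock guards the
 * list.
 *
 *	static inline void example_wq_list_add_tail(struct io_wq_work_node *node,
 *						    struct io_wq_work_list *list)
 *	{
 *		node->next = NULL;
 *		if (!list->first) {
 *			list->first = node;
 *			list->last = node;
 *		} else {
 *			list->last->next = node;
 *			list->last = node;
 *		}
 *	}
 */
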
struct io_wq_work {
	struct io_wq_work_node list;
	unsigned flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	spinlock_t		lock;
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket	*hbs;
	unsigned		hash_bits;
};

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};

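/*
 * Illustrative sketch (not part of the original header): how an application
 * reaps one CQE from the shared ring described above. The *_ptr names are
 * hypothetical pointers into the IORING_OFF_CQ_RING mmap, located via
 * struct io_cqring_offsets; the acquire load pairs with the kernel's
 * release store to the CQ tail.
 *
 *	unsigned head = *cq_head_ptr;			// app-owned index
 *	if (head != smp_load_acquire(cq_tail_ptr)) {	// kernel-owned index
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask_ptr];
 *
 *		handle_cqe(cqe);			// hypothetical consumer
 *		smp_store_release(cq_head_ptr, head + 1);
 *	}
 *
 * Likewise, per the sq_flags comment above, an application using SQPOLL
 * must issue a full memory barrier after bumping the SQ tail and before
 * testing sq_flags for IORING_SQ_NEED_WAKEUP.
 */
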
struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	unsigned short		submit_nr;
	struct blk_plug		plug;
};

struct io_ev_fd {
	struct eventfd_ctx	*cq_ev_fd;
	unsigned int		eventfd_async: 1;
	struct rcu_head		rcu;
};

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		struct percpu_ref	refs;

		struct io_rings		*rings;
		unsigned int		flags;
		enum task_work_notify_mode	notify_method;
		unsigned int		compat: 1;
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		has_evfd: 1;
		unsigned int		syscall_iopoll: 1;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;

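		/*
		 * Illustrative sketch (not part of the original header):
		 * userspace queuing one SQE through the indirection above.
		 * The *_ptr names are hypothetical pointers into the app's
		 * IORING_OFF_SQ_RING / IORING_OFF_SQES mappings.
		 *
		 *	unsigned tail = *sq_tail_ptr;
		 *	unsigned idx  = tail & *sq_mask_ptr;
		 *
		 *	sqes[idx] = prepared_sqe;	// fill the SQE slot
		 *	sq_array[idx] = idx;		// publish its index
		 *	smp_store_release(sq_tail_ptr, tail + 1);
		 */
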
		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		int			rsrc_cached_refs;
		atomic_t		cancel_seq;
		struct io_file_table	file_table;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;
		struct io_mapped_ubuf	**user_bufs;

		struct io_submit_state	submit_state;

		struct io_buffer_list	*io_bl;
		struct xarray		io_bl_xa;
		struct list_head	io_buffers_cache;

		struct io_hash_table	cancel_table_locked;
		struct list_head	cq_overflow_list;
		struct list_head	apoll_cache;
		struct xarray		personalities;
		u32			pers_next;
	} ____cacheline_aligned_in_smp;

	/* IRQ completion list, under ->completion_lock */
	struct io_wq_work_list	locked_free_list;
	unsigned int		locked_free_nr;

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned long		check_cq;

	struct {
		/*
		 * We cache a range of free CQEs we can use, once exhausted it
		 * should go through a slower range setup, see __io_get_cqe()
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;
		struct wait_queue_head	cq_wait;
		unsigned		cq_extra;
	} ____cacheline_aligned_in_smp;

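	/*
	 * Illustrative sketch (not part of the original header): the fast
	 * path implied by the comment above, roughly what io_get_cqe() does.
	 * While cqe_cached has not caught up with cqe_sentinel, CQEs are
	 * handed out without touching the slower range setup:
	 *
	 *	if (ctx->cqe_cached < ctx->cqe_sentinel) {
	 *		struct io_uring_cqe *cqe = ctx->cqe_cached;
	 *
	 *		ctx->cached_cq_tail++;
	 *		ctx->cqe_cached++;
	 *		return cqe;
	 *	}
	 *	return __io_get_cqe(ctx);	// refill the cached range
	 */
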
	struct {
		spinlock_t		completion_lock;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct io_wq_work_list	iopoll_list;
		struct io_hash_table	cancel_table;
		bool			poll_multi_queue;

		struct list_head	io_buffers_comp;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t		timeout_lock;
		atomic_t		cq_timeouts;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	/* Keep this last, we don't need it for the fast path */

	struct io_restriction		restrictions;
	struct task_struct		*submitter_task;

	/* slow path rsrc auxiliary data, used by update/register */
	struct io_rsrc_node		*rsrc_backup_node;
	struct io_mapped_ubuf		*dummy_ubuf;
	struct io_rsrc_data		*file_data;
	struct io_rsrc_data		*buf_data;

	struct delayed_work		rsrc_put_work;
	struct llist_head		rsrc_put_llist;
	struct list_head		rsrc_ref_list;
	spinlock_t			rsrc_ref_lock;

	struct list_head		io_buffers_pages;

#if defined(CONFIG_UNIX)
	struct socket			*ring_sock;
#endif
	/* hashed buffered write serialization */
	struct io_wq_hash		*hash_map;

	/* Only used for accounting purposes */
	struct user_struct		*user;
	struct mm_struct		*mm_account;

	/* ctx exit and cancelation */
	struct llist_head		fallback_llist;
	struct delayed_work		fallback_work;
	struct work_struct		exit_work;
	struct list_head		tctx_list;
	struct completion		ref_comp;

	/* io-wq management, e.g. thread count */
	u32				iowq_limits[2];
	bool				iowq_limits_set;

	struct list_head		defer_list;
	unsigned			sq_thread_idle;
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_PARTIAL_IO_BIT,
	REQ_F_CQE32_INIT_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_HASH_LOCKED_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= BIT(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= BIT(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= BIT(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= BIT(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= BIT(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= BIT(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= BIT(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= BIT(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= BIT(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= BIT(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL	= BIT(REQ_F_DOUBLE_POLL_BIT),
	/* request has already done partial IO */
	REQ_F_PARTIAL_IO	= BIT(REQ_F_PARTIAL_IO_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= BIT(REQ_F_APOLL_MULTISHOT_BIT),
	/* ->extra1 and ->extra2 are initialised */
	REQ_F_CQE32_INIT	= BIT(REQ_F_CQE32_INIT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= BIT(REQ_F_CLEAR_POLLIN_BIT),
	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
	REQ_F_HASH_LOCKED	= BIT(REQ_F_HASH_LOCKED_BIT),
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);

struct io_task_work {
	union {
		struct io_wq_work_node	node;
		struct llist_node	fallback_node;
	};
	io_req_tw_func_t		func;
};

struct io_cqe {
	__u64	user_data;
	__s32	res;
	/* fd initially, then cflags for completion */
	union {
		__u32	flags;
		int	fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file		*file;
	/* each command gets 56 bytes of data */
	__u8			data[56];
};

#define io_kiocb_to_cmd(req)	((void *) &(req)->cmd)
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)

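/*
 * Illustrative sketch (not part of the original header): a request type
 * overlays its private state on io_cmd_data via the macros above. The
 * io_example_op type and io_example_prep() are hypothetical; real opcodes
 * follow the same pattern, keeping the file pointer first so it lines up
 * with the union at the top of struct io_kiocb below.
 *
 *	struct io_example_op {
 *		struct file	*file;		// must remain the first member
 *		u64		user_cookie;
 *	};
 *
 *	static int io_example_prep(struct io_kiocb *req)
 *	{
 *		struct io_example_op *op = io_kiocb_to_cmd(req);
 *
 *		BUILD_BUG_ON(sizeof(*op) > sizeof(struct io_cmd_data));
 *		op->user_cookie = 0;
 *		return 0;
 *	}
 */
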
struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};

	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16				buf_index;
	unsigned int			flags;

	struct io_cqe			cqe;

	struct io_ring_ctx		*ctx;
	struct task_struct		*task;

	struct io_rsrc_node		*rsrc_node;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf	*imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list	*buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t		apoll_events;
	};
	atomic_t			refs;
	atomic_t			poll_refs;
	struct io_task_work		io_task_work;
	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
	union {
		struct hlist_node	hash_node;
		struct {
			u64		extra1;
			u64		extra2;
		};
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;
};

struct io_overflow_cqe {
	struct list_head	list;
	struct io_uring_cqe	cqe;
};

#endif