/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
 * Header file for the io_uring interface.
 *
 * Copyright (C) 2019 Jens Axboe
 * Copyright (C) 2019 Christoph Hellwig
 */
#ifndef LINUX_IO_URING_H
#define LINUX_IO_URING_H

#include <linux/fs.h>
#include <linux/types.h>
/*
 * This file is shared with liburing, which has to autodetect whether
 * linux/time_types.h is available. If it is not, liburing can define
 * UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H to skip the include.
 */
#ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
#include <linux/time_types.h>
#endif

/*
 * IO submission data structure (Submission Queue Entry)
 */
struct io_uring_sqe {
	__u8	opcode;		/* type of operation for this sqe */
	__u8	flags;		/* IOSQE_ flags */
	__u16	ioprio;		/* ioprio for the request */
	__s32	fd;		/* file descriptor to do IO on */
	__u64	off;		/* offset into file */
	__u64	addr;		/* pointer to buffer or iovecs */
	__u32	len;		/* buffer size or number of iovecs */
	union {
		__kernel_rwf_t	rw_flags;
		__u16		poll_events;	/* compatibility */
		__u32		poll32_events;	/* word-reversed for BE */
		__u32		sync_range_flags;
		__u32		uring_cmd_flags;
	};
	__u64	user_data;	/* data to be passed back at completion time */
	/* pack this to avoid bogus arm OABI complaints */
	union {
		/* index into fixed buffers, if used */
		__u16	buf_index;
		/* for grouped buffer selection */
		__u16	buf_group;
	} __attribute__((packed));
	/* personality to use, if used */
	__u16	personality;
	/*
	 * If the ring is initialized with IORING_SETUP_SQE128, then
	 * this field is used for 80 bytes of arbitrary command data
	 */
	__u8	cmd[0];
};

/*
 * If sqe->file_index is set to this for opcodes that instantiate a new
 * direct descriptor (like openat/openat2/accept), then io_uring will allocate
 * an available direct descriptor instead of having the application pass one
 * in. The picked direct descriptor will be returned in cqe->res, or -ENFILE
 * if the space is full.
 */
#define IORING_FILE_INDEX_ALLOC		(~0U)

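/*
 * Illustrative sketch, compiled out: letting the kernel pick a direct
 * descriptor slot for an accepted socket via the liburing helper
 * io_uring_prep_accept_direct(). The allocated slot comes back in
 * cqe->res; a full table must be expected to yield -ENFILE.
 */
#if 0
#include <liburing.h>

static void queue_accept_alloc(struct io_uring *ring, int listen_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_accept_direct(sqe, listen_fd, NULL, NULL, 0,
				    IORING_FILE_INDEX_ALLOC);
}
#endif
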
enum {
	IOSQE_FIXED_FILE_BIT,
	IOSQE_IO_DRAIN_BIT,
	IOSQE_IO_LINK_BIT,
	IOSQE_IO_HARDLINK_BIT,
	IOSQE_ASYNC_BIT,
	IOSQE_BUFFER_SELECT_BIT,
	IOSQE_CQE_SKIP_SUCCESS_BIT,
};

/*
 * sqe->flags
 */
/* use fixed fileset */
#define IOSQE_FIXED_FILE	(1U << IOSQE_FIXED_FILE_BIT)
/* issue after inflight IO */
#define IOSQE_IO_DRAIN		(1U << IOSQE_IO_DRAIN_BIT)
/* links next sqe */
#define IOSQE_IO_LINK		(1U << IOSQE_IO_LINK_BIT)
/* like LINK, but stronger */
#define IOSQE_IO_HARDLINK	(1U << IOSQE_IO_HARDLINK_BIT)
/* always go async */
#define IOSQE_ASYNC		(1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT	(1U << IOSQE_BUFFER_SELECT_BIT)
/* don't post CQE if request succeeded */
#define IOSQE_CQE_SKIP_SUCCESS	(1U << IOSQE_CQE_SKIP_SUCCESS_BIT)

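/*
 * Illustrative sketch, compiled out: IOSQE_IO_LINK orders a write before
 * an fsync. If the write fails or is short, the linked fsync completes
 * with -ECANCELED. Uses liburing helpers; error handling omitted.
 */
#if 0
#include <liburing.h>

static int write_then_fsync(struct io_uring *ring, int fd,
			    const void *buf, unsigned int len)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_write(sqe, fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;		/* chain to the next SQE */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_fsync(sqe, fd, 0);

	return io_uring_submit(ring);		/* submits both requests */
}
#endif
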
/*
 * io_uring_setup() flags
 */
#define IORING_SETUP_IOPOLL	(1U << 0)	/* io_context is polled */
#define IORING_SETUP_SQPOLL	(1U << 1)	/* SQ poll thread */
#define IORING_SETUP_SQ_AFF	(1U << 2)	/* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE	(1U << 3)	/* app defines CQ size */
#define IORING_SETUP_CLAMP	(1U << 4)	/* clamp SQ/CQ ring sizes */
#define IORING_SETUP_ATTACH_WQ	(1U << 5)	/* attach to existing wq */
#define IORING_SETUP_R_DISABLED	(1U << 6)	/* start with ring disabled */
#define IORING_SETUP_SUBMIT_ALL	(1U << 7)	/* continue submit on error */
/*
 * Cooperative task running. When requests complete, they often require
 * forcing the submitter to transition to the kernel to complete. If this
 * flag is set, work will be done when the task transitions anyway, rather
 * than force an inter-processor interrupt reschedule. This avoids interrupting
 * a task running in userspace, and saves an IPI.
 */
#define IORING_SETUP_COOP_TASKRUN	(1U << 8)
/*
 * If COOP_TASKRUN is set, get notified if task work is available for
 * running and a kernel transition would be needed to run it. This sets
 * IORING_SQ_TASKRUN in the sq ring flags. Not valid with IORING_SETUP_SQPOLL.
 */
#define IORING_SETUP_TASKRUN_FLAG	(1U << 9)
#define IORING_SETUP_SQE128		(1U << 10) /* SQEs are 128 byte */
#define IORING_SETUP_CQE32		(1U << 11) /* CQEs are 32 byte */
/*
 * Only one task is allowed to submit requests
 */
#define IORING_SETUP_SINGLE_ISSUER	(1U << 12)
/*
 * Defer running task work to get events.
 * Rather than running bits of task work whenever the task transitions,
 * try to do it just before it is needed.
 */
#define IORING_SETUP_DEFER_TASKRUN	(1U << 13)
/*
 * Application provides the memory for the rings
 */
#define IORING_SETUP_NO_MMAP		(1U << 14)
/*
 * Register the ring fd in itself for use with
 * IORING_REGISTER_USE_REGISTERED_RING; return a registered fd index rather
 * than an fd.
 */
#define IORING_SETUP_REGISTERED_FD_ONLY	(1U << 15)

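/*
 * Illustrative sketch, compiled out: a plausible flag combination for a
 * ring owned by a single thread, using cooperative task running. Whether
 * these flags pay off is workload dependent. Uses the liburing helper.
 */
#if 0
#include <liburing.h>

static int make_ring(struct io_uring *ring)
{
	return io_uring_queue_init(256, ring,
				   IORING_SETUP_SINGLE_ISSUER |
				   IORING_SETUP_COOP_TASKRUN);
}
#endif
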
enum io_uring_op {
	IORING_OP_READ_FIXED		= 4,
	IORING_OP_WRITE_FIXED		= 5,
	IORING_OP_POLL_REMOVE		= 7,
	IORING_OP_SYNC_FILE_RANGE	= 8,
	IORING_OP_TIMEOUT_REMOVE	= 12,
	IORING_OP_ASYNC_CANCEL		= 14,
	IORING_OP_LINK_TIMEOUT		= 15,
	IORING_OP_FILES_UPDATE		= 20,
	IORING_OP_PROVIDE_BUFFERS	= 31,
	IORING_OP_REMOVE_BUFFERS	= 32,
	IORING_OP_SENDMSG_ZC		= 48,

	/* this goes last, obviously */
	IORING_OP_LAST,
};

/*
 * sqe->uring_cmd_flags
 * IORING_URING_CMD_FIXED	use registered buffer; pass this flag
 *				along with setting sqe->buf_index.
 * IORING_URING_CMD_POLLED	driver use only
 */
#define IORING_URING_CMD_FIXED	(1U << 0)
#define IORING_URING_CMD_POLLED	(1U << 31)

/*
 * sqe->fsync_flags
 */
#define IORING_FSYNC_DATASYNC	(1U << 0)

/*
 * sqe->timeout_flags
 */
#define IORING_TIMEOUT_ABS		(1U << 0)
#define IORING_TIMEOUT_UPDATE		(1U << 1)
#define IORING_TIMEOUT_BOOTTIME		(1U << 2)
#define IORING_TIMEOUT_REALTIME		(1U << 3)
#define IORING_LINK_TIMEOUT_UPDATE	(1U << 4)
#define IORING_TIMEOUT_ETIME_SUCCESS	(1U << 5)
#define IORING_TIMEOUT_MULTISHOT	(1U << 6)
#define IORING_TIMEOUT_CLOCK_MASK	(IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK	(IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)

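/*
 * Illustrative sketch, compiled out: a 500ms relative timeout request via
 * liburing. Without IORING_TIMEOUT_ABS the timespec is relative, and the
 * CQE carries -ETIME on expiry unless IORING_TIMEOUT_ETIME_SUCCESS is set.
 */
#if 0
#include <liburing.h>

static void queue_timeout(struct io_uring *ring)
{
	/* static: must stay valid until the SQE is submitted */
	static struct __kernel_timespec ts = {
		.tv_sec		= 0,
		.tv_nsec	= 500 * 1000 * 1000,
	};
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* count == 0: pure timer, not tied to a completion count */
	io_uring_prep_timeout(sqe, &ts, 0, 0);
}
#endif
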
/*
 * sqe->splice_flags
 * extends splice(2) flags
 */
#define SPLICE_F_FD_IN_FIXED	(1U << 31) /* the last bit of __u32 */

/*
 * POLL_ADD flags. Note that since sqe->poll_events is the flag space, the
 * command flags for POLL_ADD are stored in sqe->len.
 *
 * IORING_POLL_ADD_MULTI	Multishot poll. Sets IORING_CQE_F_MORE if
 *				the poll handler will continue to report
 *				CQEs on behalf of the same SQE.
 *
 * IORING_POLL_UPDATE		Update existing poll request, matching
 *				sqe->addr as the old user_data field.
 *
 * IORING_POLL_ADD_LEVEL	Level triggered poll.
 */
#define IORING_POLL_ADD_MULTI		(1U << 0)
#define IORING_POLL_UPDATE_EVENTS	(1U << 1)
#define IORING_POLL_UPDATE_USER_DATA	(1U << 2)
#define IORING_POLL_ADD_LEVEL		(1U << 3)

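/*
 * Illustrative sketch, compiled out: multishot poll on a socket. One SQE
 * keeps producing CQEs while IORING_CQE_F_MORE stays set in cqe->flags;
 * once it is clear, the poll must be re-armed.
 */
#if 0
#include <liburing.h>
#include <poll.h>

static void poll_readable_forever(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_poll_multishot(sqe, sockfd, POLLIN);
}
#endif
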
/*
 * ASYNC_CANCEL flags.
 *
 * IORING_ASYNC_CANCEL_ALL	Cancel all requests that match the given key
 * IORING_ASYNC_CANCEL_FD	Key off 'fd' for cancelation rather than the
 *				request 'user_data'
 * IORING_ASYNC_CANCEL_ANY	Match any request
 * IORING_ASYNC_CANCEL_FD_FIXED	'fd' passed in is a fixed descriptor
 */
#define IORING_ASYNC_CANCEL_ALL		(1U << 0)
#define IORING_ASYNC_CANCEL_FD		(1U << 1)
#define IORING_ASYNC_CANCEL_ANY		(1U << 2)
#define IORING_ASYNC_CANCEL_FD_FIXED	(1U << 3)

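/*
 * Illustrative sketch, compiled out: cancel every pending request that
 * targets a given file descriptor, rather than matching on user_data.
 * The liburing helper sets IORING_ASYNC_CANCEL_FD internally.
 */
#if 0
#include <liburing.h>

static void cancel_all_on_fd(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_cancel_fd(sqe, fd, IORING_ASYNC_CANCEL_ALL);
}
#endif
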
/*
 * send/sendmsg and recv/recvmsg flags (sqe->ioprio)
 *
 * IORING_RECVSEND_POLL_FIRST	If set, instead of first attempting to send
 *				or receive and arm poll if that yields an
 *				-EAGAIN result, arm poll upfront and skip
 *				the initial transfer attempt.
 *
 * IORING_RECV_MULTISHOT	Multishot recv. Sets IORING_CQE_F_MORE if
 *				the handler will continue to report
 *				CQEs on behalf of the same SQE.
 *
 * IORING_RECVSEND_FIXED_BUF	Use registered buffers, the index is stored in
 *				the buf_index field.
 *
 * IORING_SEND_ZC_REPORT_USAGE
 *				If set, SEND[MSG]_ZC should report
 *				the zerocopy usage in cqe.res
 *				for the IORING_CQE_F_NOTIF cqe.
 *				0 is reported if zerocopy was actually possible.
 *				IORING_NOTIF_USAGE_ZC_COPIED if data was copied
 *				(at least partially).
 */
#define IORING_RECVSEND_POLL_FIRST	(1U << 0)
#define IORING_RECV_MULTISHOT		(1U << 1)
#define IORING_RECVSEND_FIXED_BUF	(1U << 2)
#define IORING_SEND_ZC_REPORT_USAGE	(1U << 3)

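/*
 * Illustrative sketch, compiled out: multishot receive with provided
 * buffers. Each CQE reports one chunk and, via the flags word, which
 * buffer the kernel picked (see IORING_CQE_F_BUFFER below). The buffer
 * group id 7 is an assumption; the group must be registered elsewhere.
 */
#if 0
#include <liburing.h>

static void queue_recv_multishot(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	/* len == 0: buffer size comes from the provided-buffer pool */
	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 7;	/* assumed buffer group id */
}
#endif
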
/*
 * cqe.res for IORING_CQE_F_NOTIF if
 * IORING_SEND_ZC_REPORT_USAGE was requested
 *
 * It should be treated as a flag; all other
 * bits of cqe.res should be treated as reserved!
 */
#define IORING_NOTIF_USAGE_ZC_COPIED	(1U << 31)

/*
 * accept flags stored in sqe->ioprio
 */
#define IORING_ACCEPT_MULTISHOT	(1U << 0)

/*
 * IORING_OP_MSG_RING command types, stored in sqe->addr
 */
enum {
	IORING_MSG_DATA,	/* pass sqe->len as 'res' and off as user_data */
	IORING_MSG_SEND_FD,	/* send a registered fd to another ring */
};

/*
 * IORING_OP_MSG_RING flags (sqe->msg_ring_flags)
 *
 * IORING_MSG_RING_CQE_SKIP	Don't post a CQE to the target ring. Not
 *				applicable for IORING_MSG_DATA, obviously.
 */
#define IORING_MSG_RING_CQE_SKIP	(1U << 0)
/* Pass through the flags from sqe->file_index to cqe->flags */
#define IORING_MSG_RING_FLAGS_PASS	(1U << 1)

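/*
 * Illustrative sketch, compiled out: posting a synthetic CQE into another
 * ring with IORING_OP_MSG_RING, e.g. to wake a sibling thread. The target
 * ring sees res == 0xbeef and user_data == 0xcafe (arbitrary demo values).
 */
#if 0
#include <liburing.h>

static void poke_other_ring(struct io_uring *ring, int target_ring_fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_msg_ring(sqe, target_ring_fd, 0xbeef, 0xcafe, 0);
}
#endif
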
/*
 * IO completion data structure (Completion Queue Entry)
 */
struct io_uring_cqe {
	__u64	user_data;	/* sqe->data submission passed back */
	__s32	res;		/* result code for this event */
	__u32	flags;

	/*
	 * If the ring is initialized with IORING_SETUP_CQE32, then this field
	 * contains 16-bytes of padding, doubling the size of the CQE.
	 */
	__u64	big_cqe[];
};

/*
 * cqe->flags
 *
 * IORING_CQE_F_BUFFER		If set, the upper 16 bits are the buffer ID
 * IORING_CQE_F_MORE		If set, parent SQE will generate more CQE entries
 * IORING_CQE_F_SOCK_NONEMPTY	If set, more data to read after socket recv
 * IORING_CQE_F_NOTIF		Set for notification CQEs. Can be used to
 *				distinguish them from sends.
 */
#define IORING_CQE_F_BUFFER		(1U << 0)
#define IORING_CQE_F_MORE		(1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY	(1U << 2)
#define IORING_CQE_F_NOTIF		(1U << 3)

enum {
	IORING_CQE_BUFFER_SHIFT		= 16,
};

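/*
 * Illustrative sketch, compiled out: recovering the provided-buffer ID
 * from a completion when IORING_CQE_F_BUFFER is set.
 */
#if 0
static int cqe_buffer_id(const struct io_uring_cqe *cqe)
{
	if (!(cqe->flags & IORING_CQE_F_BUFFER))
		return -1;			/* no buffer was picked */
	return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
}
#endif
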
/*
 * Magic offsets for the application to mmap the data it needs
 */
#define IORING_OFF_SQ_RING		0ULL
#define IORING_OFF_CQ_RING		0x8000000ULL
#define IORING_OFF_SQES			0x10000000ULL
#define IORING_OFF_PBUF_RING		0x80000000ULL
#define IORING_OFF_PBUF_SHIFT		16
#define IORING_OFF_MMAP_MASK		0xf8000000ULL

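/*
 * Illustrative sketch, compiled out: the classic raw-syscall mmap dance,
 * mapping the SQ ring and the SQE array with the magic offsets above.
 * Assumes two separate mappings (i.e. not relying on
 * IORING_FEAT_SINGLE_MMAP); error handling omitted.
 */
#if 0
#include <sys/mman.h>

static void map_sq(int ring_fd, struct io_uring_params *p,
		   void **sq_ring, struct io_uring_sqe **sqes)
{
	size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(__u32);

	*sq_ring = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
	*sqes = mmap(NULL, p->sq_entries * sizeof(struct io_uring_sqe),
		     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		     ring_fd, IORING_OFF_SQES);
}
#endif
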
/*
 * Filled with the offset for mmap(2)
 */
struct io_sqring_offsets {
	__u32 head;
	__u32 tail;
	__u32 ring_mask;
	__u32 ring_entries;
	__u32 flags;
	__u32 dropped;
	__u32 array;
	__u32 resv1;
	__u64 user_addr;
};

/*
 * sq_ring->flags
 */
#define IORING_SQ_NEED_WAKEUP	(1U << 0) /* needs io_uring_enter wakeup */
#define IORING_SQ_CQ_OVERFLOW	(1U << 1) /* CQ ring is overflown */
#define IORING_SQ_TASKRUN	(1U << 2) /* task should enter the kernel */

struct io_cqring_offsets {
	__u32 head;
	__u32 tail;
	__u32 ring_mask;
	__u32 ring_entries;
	__u32 overflow;
	__u32 cqes;
	__u32 flags;
	__u32 resv1;
	__u64 user_addr;
};

/*
 * cq_ring->flags
 */

/* disable eventfd notifications */
#define IORING_CQ_EVENTFD_DISABLED	(1U << 0)

/*
 * io_uring_enter(2) flags
 */
#define IORING_ENTER_GETEVENTS		(1U << 0)
#define IORING_ENTER_SQ_WAKEUP		(1U << 1)
#define IORING_ENTER_SQ_WAIT		(1U << 2)
#define IORING_ENTER_EXT_ARG		(1U << 3)
#define IORING_ENTER_REGISTERED_RING	(1U << 4)

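/*
 * Illustrative sketch, compiled out: submitting 'to_submit' SQEs and
 * waiting for at least one completion with the raw syscall. Glibc ships
 * no wrapper, so syscall(2) is used directly.
 */
#if 0
#include <sys/syscall.h>
#include <unistd.h>

static int enter_and_wait(int ring_fd, unsigned int to_submit)
{
	return (int) syscall(__NR_io_uring_enter, ring_fd, to_submit,
			     1 /* min_complete */, IORING_ENTER_GETEVENTS,
			     NULL, 0);
}
#endif
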
/*
 * Passed in for io_uring_setup(2). Copied back with updated info on success
 */
struct io_uring_params {
	__u32 sq_entries;
	__u32 cq_entries;
	__u32 flags;
	__u32 sq_thread_cpu;
	__u32 sq_thread_idle;
	__u32 features;
	__u32 wq_fd;
	__u32 resv[3];
	struct io_sqring_offsets sq_off;
	struct io_cqring_offsets cq_off;
};

/*
 * io_uring_params->features flags
 */
#define IORING_FEAT_SINGLE_MMAP		(1U << 0)
#define IORING_FEAT_NODROP		(1U << 1)
#define IORING_FEAT_SUBMIT_STABLE	(1U << 2)
#define IORING_FEAT_RW_CUR_POS		(1U << 3)
#define IORING_FEAT_CUR_PERSONALITY	(1U << 4)
#define IORING_FEAT_FAST_POLL		(1U << 5)
#define IORING_FEAT_POLL_32BITS		(1U << 6)
#define IORING_FEAT_SQPOLL_NONFIXED	(1U << 7)
#define IORING_FEAT_EXT_ARG		(1U << 8)
#define IORING_FEAT_NATIVE_WORKERS	(1U << 9)
#define IORING_FEAT_RSRC_TAGS		(1U << 10)
#define IORING_FEAT_CQE_SKIP		(1U << 11)
#define IORING_FEAT_LINKED_FILE		(1U << 12)
#define IORING_FEAT_REG_REG_RING	(1U << 13)

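/*
 * Illustrative sketch, compiled out: reading back the feature mask that
 * io_uring_setup(2) fills into io_uring_params->features, via liburing.
 */
#if 0
#include <liburing.h>

static int ring_has_nodrop(void)
{
	struct io_uring ring;
	struct io_uring_params p = { 0 };
	int has;

	if (io_uring_queue_init_params(8, &ring, &p) < 0)
		return 0;
	has = !!(p.features & IORING_FEAT_NODROP);
	io_uring_queue_exit(&ring);
	return has;
}
#endif
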
/*
 * io_uring_register(2) opcodes and arguments
 */
enum {
	IORING_REGISTER_BUFFERS			= 0,
	IORING_UNREGISTER_BUFFERS		= 1,
	IORING_REGISTER_FILES			= 2,
	IORING_UNREGISTER_FILES			= 3,
	IORING_REGISTER_EVENTFD			= 4,
	IORING_UNREGISTER_EVENTFD		= 5,
	IORING_REGISTER_FILES_UPDATE		= 6,
	IORING_REGISTER_EVENTFD_ASYNC		= 7,
	IORING_REGISTER_PROBE			= 8,
	IORING_REGISTER_PERSONALITY		= 9,
	IORING_UNREGISTER_PERSONALITY		= 10,
	IORING_REGISTER_RESTRICTIONS		= 11,
	IORING_REGISTER_ENABLE_RINGS		= 12,

	/* extended with tagging */
	IORING_REGISTER_FILES2			= 13,
	IORING_REGISTER_FILES_UPDATE2		= 14,
	IORING_REGISTER_BUFFERS2		= 15,
	IORING_REGISTER_BUFFERS_UPDATE		= 16,

	/* set/clear io-wq thread affinities */
	IORING_REGISTER_IOWQ_AFF		= 17,
	IORING_UNREGISTER_IOWQ_AFF		= 18,

	/* set/get max number of io-wq workers */
	IORING_REGISTER_IOWQ_MAX_WORKERS	= 19,

	/* register/unregister io_uring fd with the ring */
	IORING_REGISTER_RING_FDS		= 20,
	IORING_UNREGISTER_RING_FDS		= 21,

	/* register ring based provide buffer group */
	IORING_REGISTER_PBUF_RING		= 22,
	IORING_UNREGISTER_PBUF_RING		= 23,

	/* sync cancelation API */
	IORING_REGISTER_SYNC_CANCEL		= 24,

	/* register a range of fixed file slots for automatic slot allocation */
	IORING_REGISTER_FILE_ALLOC_RANGE	= 25,
	/* this goes last */
	IORING_REGISTER_LAST,

	/* flag added to the opcode to use a registered ring fd */
	IORING_REGISTER_USE_REGISTERED_RING	= 1U << 31
};

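/*
 * Illustrative sketch, compiled out: registering one fixed buffer (the
 * liburing wrapper for IORING_REGISTER_BUFFERS) so that READ_FIXED,
 * WRITE_FIXED and IORING_RECVSEND_FIXED_BUF can reference it by index
 * instead of the kernel remapping it on every request.
 */
#if 0
#include <liburing.h>
#include <sys/uio.h>

static int register_one_buffer(struct io_uring *ring, void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };

	return io_uring_register_buffers(ring, &iov, 1);
}
#endif
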
/* io-wq worker categories */
enum {
	IO_WQ_BOUND,
	IO_WQ_UNBOUND,
};

/* deprecated, see struct io_uring_rsrc_update */
struct io_uring_files_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 /* __s32 * */ fds;
};

/*
 * Register a fully sparse file space, rather than pass in an array of all
 * -1 file descriptors.
 */
#define IORING_RSRC_REGISTER_SPARSE	(1U << 0)

struct io_uring_rsrc_register {
	__u32 nr;
	__u32 flags;
	__u64 resv2;
	__aligned_u64 data;
	__aligned_u64 tags;
};

struct io_uring_rsrc_update {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
};

struct io_uring_rsrc_update2 {
	__u32 offset;
	__u32 resv;
	__aligned_u64 data;
	__aligned_u64 tags;
	__u32 nr;
	__u32 resv2;
};

/* Skip updating fd indexes set to this value in the fd table */
#define IORING_REGISTER_FILES_SKIP	(-2)

#define IO_URING_OP_SUPPORTED	(1U << 0)

struct io_uring_probe_op {
	__u8 op;
	__u8 resv;
	__u16 flags;	/* IO_URING_OP_* flags */
	__u32 resv2;
};

struct io_uring_probe {
	__u8 last_op;	/* last opcode supported */
	__u8 ops_len;	/* length of ops[] array below */
	__u16 resv;
	__u32 resv2[3];
	struct io_uring_probe_op ops[];
};

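/*
 * Illustrative sketch, compiled out: probing whether the running kernel
 * supports an opcode before relying on it, via the liburing probe helpers
 * (which wrap IORING_REGISTER_PROBE).
 */
#if 0
#include <liburing.h>

static int sendmsg_zc_supported(void)
{
	struct io_uring_probe *probe = io_uring_get_probe();
	int ok;

	if (!probe)
		return 0;
	ok = io_uring_opcode_supported(probe, IORING_OP_SENDMSG_ZC);
	io_uring_free_probe(probe);
	return ok;
}
#endif
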
struct io_uring_restriction {
	__u16 opcode;
	union {
		__u8 register_op;	/* IORING_RESTRICTION_REGISTER_OP */
		__u8 sqe_op;		/* IORING_RESTRICTION_SQE_OP */
		__u8 sqe_flags;		/* IORING_RESTRICTION_SQE_FLAGS_* */
	};
	__u8 resv;
	__u32 resv2[3];
};

struct io_uring_buf {
	__u64	addr;
	__u32	len;
	__u16	bid;
	__u16	resv;
};

struct io_uring_buf_ring {
	union {
		/*
		 * To avoid spilling into more pages than we need to, the
		 * ring tail is overlaid with the io_uring_buf->resv field.
		 */
		struct {
			__u64	resv1;
			__u32	resv2;
			__u16	resv3;
			__u16	tail;
		};
		__DECLARE_FLEX_ARRAY(struct io_uring_buf, bufs);
	};
};

/*
 * Flags for IORING_REGISTER_PBUF_RING.
 *
 * IOU_PBUF_RING_MMAP:	If set, kernel will allocate the memory for the ring.
 *			The application must not set a ring_addr in struct
 *			io_uring_buf_reg, instead it must subsequently call
 *			mmap(2) with the offset set as:
 *			IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT)
 *			to get a virtual mapping for the ring.
 */
enum {
	IOU_PBUF_RING_MMAP	= 1,
};

/* argument for IORING_(UN)REGISTER_PBUF_RING */
struct io_uring_buf_reg {
	__u64	ring_addr;
	__u32	ring_entries;
	__u16	bgid;
	__u16	flags;
	__u64	resv[3];
};

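/*
 * Illustrative sketch, compiled out: setting up a ring-mapped provided
 * buffer group with the convenience helper from recent liburing, then
 * publishing 8 buffers to the kernel. The bgid 7, buffer count, and
 * buffer size are assumptions for the demo.
 */
#if 0
#include <liburing.h>

#define NBUFS	8
#define BUFLEN	4096

static struct io_uring_buf_ring *setup_pool(struct io_uring *ring, char *base)
{
	struct io_uring_buf_ring *br;
	int i, ret;

	br = io_uring_setup_buf_ring(ring, NBUFS, 7 /* bgid */, 0, &ret);
	if (!br)
		return NULL;
	for (i = 0; i < NBUFS; i++)
		io_uring_buf_ring_add(br, base + i * BUFLEN, BUFLEN, i,
				      io_uring_buf_ring_mask(NBUFS), i);
	io_uring_buf_ring_advance(br, NBUFS);	/* make buffers visible */
	return br;
}
#endif
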
/*
 * io_uring_restriction->opcode values
 */
enum {
	/* Allow an io_uring_register(2) opcode */
	IORING_RESTRICTION_REGISTER_OP		= 0,

	/* Allow an sqe opcode */
	IORING_RESTRICTION_SQE_OP		= 1,

	/* Allow sqe flags */
	IORING_RESTRICTION_SQE_FLAGS_ALLOWED	= 2,

	/* Require sqe flags (these flags must be set on each submission) */
	IORING_RESTRICTION_SQE_FLAGS_REQUIRED	= 3,

	IORING_RESTRICTION_LAST
};

struct io_uring_getevents_arg {
	__u64	sigmask;
	__u32	sigmask_sz;
	__u32	pad;
	__u64	ts;
};

/*
 * Argument for IORING_REGISTER_SYNC_CANCEL
 */
struct io_uring_sync_cancel_reg {
	__u64				addr;
	__s32				fd;
	__u32				flags;
	struct __kernel_timespec	timeout;
	__u64				pad[4];
};

/*
 * Argument for IORING_REGISTER_FILE_ALLOC_RANGE
 * The range is specified as [off, off + len)
 */
struct io_uring_file_index_range {
	__u32	off;
	__u32	len;
	__u64	resv;
};

struct io_uring_recvmsg_out {
	__u32 namelen;
	__u32 controllen;
	__u32 payloadlen;
	__u32 flags;
};

#endif /* LINUX_IO_URING_H */