11 #include "../../include/uapi/linux/io_uring.h"
16 * Library interface to io_uring
/*
 * NOTE(review): partial view of the SQ/CQ ring structs -- the struct
 * headers, braces, and several members are missing from this excerpt,
 * so only the visible fields are annotated.
 */
22 unsigned *kring_entries;	/* presumably kernel-mapped entry count -- confirm */
26 struct io_uring_sqe *sqes;	/* submission queue entry storage */
38 unsigned *kring_entries;	/* presumably kernel-mapped entry count -- confirm */
40 struct io_uring_cqe *cqes;	/* completion queue entry storage */
46 struct io_uring_sq sq;	/* submission ring state */
47 struct io_uring_cq cq;	/* completion ring state */
/*
 * System call interface.
 * NOTE(review): these mirror the io_uring_setup(2), io_uring_enter(2)
 * and io_uring_register(2) syscalls -- presumably thin raw wrappers;
 * confirm against the implementation file.
 */
54 extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
55 extern int io_uring_enter(int fd, unsigned to_submit,
56 unsigned min_complete, unsigned flags, sigset_t *sig);
57 extern int io_uring_register(int fd, unsigned int opcode, void *arg,
58 unsigned int nr_args);
/*
 * Library interface: ring setup/teardown, submission and completion
 * helpers.
 * NOTE(review): the io_uring_queue_init() prototype is cut short in
 * this excerpt -- its trailing parameter line is missing.
 */
63 extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
65 extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
66 struct io_uring *ring);
67 extern void io_uring_queue_exit(struct io_uring *ring);
68 extern int io_uring_peek_cqe(struct io_uring *ring,
69 struct io_uring_cqe **cqe_ptr);
70 extern int io_uring_wait_cqe(struct io_uring *ring,
71 struct io_uring_cqe **cqe_ptr);
72 extern int io_uring_submit(struct io_uring *ring);
73 extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
76 * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
77 * been processed by the application.
79 static inline void io_uring_cqe_seen(struct io_uring *ring,
80 struct io_uring_cqe *cqe)
/*
 * NOTE(review): the function body is incomplete in this excerpt -- the
 * CQ head advance and the write barrier that the comment below refers
 * to are not visible here.
 */
83 struct io_uring_cq *cq = &ring->cq;
87 * Ensure that the kernel sees our new head, the kernel has
88 * the matching read barrier.
95 * Command prep helpers
/*
 * Attach caller data to an SQE via its user_data field; presumably
 * recovered on completion with io_uring_cqe_get_data() -- confirm.
 * NOTE(review): stores through (unsigned long) while the getter casts
 * back through (uintptr_t) -- inconsistent; uintptr_t is the portable
 * pointer round-trip type.
 */
97 static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
99 sqe->user_data = (unsigned long) data;
/*
 * Recover the pointer stored in cqe->user_data (presumably set with
 * io_uring_sqe_set_data() on the matching SQE -- confirm).
 */
102 static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
104 return (void *) (uintptr_t) cqe->user_data;
/*
 * Common SQE initialisation shared by the read/write prep helpers
 * below.
 * NOTE(review): excerpt is incomplete -- the final parameter line, the
 * braces, and the opcode/fd/len/offset assignments are not visible
 * here; only the memset and addr assignment remain.
 */
107 static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
108 const void *addr, unsigned len,
111 memset(sqe, 0, sizeof(*sqe));	/* start from a zeroed SQE */
115 sqe->addr = (unsigned long) addr;
/*
 * Prepare a vectored read (IORING_OP_READV) of nr_vecs iovecs from fd
 * at the given file offset.
 */
119 static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
120 const struct iovec *iovecs,
121 unsigned nr_vecs, off_t offset)
123 io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
/*
 * Prepare a read (IORING_OP_READ_FIXED) into buf -- presumably a
 * buffer pre-registered via io_uring_register(); confirm.
 * NOTE(review): the offset parameter line is missing from this
 * excerpt, though the call below uses it.
 */
126 static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
127 void *buf, unsigned nbytes,
130 io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
/*
 * Prepare a vectored write (IORING_OP_WRITEV) of nr_vecs iovecs to fd
 * at the given file offset.
 */
133 static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
134 const struct iovec *iovecs,
135 unsigned nr_vecs, off_t offset)
137 io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
/*
 * Prepare a write (IORING_OP_WRITE_FIXED) from buf -- presumably a
 * buffer pre-registered via io_uring_register(); confirm.
 * NOTE(review): the offset parameter line is missing from this
 * excerpt, though the call below uses it.
 */
140 static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
141 const void *buf, unsigned nbytes,
144 io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
/*
 * Prepare a poll request (IORING_OP_POLL_ADD) on fd with a
 * poll(2)-style event mask.
 * NOTE(review): the poll_mask parameter line, the braces, and the
 * sqe->fd assignment are missing from this excerpt.
 */
147 static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
150 memset(sqe, 0, sizeof(*sqe));
151 sqe->opcode = IORING_OP_POLL_ADD;
153 sqe->poll_events = poll_mask;
/*
 * Prepare removal (IORING_OP_POLL_REMOVE) of an earlier poll request,
 * identified by the user_data it was submitted with.
 * NOTE(review): the user_data parameter line and the braces are
 * missing from this excerpt.
 */
156 static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
159 memset(sqe, 0, sizeof(*sqe));
160 sqe->opcode = IORING_OP_POLL_REMOVE;
161 sqe->addr = (unsigned long) user_data;
/*
 * Prepare an fsync (IORING_OP_FSYNC) on fd; fsync_flags presumably
 * carries IORING_FSYNC_* modifiers -- confirm against io_uring.h.
 * NOTE(review): the braces and the sqe->fd assignment are missing from
 * this excerpt.
 */
164 static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
165 unsigned fsync_flags)
167 memset(sqe, 0, sizeof(*sqe));
168 sqe->opcode = IORING_OP_FSYNC;
170 sqe->fsync_flags = fsync_flags;
/* Prepare a no-op request (IORING_OP_NOP). */
173 static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
175 memset(sqe, 0, sizeof(*sqe));
176 sqe->opcode = IORING_OP_NOP;