return IOU_ISSUE_SKIP_COMPLETE;
}
+/*
+ * Catch-all ->prep handler for opcodes whose real implementation is
+ * compiled out of this kernel build: fail the request at prep time with
+ * -EOPNOTSUPP, so no ->issue handler is ever needed for such slots.
+ * Marked __maybe_unused because a config with every optional feature
+ * enabled leaves this stub unreferenced.
+ */
+static __maybe_unused int io_eopnotsupp_prep(struct io_kiocb *kiocb,
+ const struct io_uring_sqe *sqe)
+{
+ return -EOPNOTSUPP;
+}
+
+#if defined(CONFIG_EPOLL)
+/*
+ * The CONFIG_EPOLL guard now wraps both epoll handlers as a unit; the
+ * old per-function #if/#else/-EOPNOTSUPP boilerplate below is removed
+ * because the !CONFIG_EPOLL case is handled by io_eopnotsupp_prep via
+ * the op table instead.
+ */
static int io_epoll_ctl_prep(struct io_kiocb *req,
 const struct io_uring_sqe *sqe)
 {
-#if defined(CONFIG_EPOLL)
 struct io_epoll *epoll = io_kiocb_to_cmd(req);
 if (sqe->buf_index || sqe->splice_fd_in)
 }
 return 0;
-#else
- return -EOPNOTSUPP;
-#endif
 }
 static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
 {
-#if defined(CONFIG_EPOLL)
 struct io_epoll *ie = io_kiocb_to_cmd(req);
 int ret;
 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 req_set_fail(req);
 io_req_set_res(req, ret, 0);
 return IOU_OK;
-#else
- return -EOPNOTSUPP;
-#endif
 }
+#endif
static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
[IORING_OP_EPOLL_CTL] = {
 .unbound_nonreg_file = 1,
 .audit_skip = 1,
+#if defined(CONFIG_EPOLL)
 .prep = io_epoll_ctl_prep,
 .issue = io_epoll_ctl,
+#else
+ /* epoll compiled out: reject at prep time; ->issue stays NULL and
+ * is never invoked because prep already fails the request. */
+ .prep = io_eopnotsupp_prep,
+#endif
 },
[IORING_OP_SPLICE] = {
.needs_file = 1,
for (i = 0; i < ARRAY_SIZE(io_op_defs); i++) {
 BUG_ON(!io_op_defs[i].prep);
- BUG_ON(!io_op_defs[i].issue);
+ /* Ops stubbed with io_eopnotsupp_prep legitimately have no ->issue
+ * (prep rejects the request first), so only enforce ->issue for
+ * ops with a real prep handler. */
+ if (io_op_defs[i].prep != io_eopnotsupp_prep)
+ BUG_ON(!io_op_defs[i].issue);
 }
req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |