If we defer a request, we can't be reading the opcode again. Ensure that
the user_data and opcode fields are stable. For the user_data we already
have a place for it, for the opcode we can fill a one byte hole and store
that as well. For both of them, assign them when we originally read the
SQE in io_get_sqring(). Any code that uses sqe->opcode or sqe->user_data
is switched to req->opcode and req->user_data.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
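For illustration only, here is a minimal userspace C sketch of the pattern the patch applies (the example_* names are made up, not kernel code): any field the kernel may need after the request has been deferred is snapshotted out of the shared SQE into the kernel-private request at the moment the SQE is first read, and later processing only consults the snapshot.

#include <stdint.h>

/*
 * Simplified stand-ins for the real structures. The SQE lives in the ring
 * shared with userspace, so it may change under us at any time.
 */
struct example_sqe {                    /* shared with userspace */
	uint8_t  opcode;
	uint64_t user_data;
};

struct example_req {                    /* kernel-private per-request state */
	const struct example_sqe *sqe;
	uint8_t  opcode;                /* stable copy, fits a one byte hole */
	uint64_t user_data;             /* stable copy */
};

/* Snapshot once, at the point the SQE is first read. */
static void example_get_sqe(struct example_req *req,
			    const struct example_sqe *sqe)
{
	req->sqe = sqe;
	req->opcode = sqe->opcode;      /* READ_ONCE() in the kernel proper */
	req->user_data = sqe->user_data;
}

/* Possibly-deferred processing never re-reads req->sqe->opcode. */
static int example_issue(const struct example_req *req)
{
	switch (req->opcode) {
	default:
		return 0;
	}
}

int main(void)
{
	struct example_sqe sqe = { .opcode = 1, .user_data = 42 };
	struct example_req req;

	example_get_sqe(&req, &sqe);
	return example_issue(&req);     /* uses the cached opcode */
}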
bool has_user;
bool in_async;
bool needs_fixed_file;
+ u8 opcode;
struct io_ring_ctx *ctx;
union {
-static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
+static inline bool io_req_needs_user(struct io_kiocb *req)
- u8 opcode = READ_ONCE(sqe->opcode);
-
- return !(opcode == IORING_OP_READ_FIXED ||
- opcode == IORING_OP_WRITE_FIXED);
+ return !(req->opcode == IORING_OP_READ_FIXED ||
+ req->opcode == IORING_OP_WRITE_FIXED);
}
static inline bool io_prep_async_work(struct io_kiocb *req,
bool do_hashed = false;
if (req->sqe) {
- switch (req->sqe->opcode) {
+ switch (req->opcode) {
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
/* only regular files should be hashed for writes */
req->work.flags |= IO_WQ_WORK_UNBOUND;
break;
}
- if (io_sqe_needs_user(req->sqe))
+ if (io_req_needs_user(req))
req->work.flags |= IO_WQ_WORK_NEEDS_USER;
}
trace_io_uring_fail_link(req, link);
if ((req->flags & REQ_F_LINK_TIMEOUT) &&
- link->sqe->opcode == IORING_OP_LINK_TIMEOUT) {
+ link->opcode == IORING_OP_LINK_TIMEOUT) {
io_link_cancel_timeout(link);
} else {
io_cqring_fill_event(link, -ECANCELED);
* for that purpose and instead let the caller pass in the read/write
* flag.
*/
- opcode = READ_ONCE(sqe->opcode);
+ opcode = req->opcode;
if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
*iovec = NULL;
return io_import_fixed(req->ctx, rw, sqe, iter);
struct iov_iter iter;
ssize_t ret;
- switch (io->sqe.opcode) {
+ switch (req->opcode) {
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
/* ensure prep does right import */
static int io_issue_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock)
{
struct io_ring_ctx *ctx = req->ctx;
- opcode = READ_ONCE(req->sqe->opcode);
- switch (opcode) {
+ switch (req->opcode) {
case IORING_OP_NOP:
ret = io_nop(req);
break;
return op >= IORING_OP_NOP && op < IORING_OP_LAST;
}
-static int io_op_needs_file(const struct io_uring_sqe *sqe)
+static int io_req_needs_file(struct io_kiocb *req)
- int op = READ_ONCE(sqe->opcode);
-
- switch (op) {
+ switch (req->opcode) {
case IORING_OP_NOP:
case IORING_OP_POLL_REMOVE:
case IORING_OP_TIMEOUT:
case IORING_OP_LINK_TIMEOUT:
return 0;
default:
- if (io_req_op_valid(op))
+ if (io_req_op_valid(req->opcode))
return 1;
return -EINVAL;
}
if (flags & IOSQE_IO_DRAIN)
req->flags |= REQ_F_IO_DRAIN;
- ret = io_op_needs_file(req->sqe);
+ ret = io_req_needs_file(req);
if (ret <= 0)
return ret;
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
link_list);
- if (!nxt || nxt->sqe->opcode != IORING_OP_LINK_TIMEOUT)
+ if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
return NULL;
req->flags |= REQ_F_LINK_TIMEOUT;
struct io_ring_ctx *ctx = req->ctx;
int ret;
- req->user_data = req->sqe->user_data;
-
/* enforce forwards compatibility on users */
if (unlikely(req->sqe->flags & ~SQE_VALID_FLAGS)) {
ret = -EINVAL;
*/
req->sequence = ctx->cached_sq_head;
req->sqe = &ctx->sq_sqes[head];
+ req->opcode = READ_ONCE(req->sqe->opcode);
+ req->user_data = READ_ONCE(req->sqe->user_data);
ctx->cached_sq_head++;
return true;
}
- if (io_sqe_needs_user(req->sqe) && !*mm) {
+ if (io_req_needs_user(req) && !*mm) {
mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
if (!mm_fault) {
use_mm(ctx->sqo_mm);
req->has_user = *mm != NULL;
req->in_async = async;
req->needs_fixed_file = async;
- trace_io_uring_submit_sqe(ctx, req->sqe->user_data,
- true, async);
+ trace_io_uring_submit_sqe(ctx, req->user_data, true, async);
if (!io_submit_sqe(req, statep, &link))
break;
/*