// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"

#if defined(CONFIG_NET)

	struct sockaddr __user		*addr;

	struct sockaddr __user		*addr;

	struct compat_msghdr __user	*umsg_compat;
	struct user_msghdr __user	*umsg;

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)

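/*
 * IORING_OP_SHUTDOWN: the only per-op argument is "how" (SHUT_RD/SHUT_WR/
 * SHUT_RDWR), carried in sqe->len; every other sqe field must be unused.
 */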
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

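/*
 * A short transfer on a stream or seqpacket socket can make progress on
 * retry, so MSG_WAITALL requests are requeued instead of completing short.
 */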
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

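/*
 * Cache the async msghdr on the ctx so the next send/recv that needs to go
 * async can reuse it without another allocation. Only done when the ring is
 * locked (i.e. not IO_URING_F_UNLOCKED), since the cache is not otherwise
 * serialized.
 */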
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!hdr || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_recvmsg_alloc_async(struct io_kiocb *req,
						       unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		struct io_async_msghdr *hdr;

		hdr = container_of(entry, struct io_async_msghdr, cache);
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req))
		return req->async_data;

	return NULL;
}

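/*
 * Copy the on-stack msghdr state into async data so the request can be
 * retried later (poll arming or io-wq). Returns -EAGAIN so the caller
 * parks the request instead of completing it.
 */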
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	async_msg = io_recvmsg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}

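/*
 * SENDMSG/RECVMSG keep a kernel copy of the user msghdr (and of the iovec,
 * if it does not fit in the inline fast_iov) so the request can be issued
 * from async context after submission returns.
 */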
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				   &iomsg->free_iov);
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

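/*
 * Issue path for IORING_OP_SENDMSG. A partial transfer on a retryable
 * socket is resubmitted, with progress recorded in sr->done_io so the
 * final CQE reports the total number of bytes sent.
 */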
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

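/*
 * IORING_OP_SEND: like io_sendmsg() but with a single user buffer instead
 * of a user msghdr, so no async copy of the header is needed.
 */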
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

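/*
 * For multishot recvmsg the name and control data are copied in front of
 * the payload in the provided buffer; reject setups whose combined header
 * size would overflow an int.
 */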
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

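/*
 * Multishot recvmsg lays out struct io_uring_recvmsg_out, the (possibly
 * truncated) name and the control data at the start of the selected buffer
 * and places the payload after them; carve that header out of the buffer
 * before receiving.
 */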
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

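/*
 * Issue path for IORING_OP_RECVMSG. In multishot mode the request stays
 * armed and posts one CQE per received message until the provided buffers
 * run out or an error/zero-byte receive ends it.
 */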
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					       IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

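/*
 * IORING_OP_RECV: single-buffer receive, optionally multishot with
 * provided buffers; sr->done_io accumulates partial progress for retries.
 */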
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;

	return ret;
}

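/*
 * IORING_OP_ACCEPT: accept4() semantics, with optional direct (fixed) file
 * slot installation and a multishot mode that keeps accepting until it has
 * to be re-armed.
 */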
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

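/*
 * IORING_OP_SOCKET: create a socket and install it either as a normal fd
 * or into a fixed file slot.
 */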
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

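/*
 * IORING_OP_CONNECT: the sockaddr is copied to kernel memory at prep time
 * (or on the first -EAGAIN) so the connect can be retried from async
 * context.
 */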
int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

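/*
 * Free one cached async msghdr when the ctx-wide netmsg cache is torn down.
 */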
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif