// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
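/*
 * Per-opcode request state below lives in the io_kiocb command area and is
 * accessed through io_kiocb_to_cmd().
 */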
#if defined(CONFIG_NET)

	struct sockaddr __user		*addr;

	struct sockaddr __user		*addr;

	struct compat_msghdr __user	*umsg_compat;
	struct user_msghdr __user	*umsg;

#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
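/*
 * IORING_OP_SHUTDOWN: prep reads the "how" argument from sqe->len and
 * rejects unused SQE fields; issue resolves the socket from req->file and
 * calls __sys_shutdown_sock(), always completing inline.
 */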
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return true;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!hdr || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}
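/*
 * Async msghdr state is kept in a per-ring cache (ctx->netmsg_cache) so
 * retried send/recv requests don't have to allocate a fresh
 * io_async_msghdr every time; the cache is only used when the ring lock
 * is held (i.e. not IO_URING_F_UNLOCKED).
 */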
static struct io_async_msghdr *io_recvmsg_alloc_async(struct io_kiocb *req,
						      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;

	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
	    (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) {
		struct io_async_msghdr *hdr;

		hdr = container_of(entry, struct io_async_msghdr, cache);
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = hdr;
		return hdr;
	}

	if (!io_alloc_async_data(req))
		return req->async_data;

	return NULL;
}

static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg = req->async_data;

	if (async_msg)
		return -EAGAIN;
	async_msg = io_recvmsg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (!async_msg->free_iov)
		async_msg->msg.msg_iter.iov = async_msg->fast_iov;

	return -EAGAIN;
}
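/*
 * Copy the user-supplied msghdr for IORING_OP_SENDMSG into kernel space,
 * pointing msg_name at the embedded address storage and letting
 * sendmsg_copy_msghdr() import the iovec (into fast_iov when it fits).
 */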
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
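/*
 * Issue side of IORING_OP_SENDMSG. A short send or -EAGAIN parks the
 * request via io_setup_async_msg() for a retry; partial progress is
 * accumulated in sr->done_io and folded into the final CQE result.
 */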
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
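/*
 * IORING_OP_SEND: like io_sendmsg() but for a single user buffer, so the
 * msghdr is built on the stack and no user msghdr has to be copied.
 * Partial sends advance sr->buf/sr->len and return -EAGAIN for a retry.
 */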
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
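/*
 * Check that the multishot recvmsg header (io_uring_recvmsg_out plus name
 * and control space) fits in an int without overflowing.
 */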
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}
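/* Compat (32-bit) variant of the recvmsg header import. */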
#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		if (msg.msg_iovlen == 0) {
			sr->len = 0;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(READ, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}
int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}
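/*
 * Reset per-iteration state before re-arming a multishot receive; the
 * length is cleared so the next iteration takes it from the provided
 * buffer.
 */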
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
				    cflags | IORING_CQE_F_MORE, false)) {
			io_recv_prep_retry(req);
			return false;
		}
		/*
		 * Otherwise stop multishot but use the current result.
		 * Probably will end up going into overflow, but this means
		 * we cannot trust the ordering anymore
		 */
	}

	io_req_set_res(req, *ret, cflags);

	if (req->flags & REQ_F_POLLED)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}
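/*
 * For multishot recvmsg the selected buffer is laid out as a
 * struct io_uring_recvmsg_out, followed by space for the (possibly
 * truncated) name and control data, followed by the payload. Carve the
 * header space out of the buffer and point the control destination at it.
 */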
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};
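/*
 * Perform one multishot recvmsg iteration: receive into the carved-out
 * buffer, then copy the io_uring_recvmsg_out header (and any source
 * address) back to the start of the user buffer.
 */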
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}
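/*
 * Issue side of IORING_OP_RECVMSG, covering both single-shot and
 * multishot (provided buffer) mode. Retries and poll re-arming are driven
 * by io_setup_async_msg() and io_recv_finish().
 */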
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (req->flags & IO_APOLL_MULTI_POLLED) ==
					       IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished))
		goto retry_multishot;

	if (mshot_finished) {
		io_netmsg_recycle(req, issue_flags);
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}
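/*
 * IORING_OP_RECV: single-buffer receive, with support for provided
 * buffers and multishot re-arming via io_recv_finish().
 */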
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(READ, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if ((req->flags & IO_APOLL_MULTI_POLLED) == IO_APOLL_MULTI_POLLED) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0))
		goto retry_multishot;

	return ret;
}
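/*
 * Zero-copy send: the payload is pinned and handed to the network stack
 * via MSG_ZEROCOPY (msg.msg_ubuf), with completion notifications routed
 * through the notification slot selected in the SQE.
 */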
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sendzc *zc = io_kiocb_to_cmd(req);

	if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))
		return -EINVAL;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (zc->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	zc->slot_idx = READ_ONCE(sqe->notification_idx);
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	zc->addr_len = READ_ONCE(sqe->addr_len);

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage address;
	struct io_ring_ctx *ctx = req->ctx;
	struct io_sendzc *zc = io_kiocb_to_cmd(req);
	struct io_notif_slot *notif_slot;
	struct io_notif *notif;
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (issue_flags & IO_URING_F_UNLOCKED)
		return -EAGAIN;
	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	notif_slot = io_get_notif_slot(ctx, zc->slot_idx);
	if (!notif_slot)
		return -EINVAL;
	notif = io_get_notif(ctx, notif_slot);
	if (!notif)
		return -ENOMEM;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	ret = import_single_range(WRITE, zc->buf, zc->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;
	mm_account_pinned_pages(&notif->uarg.mmp, zc->len);

	if (zc->addr) {
		ret = move_addr_to_kernel(zc->addr, zc->addr_len, &address);
		if (unlikely(ret < 0))
			return ret;
		msg.msg_name = (struct sockaddr *)&address;
		msg.msg_namelen = zc->addr_len;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &notif->uarg;
	msg.sg_from_iter = NULL;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		return ret == -ERESTARTSYS ? -EINTR : ret;
	}

	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
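/*
 * IORING_OP_ACCEPT: supports direct descriptors (file_index) and
 * multishot mode, where one request keeps posting a CQE per accepted
 * connection.
 */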
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if ((req->flags & IO_APOLL_MULTI_POLLED) ==
			    IO_APOLL_MULTI_POLLED)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret >= 0 &&
	    io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
		goto retry;

	io_req_set_res(req, ret, 0);
	if (req->flags & REQ_F_POLLED)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}
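/*
 * IORING_OP_SOCKET: create a socket, installing either a normal fd or a
 * fixed (direct) descriptor in the chosen file slot.
 */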
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					    sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
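/*
 * IORING_OP_CONNECT: the sockaddr is copied to kernel space up front (or
 * on the first -EAGAIN) so the request can be retried from async context
 * without touching user memory again.
 */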
int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	return 0;
}

int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
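/* Release a cached io_async_msghdr when the ring's netmsg cache is torn down. */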
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif