// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file		*file;
	int			how;
};

struct io_accept {
	struct file		*file;
	struct sockaddr __user	*addr;
	int __user		*addr_len;
	int			flags;
	u32			file_slot;
	unsigned long		nofile;
};

struct io_socket {
	struct file		*file;
	int			domain;
	int			type;
	int			protocol;
	int			flags;
	u32			file_slot;
	unsigned long		nofile;
};

struct io_connect {
	struct file		*file;
	struct sockaddr __user	*addr;
	int			addr_len;
	bool			in_progress;
};

struct io_sr_msg {
	struct file		*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned		len;
	unsigned		done_io;
	unsigned		msg_flags;
	u16			flags;
	/* initialised and used only by !msg send variants */
	u16			addr_len;
	void __user		*addr;
	/* used only for send zerocopy */
	struct io_kiocb		*notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow to post CQEs from the original
	 * task context. Usual request completions will be handled in other
	 * generic paths but multipoll may decide to post extra cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

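/* IORING_OP_SHUTDOWN: shut down one or both directions of a connected socket */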
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	if (issue_flags & IO_URING_F_NONBLOCK)
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

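/*
 * Try to stash the async msghdr in the per-ring cache rather than freeing
 * it, so a later send/recv request can reuse the allocation.
 */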
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

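/*
 * Copy the on-stack msghdr into persistently allocated async data so the
 * request can be retried later, fixing up any pointers that referenced
 * the on-stack copy.
 */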
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we were using fast_iov, set it to the new one */
	if (!kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

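/* IORING_OP_SENDMSG: sendmsg(2)-style send using a user msghdr */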
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

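/* IORING_OP_SEND: single-buffer send, optionally to an explicit address */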
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished,
				  unsigned issue_flags)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
			io_recv_prep_retry(req);
			return false;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

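/*
 * For multishot recvmsg, reserve room at the front of the selected buffer
 * for the io_uring_recvmsg_out header plus name and control data; the
 * payload is received into whatever space remains.
 */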
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

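/* IORING_OP_RECVMSG: recvmsg(2)-style receive, optionally multishot */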
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		kmsg->fast_iov[0].iov_base = buf;
		kmsg->fast_iov[0].iov_len = len;
		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
				len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

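/* IORING_OP_RECV: single-buffer receive, optionally multishot */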
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	struct iovec iov;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

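/*
 * Helpers that feed user memory into an skb for zero-copy sends:
 * io_sg_from_iter() fills skb fragments directly from a bvec-backed
 * iterator (registered buffers), while io_sg_from_iter_iovec() downgrades
 * the skb to unmanaged frags and uses the generic
 * __zerocopy_sg_from_iter() path for plain iovecs.
 */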
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

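/* IORING_OP_SEND_ZC: zero-copy send of a single buffer */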
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct iovec iov;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
					  &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

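/*
 * IORING_OP_SENDMSG_ZC: zero-copy sendmsg; the completion CQE carries
 * IORING_CQE_F_MORE and a separate IORING_CQE_F_NOTIF CQE is posted once
 * the kernel is done with the buffers.
 */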
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

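/* IORING_OP_ACCEPT: accept a connection, optionally multishot and/or into a fixed file slot */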
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

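/* IORING_OP_SOCKET: create a socket, optionally installing it into a fixed file slot */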
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = false;
	return 0;
}

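/*
 * IORING_OP_CONNECT: connect a socket; a connect that returns -EINPROGRESS
 * is retried via poll and completed by reporting sock_error().
 */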
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else {
			if (req_has_async_data(req))
				return -EAGAIN;
			if (io_alloc_async_data(req)) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(req->async_data, &__io, sizeof(__io));
		}
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif