1 // SPDX-License-Identifier: GPL-2.0
3 * linux/net/sunrpc/xprtsock.c
5 * Client-side transport implementation for sockets.
7 * TCP callback races fixes (C) 1998 Red Hat
8 * TCP send fixes (C) 1998 Red Hat
9 * TCP NFS related read + write fixes
10 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
12 * Rewrite of large parts of the code in order to stabilize the TCP transport.
13 * Fix behaviour when socket buffer is full.
14 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
16 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
18 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
19 * <gilles.quillard@bull.net>
22 #include <linux/types.h>
23 #include <linux/string.h>
24 #include <linux/slab.h>
25 #include <linux/module.h>
26 #include <linux/capability.h>
27 #include <linux/pagemap.h>
28 #include <linux/errno.h>
29 #include <linux/socket.h>
31 #include <linux/net.h>
34 #include <linux/udp.h>
35 #include <linux/tcp.h>
36 #include <linux/sunrpc/clnt.h>
37 #include <linux/sunrpc/addr.h>
38 #include <linux/sunrpc/sched.h>
39 #include <linux/sunrpc/svcsock.h>
40 #include <linux/sunrpc/xprtsock.h>
41 #include <linux/file.h>
42 #ifdef CONFIG_SUNRPC_BACKCHANNEL
43 #include <linux/sunrpc/bc_xprt.h>
47 #include <net/checksum.h>
50 #include <linux/bvec.h>
51 #include <linux/highmem.h>
52 #include <linux/uio.h>
53 #include <linux/sched/mm.h>
55 #include <trace/events/sunrpc.h>
60 static void xs_close(struct rpc_xprt *xprt);
61 static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
67 static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
68 static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
69 static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
71 static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
72 static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
74 #define XS_TCP_LINGER_TO (15U * HZ)
75 static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
78 * We can register our own files under /proc/sys/sunrpc by
79 * calling register_sysctl_table() again. The files in that
80 * directory become the union of all files registered there.
82 * We simply need to make sure that we don't collide with
83 * someone else's file names!
86 static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
87 static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
88 static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
89 static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
90 static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
92 static struct ctl_table_header *sunrpc_table_header;
95 * FIXME: changing the UDP slot table size should also resize the UDP
96 * socket buffers for existing UDP transports
98 static struct ctl_table xs_tunables_table[] = {
100 .procname = "udp_slot_table_entries",
101 .data = &xprt_udp_slot_table_entries,
102 .maxlen = sizeof(unsigned int),
104 .proc_handler = proc_dointvec_minmax,
105 .extra1 = &min_slot_table_size,
106 .extra2 = &max_slot_table_size
109 .procname = "tcp_slot_table_entries",
110 .data = &xprt_tcp_slot_table_entries,
111 .maxlen = sizeof(unsigned int),
113 .proc_handler = proc_dointvec_minmax,
114 .extra1 = &min_slot_table_size,
115 .extra2 = &max_slot_table_size
118 .procname = "tcp_max_slot_table_entries",
119 .data = &xprt_max_tcp_slot_table_entries,
120 .maxlen = sizeof(unsigned int),
122 .proc_handler = proc_dointvec_minmax,
123 .extra1 = &min_slot_table_size,
124 .extra2 = &max_tcp_slot_table_limit
127 .procname = "min_resvport",
128 .data = &xprt_min_resvport,
129 .maxlen = sizeof(unsigned int),
131 .proc_handler = proc_dointvec_minmax,
132 .extra1 = &xprt_min_resvport_limit,
133 .extra2 = &xprt_max_resvport_limit
136 .procname = "max_resvport",
137 .data = &xprt_max_resvport,
138 .maxlen = sizeof(unsigned int),
140 .proc_handler = proc_dointvec_minmax,
141 .extra1 = &xprt_min_resvport_limit,
142 .extra2 = &xprt_max_resvport_limit
145 .procname = "tcp_fin_timeout",
146 .data = &xs_tcp_fin_timeout,
147 .maxlen = sizeof(xs_tcp_fin_timeout),
149 .proc_handler = proc_dointvec_jiffies,
154 static struct ctl_table sunrpc_table[] = {
156 .procname = "sunrpc",
158 .child = xs_tunables_table
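/*
 * Once registered, these tunables appear as ordinary sysctl files, e.g.
 * /proc/sys/sunrpc/udp_slot_table_entries and
 * /proc/sys/sunrpc/tcp_fin_timeout, alongside whatever other SUNRPC
 * modules have registered under the same directory.
 */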
164 * Wait duration for a reply from the RPC portmapper.
166 #define XS_BIND_TO (60U * HZ)
169 * Delay if a UDP socket connect error occurs. This is most likely some
170 * kind of resource problem on the local host.
172 #define XS_UDP_REEST_TO (2U * HZ)
175 * The reestablish timeout allows clients to delay for a bit before attempting
176 * to reconnect to a server that just dropped our connection.
178 * We implement an exponential backoff when trying to reestablish a TCP
179 * transport connection with the server. Some servers like to drop a TCP
180 * connection when they are overworked, so we start with a short timeout and
181 * increase over time if the server is down or not responding.
183 #define XS_TCP_INIT_REEST_TO (3U * HZ)
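/*
 * Illustration: starting from this value the reconnect delay grows
 * roughly as 3s, 6s, 12s, ... (the doubling itself is done by the
 * generic xprt reconnect-backoff code) until it is capped by the
 * transport's maximum reconnect timeout.
 */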
186 * TCP idle timeout; client drops the transport socket if it is idle
187 * for this long. Note that we also timeout UDP sockets to prevent
188 * holding port numbers when there is no RPC traffic.
190 #define XS_IDLE_DISC_TO (5U * 60 * HZ)
192 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
193 # undef RPC_DEBUG_DATA
194 # define RPCDBG_FACILITY RPCDBG_TRANS
197 #ifdef RPC_DEBUG_DATA
198 static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
200 u8 *buf = (u8 *) packet;
203 dprintk("RPC: %s\n", msg);
204 for (j = 0; j < count && j < 128; j += 4) {
208 dprintk("0x%04x ", j);
210 dprintk("%02x%02x%02x%02x ",
211 buf[j], buf[j+1], buf[j+2], buf[j+3]);
216 static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
222 static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
224 return (struct rpc_xprt *) sk->sk_user_data;
227 static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
229 return (struct sockaddr *) &xprt->addr;
232 static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
234 return (struct sockaddr_un *) &xprt->addr;
237 static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
239 return (struct sockaddr_in *) &xprt->addr;
242 static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
244 return (struct sockaddr_in6 *) &xprt->addr;
247 static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
249 struct sockaddr *sap = xs_addr(xprt);
250 struct sockaddr_in6 *sin6;
251 struct sockaddr_in *sin;
252 struct sockaddr_un *sun;
255 switch (sap->sa_family) {
257 sun = xs_addr_un(xprt);
258 strlcpy(buf, sun->sun_path, sizeof(buf));
259 xprt->address_strings[RPC_DISPLAY_ADDR] =
260 kstrdup(buf, GFP_KERNEL);
263 (void)rpc_ntop(sap, buf, sizeof(buf));
264 xprt->address_strings[RPC_DISPLAY_ADDR] =
265 kstrdup(buf, GFP_KERNEL);
266 sin = xs_addr_in(xprt);
267 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
270 (void)rpc_ntop(sap, buf, sizeof(buf));
271 xprt->address_strings[RPC_DISPLAY_ADDR] =
272 kstrdup(buf, GFP_KERNEL);
273 sin6 = xs_addr_in6(xprt);
274 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
280 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
283 static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
285 struct sockaddr *sap = xs_addr(xprt);
288 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
289 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
291 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
292 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
295 static void xs_format_peer_addresses(struct rpc_xprt *xprt,
296 const char *protocol,
299 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
300 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
301 xs_format_common_peer_addresses(xprt);
302 xs_format_common_peer_ports(xprt);
305 static void xs_update_peer_port(struct rpc_xprt *xprt)
307 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
308 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
310 xs_format_common_peer_ports(xprt);
313 static void xs_free_peer_addresses(struct rpc_xprt *xprt)
317 for (i = 0; i < RPC_DISPLAY_MAX; i++)
319 case RPC_DISPLAY_PROTO:
320 case RPC_DISPLAY_NETID:
323 kfree(xprt->address_strings[i]);
328 xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
332 if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
334 n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
335 for (i = 0; i < n; i++) {
338 buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
339 if (!buf->pages[i]) {
341 return i > buf->page_base ? i - buf->page_base : 0;
348 xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
352 iov_iter_advance(&msg->msg_iter, seek);
353 ret = sock_recvmsg(sock, msg, flags);
354 return ret > 0 ? ret + seek : ret;
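/*
 * In these read helpers, 'seek' is the number of bytes of the current
 * element that a previous partial read already consumed: the iterator is
 * advanced past them before calling sock_recvmsg(), and they are added
 * back to a successful return value so callers always see a cumulative
 * count.
 */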
358 xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
359 struct kvec *kvec, size_t count, size_t seek)
361 iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
362 return xs_sock_recvmsg(sock, msg, flags, seek);
366 xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
367 struct bio_vec *bvec, unsigned long nr, size_t count,
370 iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
371 return xs_sock_recvmsg(sock, msg, flags, seek);
375 xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
378 iov_iter_discard(&msg->msg_iter, READ, count);
379 return sock_recvmsg(sock, msg, flags);
382 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
384 xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
386 struct bvec_iter bi = {
391 bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
392 for_each_bvec(bv, bvec, bi, bi)
393 flush_dcache_page(bv.bv_page);
397 xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
403 xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
404 struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
406 size_t want, seek_init = seek, offset = 0;
409 want = min_t(size_t, count, buf->head[0].iov_len);
411 ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
415 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
425 want = xs_alloc_sparse_pages(buf,
426 min_t(size_t, count - offset, buf->page_len),
429 ret = xs_read_bvec(sock, msg, flags, buf->bvec,
430 xdr_buf_pagecount(buf),
431 want + buf->page_base,
432 seek + buf->page_base);
435 xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
436 ret -= buf->page_base;
438 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
448 want = min_t(size_t, count - offset, buf->tail[0].iov_len);
450 ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
454 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
458 } else if (offset < seek_init)
462 *read = offset - seek_init;
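/*
 * Before copying the body of a new request, replay the words already
 * pulled off the wire into transport->recv.xid (the XID and the call
 * direction, i.e. the first two words of the RPC message) into the head
 * of the receive buffer, so that the buffer ends up holding a complete
 * message.
 */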
470 xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
472 if (!transport->recv.copied) {
473 if (buf->head[0].iov_len >= transport->recv.offset)
474 memcpy(buf->head[0].iov_base,
475 &transport->recv.xid,
476 transport->recv.offset);
477 transport->recv.copied = transport->recv.offset;
482 xs_read_stream_request_done(struct sock_xprt *transport)
484 return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
488 xs_read_stream_check_eor(struct sock_xprt *transport,
491 if (xs_read_stream_request_done(transport))
492 msg->msg_flags |= MSG_EOR;
496 xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
497 int flags, struct rpc_rqst *req)
499 struct xdr_buf *buf = &req->rq_private_buf;
503 xs_read_header(transport, buf);
505 want = transport->recv.len - transport->recv.offset;
507 ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
508 transport->recv.copied + want,
509 transport->recv.copied,
511 transport->recv.offset += read;
512 transport->recv.copied += read;
515 if (transport->recv.offset == transport->recv.len)
516 xs_read_stream_check_eor(transport, msg);
526 msg->msg_flags |= MSG_TRUNC;
531 return ret < 0 ? ret : read;
535 xs_read_stream_headersize(bool isfrag)
538 return sizeof(__be32);
539 return 3 * sizeof(__be32);
543 xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
544 int flags, size_t want, size_t seek)
547 .iov_base = &transport->recv.fraghdr,
550 return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
553 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
555 xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
557 struct rpc_xprt *xprt = &transport->xprt;
558 struct rpc_rqst *req;
561 /* Is this transport associated with the backchannel? */
565 /* Look up and lock the request corresponding to the given XID */
566 req = xprt_lookup_bc_request(xprt, transport->recv.xid);
568 printk(KERN_WARNING "Callback slot table overflowed\n");
571 if (transport->recv.copied && !req->rq_private_buf.len)
574 ret = xs_read_stream_request(transport, msg, flags, req);
575 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
576 xprt_complete_bc_request(req, transport->recv.copied);
578 req->rq_private_buf.len = transport->recv.copied;
582 #else /* CONFIG_SUNRPC_BACKCHANNEL */
584 xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
588 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
591 xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
593 struct rpc_xprt *xprt = &transport->xprt;
594 struct rpc_rqst *req;
597 /* Look up and lock the request corresponding to the given XID */
598 spin_lock(&xprt->queue_lock);
599 req = xprt_lookup_rqst(xprt, transport->recv.xid);
600 if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
601 msg->msg_flags |= MSG_TRUNC;
605 spin_unlock(&xprt->queue_lock);
607 ret = xs_read_stream_request(transport, msg, flags, req);
609 spin_lock(&xprt->queue_lock);
610 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
611 xprt_complete_rqst(req->rq_task, transport->recv.copied);
613 req->rq_private_buf.len = transport->recv.copied;
614 xprt_unpin_rqst(req);
616 spin_unlock(&xprt->queue_lock);
621 xs_read_stream(struct sock_xprt *transport, int flags)
623 struct msghdr msg = { 0 };
624 size_t want, read = 0;
627 if (transport->recv.len == 0) {
628 want = xs_read_stream_headersize(transport->recv.copied != 0);
629 ret = xs_read_stream_header(transport, &msg, flags, want,
630 transport->recv.offset);
633 transport->recv.offset = ret;
634 if (transport->recv.offset != want)
635 return transport->recv.offset;
636 transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
637 RPC_FRAGMENT_SIZE_MASK;
638 transport->recv.offset -= sizeof(transport->recv.fraghdr);
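/*
 * The record marking header parsed above carries the fragment length in
 * its low 31 bits (RPC_FRAGMENT_SIZE_MASK); the top bit, tested via
 * RPC_LAST_STREAM_FRAGMENT, flags the final fragment of the record.
 */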
642 switch (be32_to_cpu(transport->recv.calldir)) {
644 msg.msg_flags |= MSG_TRUNC;
647 ret = xs_read_stream_call(transport, &msg, flags);
650 ret = xs_read_stream_reply(transport, &msg, flags);
652 if (msg.msg_flags & MSG_TRUNC) {
653 transport->recv.calldir = cpu_to_be32(-1);
654 transport->recv.copied = -1;
659 if (transport->recv.offset < transport->recv.len) {
660 if (!(msg.msg_flags & MSG_TRUNC))
663 ret = xs_read_discard(transport->sock, &msg, flags,
664 transport->recv.len - transport->recv.offset);
667 transport->recv.offset += ret;
669 if (transport->recv.offset != transport->recv.len)
672 if (xs_read_stream_request_done(transport)) {
673 trace_xs_stream_read_request(transport);
674 transport->recv.copied = 0;
676 transport->recv.offset = 0;
677 transport->recv.len = 0;
680 return ret != 0 ? ret : -ESHUTDOWN;
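/*
 * Returning -ESHUTDOWN here (nothing read, no other error) means the
 * peer closed the stream; xs_stream_data_receive() reacts by shutting
 * the socket down.
 */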
683 static __poll_t xs_poll_socket(struct sock_xprt *transport)
685 return transport->sock->ops->poll(transport->file, transport->sock,
689 static bool xs_poll_socket_readable(struct sock_xprt *transport)
691 __poll_t events = xs_poll_socket(transport);
693 return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
696 static void xs_poll_check_readable(struct sock_xprt *transport)
699 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
700 if (!xs_poll_socket_readable(transport))
702 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
703 queue_work(xprtiod_workqueue, &transport->recv_worker);
706 static void xs_stream_data_receive(struct sock_xprt *transport)
711 mutex_lock(&transport->recv_mutex);
712 if (transport->sock == NULL)
715 ret = xs_read_stream(transport, MSG_DONTWAIT);
721 if (ret == -ESHUTDOWN)
722 kernel_sock_shutdown(transport->sock, SHUT_RDWR);
724 xs_poll_check_readable(transport);
726 mutex_unlock(&transport->recv_mutex);
727 trace_xs_stream_read_data(&transport->xprt, ret, read);
730 static void xs_stream_data_receive_workfn(struct work_struct *work)
732 struct sock_xprt *transport =
733 container_of(work, struct sock_xprt, recv_worker);
734 unsigned int pflags = memalloc_nofs_save();
736 xs_stream_data_receive(transport);
737 memalloc_nofs_restore(pflags);
741 xs_stream_reset_connect(struct sock_xprt *transport)
743 transport->recv.offset = 0;
744 transport->recv.len = 0;
745 transport->recv.copied = 0;
746 transport->xmit.offset = 0;
750 xs_stream_start_connect(struct sock_xprt *transport)
752 transport->xprt.stat.connect_count++;
753 transport->xprt.stat.connect_start = jiffies;
756 #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
759 * xs_nospace - handle an incomplete transmit
760 * @req: pointer to RPC request
763 static int xs_nospace(struct rpc_rqst *req)
765 struct rpc_xprt *xprt = req->rq_xprt;
766 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
767 struct sock *sk = transport->inet;
770 trace_rpc_socket_nospace(req, transport);
772 /* Protect against races with write_space */
773 spin_lock(&xprt->transport_lock);
775 /* Don't race with disconnect */
776 if (xprt_connected(xprt)) {
777 /* wait for more buffer space */
778 sk->sk_write_pending++;
779 xprt_wait_for_buffer_space(xprt);
783 spin_unlock(&xprt->transport_lock);
785 /* Race breaker in case memory is freed before above code is called */
786 if (ret == -EAGAIN) {
787 struct socket_wq *wq;
790 wq = rcu_dereference(sk->sk_wq);
791 set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
794 sk->sk_write_space(sk);
800 xs_stream_prepare_request(struct rpc_rqst *req)
802 xdr_free_bvec(&req->rq_rcv_buf);
803 req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
807 * Determine if the previous message in the stream was aborted before it
808 * could complete transmission.
811 xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
813 return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
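/*
 * A non-zero xmit.offset while rq_bytes_sent is still zero means the
 * bytes already written to the stream belong to an earlier request that
 * was torn down mid-record, so the connection has to be shut down before
 * this request can be sent.
 */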
817 * Return the stream record marker field for a record of length < 2^31-1
820 xs_stream_record_marker(struct xdr_buf *xdr)
824 return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
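/*
 * Worked example (illustrative values): a 100-byte request sent as a
 * single fragment gets the marker cpu_to_be32(0x80000000 | 100), i.e.
 * 0x80000064 on the wire.
 */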
828 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
829 * @req: pointer to RPC request
832 * 0: The request has been sent
833 * EAGAIN: The socket was blocked, please call again later to
834 * complete the request
835 * ENOTCONN: Caller needs to invoke connect logic then call again
836 * other: Some other error occurred, the request was not sent
838 static int xs_local_send_request(struct rpc_rqst *req)
840 struct rpc_xprt *xprt = req->rq_xprt;
841 struct sock_xprt *transport =
842 container_of(xprt, struct sock_xprt, xprt);
843 struct xdr_buf *xdr = &req->rq_snd_buf;
844 rpc_fraghdr rm = xs_stream_record_marker(xdr);
845 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
846 struct msghdr msg = {
847 .msg_flags = XS_SENDMSG_FLAGS,
852 /* Close the stream if the previous transmission was incomplete */
853 if (xs_send_request_was_aborted(transport, req)) {
858 xs_pktdump("packet data:",
859 req->rq_svec->iov_base, req->rq_svec->iov_len);
861 req->rq_xtime = ktime_get();
862 status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
863 transport->xmit.offset, rm, &sent);
864 dprintk("RPC: %s(%u) = %d\n",
865 __func__, xdr->len - transport->xmit.offset, status);
867 if (status == -EAGAIN && sock_writeable(transport->inet))
870 if (likely(sent > 0) || status == 0) {
871 transport->xmit.offset += sent;
872 req->rq_bytes_sent = transport->xmit.offset;
873 if (likely(req->rq_bytes_sent >= msglen)) {
874 req->rq_xmit_bytes_sent += transport->xmit.offset;
875 transport->xmit.offset = 0;
885 status = xs_nospace(req);
888 dprintk("RPC: sendmsg returned unrecognized error %d\n",
900 * xs_udp_send_request - write an RPC request to a UDP socket
901 * @req: pointer to RPC request
904 * 0: The request has been sent
905 * EAGAIN: The socket was blocked, please call again later to
906 * complete the request
907 * ENOTCONN: Caller needs to invoke connect logic then call again
908 * other: Some other error occurred, the request was not sent
910 static int xs_udp_send_request(struct rpc_rqst *req)
912 struct rpc_xprt *xprt = req->rq_xprt;
913 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
914 struct xdr_buf *xdr = &req->rq_snd_buf;
915 struct msghdr msg = {
916 .msg_name = xs_addr(xprt),
917 .msg_namelen = xprt->addrlen,
918 .msg_flags = XS_SENDMSG_FLAGS,
923 xs_pktdump("packet data:",
924 req->rq_svec->iov_base,
925 req->rq_svec->iov_len);
927 if (!xprt_bound(xprt))
930 if (!xprt_request_get_cong(xprt, req))
933 req->rq_xtime = ktime_get();
934 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
936 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
939 /* firewall is blocking us, don't return -EAGAIN or we end up looping */
940 if (status == -EPERM)
943 if (status == -EAGAIN && sock_writeable(transport->inet))
946 if (sent > 0 || status == 0) {
947 req->rq_xmit_bytes_sent += sent;
948 if (sent >= req->rq_slen)
950 /* Still some bytes left; set up for a retry later. */
958 /* Should we call xs_close() here? */
961 status = xs_nospace(req);
968 /* When the server has died, an ICMP port unreachable message
969 * prompts ECONNREFUSED. */
972 dprintk("RPC: sendmsg returned unrecognized error %d\n",
980 * xs_tcp_send_request - write an RPC request to a TCP socket
981 * @req: pointer to RPC request
984 * 0: The request has been sent
985 * EAGAIN: The socket was blocked, please call again later to
986 * complete the request
987 * ENOTCONN: Caller needs to invoke connect logic then call again
988 * other: Some other error occurred, the request was not sent
990 * XXX: In the case of soft timeouts, should we eventually give up
991 * if sendmsg is not able to make progress?
993 static int xs_tcp_send_request(struct rpc_rqst *req)
995 struct rpc_xprt *xprt = req->rq_xprt;
996 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
997 struct xdr_buf *xdr = &req->rq_snd_buf;
998 rpc_fraghdr rm = xs_stream_record_marker(xdr);
999 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
1000 struct msghdr msg = {
1001 .msg_flags = XS_SENDMSG_FLAGS,
1003 bool vm_wait = false;
1007 /* Close the stream if the previous transmission was incomplete */
1008 if (xs_send_request_was_aborted(transport, req)) {
1009 if (transport->sock != NULL)
1010 kernel_sock_shutdown(transport->sock, SHUT_RDWR);
1013 if (!transport->inet)
1016 xs_pktdump("packet data:",
1017 req->rq_svec->iov_base,
1018 req->rq_svec->iov_len);
1020 if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
1021 xs_tcp_set_socket_timeouts(xprt, transport->sock);
1023 /* Continue transmitting the packet/record. We must be careful
1024 * to cope with writespace callbacks arriving _after_ we have
1025 * called sendmsg(). */
1026 req->rq_xtime = ktime_get();
1027 tcp_sock_set_cork(transport->inet, true);
1029 status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
1030 transport->xmit.offset, rm, &sent);
1032 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
1033 xdr->len - transport->xmit.offset, status);
1035 /* If we've sent the entire packet, immediately
1036 * reset the count of bytes sent. */
1037 transport->xmit.offset += sent;
1038 req->rq_bytes_sent = transport->xmit.offset;
1039 if (likely(req->rq_bytes_sent >= msglen)) {
1040 req->rq_xmit_bytes_sent += transport->xmit.offset;
1041 transport->xmit.offset = 0;
1042 if (atomic_long_read(&xprt->xmit_queuelen) == 1)
1043 tcp_sock_set_cork(transport->inet, false);
1047 WARN_ON_ONCE(sent == 0 && status == 0);
1049 if (status == -EAGAIN) {
1051 * Return EAGAIN if we're sure we're hitting the
1052 * socket send buffer limits.
1054 if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
1057 * Did we hit a memory allocation failure?
1063 /* Retry, knowing now that we're below the
1064 * socket send buffer limit
1078 /* Should we call xs_close() here? */
1081 status = xs_nospace(req);
1091 dprintk("RPC: sendmsg returned unrecognized error %d\n",
1098 static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1100 transport->old_data_ready = sk->sk_data_ready;
1101 transport->old_state_change = sk->sk_state_change;
1102 transport->old_write_space = sk->sk_write_space;
1103 transport->old_error_report = sk->sk_error_report;
1106 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1108 sk->sk_data_ready = transport->old_data_ready;
1109 sk->sk_state_change = transport->old_state_change;
1110 sk->sk_write_space = transport->old_write_space;
1111 sk->sk_error_report = transport->old_error_report;
1114 static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
1116 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1118 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
1119 clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
1120 clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
1121 clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
1124 static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
1126 set_bit(nr, &transport->sock_state);
1127 queue_work(xprtiod_workqueue, &transport->error_worker);
1130 static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1132 smp_mb__before_atomic();
1133 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1134 clear_bit(XPRT_CLOSING, &xprt->state);
1135 xs_sock_reset_state_flags(xprt);
1136 smp_mb__after_atomic();
1140 * xs_error_report - callback to handle TCP socket state errors
1143 * Note: we don't call sock_error() since there may be a rpc_task
1144 * using the socket, and so we don't want to clear sk->sk_err.
1146 static void xs_error_report(struct sock *sk)
1148 struct sock_xprt *transport;
1149 struct rpc_xprt *xprt;
1151 read_lock_bh(&sk->sk_callback_lock);
1152 if (!(xprt = xprt_from_sock(sk)))
1155 transport = container_of(xprt, struct sock_xprt, xprt);
1156 transport->xprt_err = -sk->sk_err;
1157 if (transport->xprt_err == 0)
1159 dprintk("RPC: xs_error_report client %p, error=%d...\n",
1160 xprt, -transport->xprt_err);
1161 trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
1163 /* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
1164 smp_mb__before_atomic();
1165 xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
1167 read_unlock_bh(&sk->sk_callback_lock);
1170 static void xs_reset_transport(struct sock_xprt *transport)
1172 struct socket *sock = transport->sock;
1173 struct sock *sk = transport->inet;
1174 struct rpc_xprt *xprt = &transport->xprt;
1175 struct file *filp = transport->file;
1180 if (atomic_read(&transport->xprt.swapper))
1181 sk_clear_memalloc(sk);
1183 kernel_sock_shutdown(sock, SHUT_RDWR);
1185 mutex_lock(&transport->recv_mutex);
1186 write_lock_bh(&sk->sk_callback_lock);
1187 transport->inet = NULL;
1188 transport->sock = NULL;
1189 transport->file = NULL;
1191 sk->sk_user_data = NULL;
1193 xs_restore_old_callbacks(transport, sk);
1194 xprt_clear_connected(xprt);
1195 write_unlock_bh(&sk->sk_callback_lock);
1196 xs_sock_reset_connection_flags(xprt);
1197 /* Reset stream record info */
1198 xs_stream_reset_connect(transport);
1199 mutex_unlock(&transport->recv_mutex);
1201 trace_rpc_socket_close(xprt, sock);
1204 xprt_disconnect_done(xprt);
1208 * xs_close - close a socket
1211 * This is used when all requests are complete; i.e., no DRC state remains
1212 * on the server that we want to preserve.
1214 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
1215 * xs_reset_transport() zeroing the socket from underneath a writer.
1217 static void xs_close(struct rpc_xprt *xprt)
1219 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1221 dprintk("RPC: xs_close xprt %p\n", xprt);
1223 xs_reset_transport(transport);
1224 xprt->reestablish_timeout = 0;
1227 static void xs_inject_disconnect(struct rpc_xprt *xprt)
1229 dprintk("RPC: injecting transport disconnect on xprt=%p\n",
1231 xprt_disconnect_done(xprt);
1234 static void xs_xprt_free(struct rpc_xprt *xprt)
1236 xs_free_peer_addresses(xprt);
1241 * xs_destroy - prepare to shutdown a transport
1242 * @xprt: doomed transport
1245 static void xs_destroy(struct rpc_xprt *xprt)
1247 struct sock_xprt *transport = container_of(xprt,
1248 struct sock_xprt, xprt);
1249 dprintk("RPC: xs_destroy xprt %p\n", xprt);
1251 cancel_delayed_work_sync(&transport->connect_worker);
1253 cancel_work_sync(&transport->recv_worker);
1254 cancel_work_sync(&transport->error_worker);
1256 module_put(THIS_MODULE);
1260 * xs_udp_data_read_skb - receive callback for UDP sockets
1266 static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
1268 struct sk_buff *skb)
1270 struct rpc_task *task;
1271 struct rpc_rqst *rovr;
1272 int repsize, copied;
1278 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
1282 /* Copy the XID from the skb... */
1283 xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
1287 /* Look up and lock the request corresponding to the given XID */
1288 spin_lock(&xprt->queue_lock);
1289 rovr = xprt_lookup_rqst(xprt, *xp);
1292 xprt_pin_rqst(rovr);
1293 xprt_update_rtt(rovr->rq_task);
1294 spin_unlock(&xprt->queue_lock);
1295 task = rovr->rq_task;
1297 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1300 /* Suck it into the iovec, verify checksum if not done by hw. */
1301 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1302 spin_lock(&xprt->queue_lock);
1303 __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
1308 spin_lock(&xprt->transport_lock);
1309 xprt_adjust_cwnd(xprt, task, copied);
1310 spin_unlock(&xprt->transport_lock);
1311 spin_lock(&xprt->queue_lock);
1312 xprt_complete_rqst(task, copied);
1313 __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
1315 xprt_unpin_rqst(rovr);
1317 spin_unlock(&xprt->queue_lock);
1320 static void xs_udp_data_receive(struct sock_xprt *transport)
1322 struct sk_buff *skb;
1326 mutex_lock(&transport->recv_mutex);
1327 sk = transport->inet;
1331 skb = skb_recv_udp(sk, 0, 1, &err);
1334 xs_udp_data_read_skb(&transport->xprt, sk, skb);
1338 xs_poll_check_readable(transport);
1340 mutex_unlock(&transport->recv_mutex);
1343 static void xs_udp_data_receive_workfn(struct work_struct *work)
1345 struct sock_xprt *transport =
1346 container_of(work, struct sock_xprt, recv_worker);
1347 unsigned int pflags = memalloc_nofs_save();
1349 xs_udp_data_receive(transport);
1350 memalloc_nofs_restore(pflags);
1354 * xs_data_ready - "data ready" callback for UDP sockets
1355 * @sk: socket with data to read
1358 static void xs_data_ready(struct sock *sk)
1360 struct rpc_xprt *xprt;
1362 read_lock_bh(&sk->sk_callback_lock);
1363 dprintk("RPC: xs_data_ready...\n");
1364 xprt = xprt_from_sock(sk);
1366 struct sock_xprt *transport = container_of(xprt,
1367 struct sock_xprt, xprt);
1368 transport->old_data_ready(sk);
1369 /* Any data means we had a useful conversation, so
1370 * we don't need to delay the next reconnect
1372 if (xprt->reestablish_timeout)
1373 xprt->reestablish_timeout = 0;
1374 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1375 queue_work(xprtiod_workqueue, &transport->recv_worker);
1377 read_unlock_bh(&sk->sk_callback_lock);
1381 * Helper function to force a TCP close if the server is sending
1382 * junk and/or it has put us in CLOSE_WAIT
1384 static void xs_tcp_force_close(struct rpc_xprt *xprt)
1386 xprt_force_disconnect(xprt);
1389 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1390 static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
1394 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1397 * xs_tcp_state_change - callback to handle TCP socket state changes
1398 * @sk: socket whose state has changed
1401 static void xs_tcp_state_change(struct sock *sk)
1403 struct rpc_xprt *xprt;
1404 struct sock_xprt *transport;
1406 read_lock_bh(&sk->sk_callback_lock);
1407 if (!(xprt = xprt_from_sock(sk)))
1409 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1410 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1411 sk->sk_state, xprt_connected(xprt),
1412 sock_flag(sk, SOCK_DEAD),
1413 sock_flag(sk, SOCK_ZAPPED),
1416 transport = container_of(xprt, struct sock_xprt, xprt);
1417 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1418 switch (sk->sk_state) {
1419 case TCP_ESTABLISHED:
1420 if (!xprt_test_and_set_connected(xprt)) {
1421 xprt->connect_cookie++;
1422 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1423 xprt_clear_connecting(xprt);
1425 xprt->stat.connect_count++;
1426 xprt->stat.connect_time += (long)jiffies -
1427 xprt->stat.connect_start;
1428 xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
1432 /* The client initiated a shutdown of the socket */
1433 xprt->connect_cookie++;
1434 xprt->reestablish_timeout = 0;
1435 set_bit(XPRT_CLOSING, &xprt->state);
1436 smp_mb__before_atomic();
1437 clear_bit(XPRT_CONNECTED, &xprt->state);
1438 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1439 smp_mb__after_atomic();
1441 case TCP_CLOSE_WAIT:
1442 /* The server initiated a shutdown of the socket */
1443 xprt->connect_cookie++;
1444 clear_bit(XPRT_CONNECTED, &xprt->state);
1445 xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1449 * If the server closed down the connection, make sure that
1450 * we back off before reconnecting
1452 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1453 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1456 set_bit(XPRT_CLOSING, &xprt->state);
1457 smp_mb__before_atomic();
1458 clear_bit(XPRT_CONNECTED, &xprt->state);
1459 smp_mb__after_atomic();
1462 if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1463 &transport->sock_state))
1464 xprt_clear_connecting(xprt);
1465 clear_bit(XPRT_CLOSING, &xprt->state);
1466 /* Trigger the socket release */
1467 xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1470 read_unlock_bh(&sk->sk_callback_lock);
1473 static void xs_write_space(struct sock *sk)
1475 struct socket_wq *wq;
1476 struct sock_xprt *transport;
1477 struct rpc_xprt *xprt;
1481 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1483 if (unlikely(!(xprt = xprt_from_sock(sk))))
1485 transport = container_of(xprt, struct sock_xprt, xprt);
1487 wq = rcu_dereference(sk->sk_wq);
1488 if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
1491 xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
1492 sk->sk_write_pending--;
1498 * xs_udp_write_space - callback invoked when socket buffer space becomes available
1500 * @sk: socket whose state has changed
1502 * Called when more output buffer space is available for this socket.
1503 * We try not to wake our writers until they can make "significant"
1504 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1505 * with a bunch of small requests.
1507 static void xs_udp_write_space(struct sock *sk)
1509 read_lock_bh(&sk->sk_callback_lock);
1511 /* from net/core/sock.c:sock_def_write_space */
1512 if (sock_writeable(sk))
1515 read_unlock_bh(&sk->sk_callback_lock);
1519 * xs_tcp_write_space - callback invoked when socket buffer space becomes available
1521 * @sk: socket whose state has changed
1523 * Called when more output buffer space is available for this socket.
1524 * We try not to wake our writers until they can make "significant"
1525 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1526 * with a bunch of small requests.
1528 static void xs_tcp_write_space(struct sock *sk)
1530 read_lock_bh(&sk->sk_callback_lock);
1532 /* from net/core/stream.c:sk_stream_write_space */
1533 if (sk_stream_is_writeable(sk))
1536 read_unlock_bh(&sk->sk_callback_lock);
1539 static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1541 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1542 struct sock *sk = transport->inet;
1544 if (transport->rcvsize) {
1545 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1546 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1548 if (transport->sndsize) {
1549 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1550 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1551 sk->sk_write_space(sk);
1556 * xs_udp_set_buffer_size - set send and receive limits
1557 * @xprt: generic transport
1558 * @sndsize: requested size of send buffer, in bytes
1559 * @rcvsize: requested size of receive buffer, in bytes
1561 * Set socket send and receive buffer size limits.
1563 static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1565 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1567 transport->sndsize = 0;
1569 transport->sndsize = sndsize + 1024;
1570 transport->rcvsize = 0;
1572 transport->rcvsize = rcvsize + 1024;
1574 xs_udp_do_set_buffer_size(xprt);
1578 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1579 * @xprt: controlling transport
1580 * @task: task that timed out
1582 * Adjust the congestion window after a retransmit timeout has occurred.
1584 static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1586 spin_lock(&xprt->transport_lock);
1587 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1588 spin_unlock(&xprt->transport_lock);
1591 static int xs_get_random_port(void)
1593 unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
1594 unsigned short range;
1595 unsigned short rand;
1599 range = max - min + 1;
1600 rand = (unsigned short) prandom_u32() % range;
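/*
 * 'rand' is uniform over [0, range - 1], which places the chosen source
 * port uniformly within the reserved window
 * [xprt_min_resvport, xprt_max_resvport].
 */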
1604 static unsigned short xs_sock_getport(struct socket *sock)
1606 struct sockaddr_storage buf;
1607 unsigned short port = 0;
1609 if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
1611 switch (buf.ss_family) {
1613 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1616 port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1623 * xs_set_port - reset the port number in the remote endpoint address
1624 * @xprt: generic transport
1625 * @port: new port number
1628 static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1630 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1632 rpc_set_port(xs_addr(xprt), port);
1633 xs_update_peer_port(xprt);
1636 static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1638 if (transport->srcport == 0 && transport->xprt.reuseport)
1639 transport->srcport = xs_sock_getport(sock);
1642 static int xs_get_srcport(struct sock_xprt *transport)
1644 int port = transport->srcport;
1646 if (port == 0 && transport->xprt.resvport)
1647 port = xs_get_random_port();
1651 static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1653 if (transport->srcport != 0)
1654 transport->srcport = 0;
1655 if (!transport->xprt.resvport)
1657 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1658 return xprt_max_resvport;
1661 static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1663 struct sockaddr_storage myaddr;
1665 int port = xs_get_srcport(transport);
1666 unsigned short last;
1669 * If we are asking for any ephemeral port (i.e. port == 0 &&
1670 * transport->xprt.resvport == 0), don't bind. Let the local
1671 * port selection happen implicitly when the socket is used
1672 * (for example at connect time).
1674 * This ensures that we can continue to establish TCP
1675 * connections even when all local ephemeral ports are already
1676 * a part of some TCP connection. This makes no difference
1677 * for UDP sockets, but also doesn't harm them.
1679 * If we're asking for any reserved port (i.e. port == 0 &&
1680 * transport->xprt.resvport == 1) xs_get_srcport above will
1681 * ensure that port is non-zero and we will bind as needed.
1686 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1688 rpc_set_port((struct sockaddr *)&myaddr, port);
1689 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1690 transport->xprt.addrlen);
1692 transport->srcport = port;
1696 port = xs_next_srcport(transport, port);
1699 } while (err == -EADDRINUSE && nloop != 2);
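/*
 * nloop limits the loop to two attempts: if the first source port is
 * already in use we retry once with a different port, and a second
 * -EADDRINUSE is simply returned to the caller.
 */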
1701 if (myaddr.ss_family == AF_INET)
1702 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1703 &((struct sockaddr_in *)&myaddr)->sin_addr,
1704 port, err ? "failed" : "ok", err);
1706 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1707 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1708 port, err ? "failed" : "ok", err);
1713 * We don't support autobind on AF_LOCAL sockets
1715 static void xs_local_rpcbind(struct rpc_task *task)
1717 xprt_set_bound(task->tk_xprt);
1720 static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1724 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1725 static struct lock_class_key xs_key[2];
1726 static struct lock_class_key xs_slock_key[2];
1728 static inline void xs_reclassify_socketu(struct socket *sock)
1730 struct sock *sk = sock->sk;
1732 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1733 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1736 static inline void xs_reclassify_socket4(struct socket *sock)
1738 struct sock *sk = sock->sk;
1740 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1741 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1744 static inline void xs_reclassify_socket6(struct socket *sock)
1746 struct sock *sk = sock->sk;
1748 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1749 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1752 static inline void xs_reclassify_socket(int family, struct socket *sock)
1754 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
1759 xs_reclassify_socketu(sock);
1762 xs_reclassify_socket4(sock);
1765 xs_reclassify_socket6(sock);
1770 static inline void xs_reclassify_socket(int family, struct socket *sock)
1775 static void xs_dummy_setup_socket(struct work_struct *work)
1779 static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1780 struct sock_xprt *transport, int family, int type,
1781 int protocol, bool reuseport)
1784 struct socket *sock;
1787 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1789 dprintk("RPC: can't create %d transport socket (%d).\n",
1793 xs_reclassify_socket(family, sock);
1796 sock_set_reuseport(sock->sk);
1798 err = xs_bind(transport, sock);
1804 filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1806 return ERR_CAST(filp);
1807 transport->file = filp;
1811 return ERR_PTR(err);
1814 static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1815 struct socket *sock)
1817 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1820 if (!transport->inet) {
1821 struct sock *sk = sock->sk;
1823 write_lock_bh(&sk->sk_callback_lock);
1825 xs_save_old_callbacks(transport, sk);
1827 sk->sk_user_data = xprt;
1828 sk->sk_data_ready = xs_data_ready;
1829 sk->sk_write_space = xs_udp_write_space;
1830 sock_set_flag(sk, SOCK_FASYNC);
1831 sk->sk_error_report = xs_error_report;
1833 xprt_clear_connected(xprt);
1835 /* Reset to new socket */
1836 transport->sock = sock;
1837 transport->inet = sk;
1839 write_unlock_bh(&sk->sk_callback_lock);
1842 xs_stream_start_connect(transport);
1844 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1848 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1849 * @transport: socket transport to connect
1851 static int xs_local_setup_socket(struct sock_xprt *transport)
1853 struct rpc_xprt *xprt = &transport->xprt;
1855 struct socket *sock;
1858 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1859 SOCK_STREAM, 0, &sock, 1);
1861 dprintk("RPC: can't create AF_LOCAL "
1862 "transport socket (%d).\n", -status);
1865 xs_reclassify_socket(AF_LOCAL, sock);
1867 filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1869 status = PTR_ERR(filp);
1872 transport->file = filp;
1874 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
1875 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1877 status = xs_local_finish_connecting(xprt, sock);
1878 trace_rpc_socket_connect(xprt, sock, status);
1881 dprintk("RPC: xprt %p connected to %s\n",
1882 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1883 xprt->stat.connect_count++;
1884 xprt->stat.connect_time += (long)jiffies -
1885 xprt->stat.connect_start;
1886 xprt_set_connected(xprt);
1891 dprintk("RPC: xprt %p: socket %s does not exist\n",
1892 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1895 dprintk("RPC: xprt %p: connection refused for %s\n",
1896 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1899 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1901 xprt->address_strings[RPC_DISPLAY_ADDR]);
1905 xprt_clear_connecting(xprt);
1906 xprt_wake_pending_tasks(xprt, status);
1910 static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
1912 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1915 if (RPC_IS_ASYNC(task)) {
1917 * We want the AF_LOCAL connect to be resolved in the
1918 * filesystem namespace of the process making the rpc
1919 * call. Thus we connect synchronously.
1921 * If we want to support asynchronous AF_LOCAL calls,
1922 * we'll need to figure out how to pass a namespace to
1925 task->tk_rpc_status = -ENOTCONN;
1926 rpc_exit(task, -ENOTCONN);
1929 ret = xs_local_setup_socket(transport);
1930 if (ret && !RPC_IS_SOFTCONN(task))
1931 msleep_interruptible(15000);
1934 #if IS_ENABLED(CONFIG_SUNRPC_SWAP)
1936 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
1937 * know that we have exclusive access to the socket), to guard against
1938 * races with xs_reset_transport.
1940 static void xs_set_memalloc(struct rpc_xprt *xprt)
1942 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1946 * If there's no sock, then we have nothing to set. The
1947 * reconnecting process will get it for us.
1949 if (!transport->inet)
1951 if (atomic_read(&xprt->swapper))
1952 sk_set_memalloc(transport->inet);
1956 * xs_enable_swap - Tag this transport as being used for swap.
1957 * @xprt: transport to tag
1959 * Take a reference to this transport on behalf of the rpc_clnt, and
1960 * optionally mark it for swapping if it wasn't already.
1963 xs_enable_swap(struct rpc_xprt *xprt)
1965 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
1967 if (atomic_inc_return(&xprt->swapper) != 1)
1969 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
1970 return -ERESTARTSYS;
1972 sk_set_memalloc(xs->inet);
1973 xprt_release_xprt(xprt, NULL);
1978 * xs_disable_swap - Untag this transport as being used for swap.
1979 * @xprt: transport to tag
1981 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
1982 * swapper refcount goes to 0, untag the socket as a memalloc socket.
1985 xs_disable_swap(struct rpc_xprt *xprt)
1987 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
1989 if (!atomic_dec_and_test(&xprt->swapper))
1991 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
1994 sk_clear_memalloc(xs->inet);
1995 xprt_release_xprt(xprt, NULL);
1998 static void xs_set_memalloc(struct rpc_xprt *xprt)
2003 xs_enable_swap(struct rpc_xprt *xprt)
2009 xs_disable_swap(struct rpc_xprt *xprt)
2014 static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2016 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2018 if (!transport->inet) {
2019 struct sock *sk = sock->sk;
2021 write_lock_bh(&sk->sk_callback_lock);
2023 xs_save_old_callbacks(transport, sk);
2025 sk->sk_user_data = xprt;
2026 sk->sk_data_ready = xs_data_ready;
2027 sk->sk_write_space = xs_udp_write_space;
2028 sock_set_flag(sk, SOCK_FASYNC);
2030 xprt_set_connected(xprt);
2032 /* Reset to new socket */
2033 transport->sock = sock;
2034 transport->inet = sk;
2036 xs_set_memalloc(xprt);
2038 write_unlock_bh(&sk->sk_callback_lock);
2040 xs_udp_do_set_buffer_size(xprt);
2042 xprt->stat.connect_start = jiffies;
2045 static void xs_udp_setup_socket(struct work_struct *work)
2047 struct sock_xprt *transport =
2048 container_of(work, struct sock_xprt, connect_worker.work);
2049 struct rpc_xprt *xprt = &transport->xprt;
2050 struct socket *sock;
2053 sock = xs_create_sock(xprt, transport,
2054 xs_addr(xprt)->sa_family, SOCK_DGRAM,
2055 IPPROTO_UDP, false);
2059 dprintk("RPC: worker connecting xprt %p via %s to "
2060 "%s (port %s)\n", xprt,
2061 xprt->address_strings[RPC_DISPLAY_PROTO],
2062 xprt->address_strings[RPC_DISPLAY_ADDR],
2063 xprt->address_strings[RPC_DISPLAY_PORT]);
2065 xs_udp_finish_connecting(xprt, sock);
2066 trace_rpc_socket_connect(xprt, sock, 0);
2069 xprt_clear_connecting(xprt);
2070 xprt_unlock_connect(xprt, transport);
2071 xprt_wake_pending_tasks(xprt, status);
2075 * xs_tcp_shutdown - gracefully shut down a TCP socket
2078 * Initiates a graceful shutdown of the TCP socket by calling the
2079 * equivalent of shutdown(SHUT_RDWR);
2081 static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2083 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2084 struct socket *sock = transport->sock;
2085 int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;
2091 kernel_sock_shutdown(sock, SHUT_RDWR);
2092 trace_rpc_socket_shutdown(xprt, sock);
2096 xs_reset_transport(transport);
2100 static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
2101 struct socket *sock)
2103 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2104 unsigned int keepidle;
2105 unsigned int keepcnt;
2108 spin_lock(&xprt->transport_lock);
2109 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
2110 keepcnt = xprt->timeout->to_retries + 1;
2111 timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2112 (xprt->timeout->to_retries + 1);
2113 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2114 spin_unlock(&xprt->transport_lock);
2116 /* TCP Keepalive options */
2117 sock_set_keepalive(sock->sk);
2118 tcp_sock_set_keepidle(sock->sk, keepidle);
2119 tcp_sock_set_keepintvl(sock->sk, keepidle);
2120 tcp_sock_set_keepcnt(sock->sk, keepcnt);
2122 /* TCP user timeout (see RFC5482) */
2123 tcp_sock_set_user_timeout(sock->sk, timeo);
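/*
 * Illustrative arithmetic, assuming a transport timeout of
 * to_initval = 60 * HZ with to_retries = 2: keepidle = keepintvl = 60s,
 * keepcnt = 3, and the TCP_USER_TIMEOUT set above becomes
 * 60000 * 3 = 180000 ms.
 */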
2126 static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
2127 unsigned long connect_timeout,
2128 unsigned long reconnect_timeout)
2130 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2131 struct rpc_timeout to;
2132 unsigned long initval;
2134 spin_lock(&xprt->transport_lock);
2135 if (reconnect_timeout < xprt->max_reconnect_timeout)
2136 xprt->max_reconnect_timeout = reconnect_timeout;
2137 if (connect_timeout < xprt->connect_timeout) {
2138 memcpy(&to, xprt->timeout, sizeof(to));
2139 initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
2140 /* Arbitrary lower limit */
2141 if (initval < XS_TCP_INIT_REEST_TO << 1)
2142 initval = XS_TCP_INIT_REEST_TO << 1;
2143 to.to_initval = initval;
2144 to.to_maxval = initval;
2145 memcpy(&transport->tcp_timeout, &to,
2146 sizeof(transport->tcp_timeout));
2147 xprt->timeout = &transport->tcp_timeout;
2148 xprt->connect_timeout = connect_timeout;
2150 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2151 spin_unlock(&xprt->transport_lock);
2154 static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2156 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2157 int ret = -ENOTCONN;
2159 if (!transport->inet) {
2160 struct sock *sk = sock->sk;
2162 /* Avoid temporary addresses; they are bad for long-lived
2163 * connections such as NFS mounts.
2164 * RFC4941, section 3.6 suggests that:
2165 * Individual applications, which have specific
2166 * knowledge about the normal duration of connections,
2167 * MAY override this as appropriate.
2169 if (xs_addr(xprt)->sa_family == PF_INET6) {
2170 ip6_sock_set_addr_preferences(sk,
2171 IPV6_PREFER_SRC_PUBLIC);
2174 xs_tcp_set_socket_timeouts(xprt, sock);
2175 tcp_sock_set_nodelay(sk);
2177 write_lock_bh(&sk->sk_callback_lock);
2179 xs_save_old_callbacks(transport, sk);
2181 sk->sk_user_data = xprt;
2182 sk->sk_data_ready = xs_data_ready;
2183 sk->sk_state_change = xs_tcp_state_change;
2184 sk->sk_write_space = xs_tcp_write_space;
2185 sock_set_flag(sk, SOCK_FASYNC);
2186 sk->sk_error_report = xs_error_report;
2188 /* socket options */
2189 sock_reset_flag(sk, SOCK_LINGER);
2191 xprt_clear_connected(xprt);
2193 /* Reset to new socket */
2194 transport->sock = sock;
2195 transport->inet = sk;
2197 write_unlock_bh(&sk->sk_callback_lock);
2200 if (!xprt_bound(xprt))
2203 xs_set_memalloc(xprt);
2205 xs_stream_start_connect(transport);
2207 /* Tell the socket layer to start connecting... */
2208 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2209 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2212 xs_set_srcport(transport, sock);
2216 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2217 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2219 case -EADDRNOTAVAIL:
2220 /* Source port number is unavailable. Try a new one! */
2221 transport->srcport = 0;
2228 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2229 * @work: queued work item
2231 * Invoked from workqueue context.
2233 static void xs_tcp_setup_socket(struct work_struct *work)
2235 struct sock_xprt *transport =
2236 container_of(work, struct sock_xprt, connect_worker.work);
2237 struct socket *sock = transport->sock;
2238 struct rpc_xprt *xprt = &transport->xprt;
2242 sock = xs_create_sock(xprt, transport,
2243 xs_addr(xprt)->sa_family, SOCK_STREAM,
2246 status = PTR_ERR(sock);
2251 dprintk("RPC: worker connecting xprt %p via %s to "
2252 "%s (port %s)\n", xprt,
2253 xprt->address_strings[RPC_DISPLAY_PROTO],
2254 xprt->address_strings[RPC_DISPLAY_ADDR],
2255 xprt->address_strings[RPC_DISPLAY_PORT]);
2257 status = xs_tcp_finish_connecting(xprt, sock);
2258 trace_rpc_socket_connect(xprt, sock, status);
2259 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2260 xprt, -status, xprt_connected(xprt),
2261 sock->sk->sk_state);
2264 printk("%s: connect returned unhandled error %d\n",
2267 case -EADDRNOTAVAIL:
2268 /* We're probably in TIME_WAIT. Get rid of existing socket,
2271 xs_tcp_force_close(xprt);
2276 xprt_unlock_connect(xprt, transport);
2279 /* Happens, for instance, if the user specified a link
2280 * local IPv6 address without a scope-id.
2289 /* xs_tcp_force_close() wakes tasks with a fixed error code.
2290 * We need to wake them first to ensure the correct error code.
2292 xprt_wake_pending_tasks(xprt, status);
2293 xs_tcp_force_close(xprt);
2298 xprt_clear_connecting(xprt);
2299 xprt_unlock_connect(xprt, transport);
2300 xprt_wake_pending_tasks(xprt, status);
2304 * xs_connect - connect a socket to a remote endpoint
2305 * @xprt: pointer to transport structure
2306 * @task: address of RPC task that manages state of connect request
2308 * TCP: If the remote end dropped the connection, delay reconnecting.
2310 * UDP socket connects are synchronous, but we use a work queue anyway
2311 * to guarantee that even unprivileged user processes can set up a
2312 * socket on a privileged port.
2314 * If a UDP socket connect fails, the delay behavior here prevents
2315 * retry floods (hard mounts).
2317 static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2319 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2320 unsigned long delay = 0;
2322 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2324 if (transport->sock != NULL) {
2325 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2327 xprt, xprt->reestablish_timeout / HZ);
2329 /* Start by resetting any existing state */
2330 xs_reset_transport(transport);
2332 delay = xprt_reconnect_delay(xprt);
2333 xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
2336 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2338 queue_delayed_work(xprtiod_workqueue,
2339 &transport->connect_worker,
2343 static void xs_wake_disconnect(struct sock_xprt *transport)
2345 if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state))
2346 xs_tcp_force_close(&transport->xprt);
2349 static void xs_wake_write(struct sock_xprt *transport)
2351 if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state))
2352 xprt_write_space(&transport->xprt);
2355 static void xs_wake_error(struct sock_xprt *transport)
2359 if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2361 mutex_lock(&transport->recv_mutex);
2362 if (transport->sock == NULL)
2364 if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2366 sockerr = xchg(&transport->xprt_err, 0);
2368 xprt_wake_pending_tasks(&transport->xprt, sockerr);
2370 mutex_unlock(&transport->recv_mutex);
2373 static void xs_wake_pending(struct sock_xprt *transport)
2375 if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state))
2376 xprt_wake_pending_tasks(&transport->xprt, -EAGAIN);
2379 static void xs_error_handle(struct work_struct *work)
2381 struct sock_xprt *transport = container_of(work,
2382 struct sock_xprt, error_worker);
2384 xs_wake_disconnect(transport);
2385 xs_wake_write(transport);
2386 xs_wake_error(transport);
2387 xs_wake_pending(transport);
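/* The xs_wake_* helpers above run from error_worker.  Socket callbacks execute
 * in (soft)irq context, so they only record what happened and kick the worker.
 * An illustrative (not verbatim) producer side, assuming the XPRT_SOCK_WAKE_ERROR
 * bit and the xprt_err field used above, might look like this:
 */
static void example_sk_error_report(struct sock_xprt *transport, int err)
{
	/* Stash the error for xs_wake_error() to pick up ... */
	WRITE_ONCE(transport->xprt_err, -err);
	/* ... make sure it is visible before the flag ... */
	smp_mb__before_atomic();
	/* ... then flag it and defer the task wake-up to process context. */
	set_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
	queue_work(xprtiod_workqueue, &transport->error_worker);
}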
2391 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2392 * @xprt: rpc_xprt struct containing statistics
2396 static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2400 if (xprt_connected(xprt))
2401 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2403 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2404 "%llu %llu %lu %llu %llu\n",
2405 xprt->stat.bind_count,
2406 xprt->stat.connect_count,
2407 xprt->stat.connect_time / HZ,
2411 xprt->stat.bad_xids,
2414 xprt->stat.max_slots,
2415 xprt->stat.sending_u,
2416 xprt->stat.pending_u);
2420 * xs_udp_print_stats - display UDP socket-specific stats
2421 * @xprt: rpc_xprt struct containing statistics
2425 static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2427 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2429 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2432 xprt->stat.bind_count,
2435 xprt->stat.bad_xids,
2438 xprt->stat.max_slots,
2439 xprt->stat.sending_u,
2440 xprt->stat.pending_u);
2444 * xs_tcp_print_stats - display TCP socket-specific stats
2445 * @xprt: rpc_xprt struct containing statistics
2449 static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2451 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2454 if (xprt_connected(xprt))
2455 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2457 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2458 "%llu %llu %lu %llu %llu\n",
2460 xprt->stat.bind_count,
2461 xprt->stat.connect_count,
2462 xprt->stat.connect_time / HZ,
2466 xprt->stat.bad_xids,
2469 xprt->stat.max_slots,
2470 xprt->stat.sending_u,
2471 xprt->stat.pending_u);
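/* These per-transport counters surface in userspace through the RPC client's
 * statistics interface; for NFS mounts they appear as the "xprt:" line in
 * /proc/self/mountstats, with the fields printed in the order shown above.
 */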
2475 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2476 * we allocate pages instead of doing a kmalloc like rpc_malloc is that we want
2477 * to use the server-side send routines.
2479 static int bc_malloc(struct rpc_task *task)
2481 struct rpc_rqst *rqst = task->tk_rqstp;
2482 size_t size = rqst->rq_callsize;
2484 struct rpc_buffer *buf;
2486 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
2487 WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n", size);
2492 page = alloc_page(GFP_KERNEL);
2496 buf = page_address(page);
2497 buf->len = PAGE_SIZE;
2499 rqst->rq_buffer = buf->data;
2500 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
2505 * Free the space allocated in the bc_alloc routine
2507 static void bc_free(struct rpc_task *task)
2509 void *buffer = task->tk_rqstp->rq_buffer;
2510 struct rpc_buffer *buf;
2512 buf = container_of(buffer, struct rpc_buffer, data);
2513 free_page((unsigned long)buf);
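/* Layout of the single page handed out by bc_malloc() above (the call and
 * reply buffers share the page, after the struct rpc_buffer header):
 *
 *   +-------------------+----------------------+------------------------+
 *   | struct rpc_buffer | rq_buffer            | rq_rbuffer             |
 *   | (len, data[])     | (rq_callsize bytes)  | (remainder of page)    |
 *   +-------------------+----------------------+------------------------+
 *
 * bc_free() recovers the page from rq_buffer with container_of(), so no
 * separate bookkeeping is needed.
 */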
2516 static int bc_sendto(struct rpc_rqst *req)
2518 struct xdr_buf *xdr = &req->rq_snd_buf;
2519 struct sock_xprt *transport =
2520 container_of(req->rq_xprt, struct sock_xprt, xprt);
2521 struct msghdr msg = {
2524 rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
2526 unsigned int sent = 0;
2529 req->rq_xtime = ktime_get();
2530 err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
2532 if (err < 0 || sent != (xdr->len + sizeof(marker)))
2538 * bc_send_request - Send a backchannel Call on a TCP socket
2539 * @req: rpc_rqst containing Call message to be sent
2541 * xpt_mutex ensures @req's whole message is written to the socket
2542 * without interruption.
2545 * %0 if the message was sent successfully
2546 * %ENOTCONN if the message was not sent
2548 static int bc_send_request(struct rpc_rqst *req)
2550 struct svc_xprt *xprt;
2554 * Get the server socket associated with this callback xprt
2556 xprt = req->rq_xprt->bc_xprt;
2559 * Grab the mutex to serialize data as the connection is shared
2560 * with the fore channel
2562 mutex_lock(&xprt->xpt_mutex);
2563 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2566 len = bc_sendto(req);
2567 mutex_unlock(&xprt->xpt_mutex);
2576 * The close routine. Since this is client-initiated, we do nothing here.
2579 static void bc_close(struct rpc_xprt *xprt)
2581 xprt_disconnect_done(xprt);
2585 * The xprt destroy routine. Again, because this connection is client-
2586 * initiated, we do nothing here.
2589 static void bc_destroy(struct rpc_xprt *xprt)
2591 dprintk("RPC: bc_destroy xprt %p\n", xprt);
2594 module_put(THIS_MODULE);
2597 static const struct rpc_xprt_ops xs_local_ops = {
2598 .reserve_xprt = xprt_reserve_xprt,
2599 .release_xprt = xprt_release_xprt,
2600 .alloc_slot = xprt_alloc_slot,
2601 .free_slot = xprt_free_slot,
2602 .rpcbind = xs_local_rpcbind,
2603 .set_port = xs_local_set_port,
2604 .connect = xs_local_connect,
2605 .buf_alloc = rpc_malloc,
2606 .buf_free = rpc_free,
2607 .prepare_request = xs_stream_prepare_request,
2608 .send_request = xs_local_send_request,
2609 .wait_for_reply_request = xprt_wait_for_reply_request_def,
2611 .destroy = xs_destroy,
2612 .print_stats = xs_local_print_stats,
2613 .enable_swap = xs_enable_swap,
2614 .disable_swap = xs_disable_swap,
2617 static const struct rpc_xprt_ops xs_udp_ops = {
2618 .set_buffer_size = xs_udp_set_buffer_size,
2619 .reserve_xprt = xprt_reserve_xprt_cong,
2620 .release_xprt = xprt_release_xprt_cong,
2621 .alloc_slot = xprt_alloc_slot,
2622 .free_slot = xprt_free_slot,
2623 .rpcbind = rpcb_getport_async,
2624 .set_port = xs_set_port,
2625 .connect = xs_connect,
2626 .buf_alloc = rpc_malloc,
2627 .buf_free = rpc_free,
2628 .send_request = xs_udp_send_request,
2629 .wait_for_reply_request = xprt_wait_for_reply_request_rtt,
2630 .timer = xs_udp_timer,
2631 .release_request = xprt_release_rqst_cong,
2633 .destroy = xs_destroy,
2634 .print_stats = xs_udp_print_stats,
2635 .enable_swap = xs_enable_swap,
2636 .disable_swap = xs_disable_swap,
2637 .inject_disconnect = xs_inject_disconnect,
2640 static const struct rpc_xprt_ops xs_tcp_ops = {
2641 .reserve_xprt = xprt_reserve_xprt,
2642 .release_xprt = xprt_release_xprt,
2643 .alloc_slot = xprt_alloc_slot,
2644 .free_slot = xprt_free_slot,
2645 .rpcbind = rpcb_getport_async,
2646 .set_port = xs_set_port,
2647 .connect = xs_connect,
2648 .buf_alloc = rpc_malloc,
2649 .buf_free = rpc_free,
2650 .prepare_request = xs_stream_prepare_request,
2651 .send_request = xs_tcp_send_request,
2652 .wait_for_reply_request = xprt_wait_for_reply_request_def,
2653 .close = xs_tcp_shutdown,
2654 .destroy = xs_destroy,
2655 .set_connect_timeout = xs_tcp_set_connect_timeout,
2656 .print_stats = xs_tcp_print_stats,
2657 .enable_swap = xs_enable_swap,
2658 .disable_swap = xs_disable_swap,
2659 .inject_disconnect = xs_inject_disconnect,
2660 #ifdef CONFIG_SUNRPC_BACKCHANNEL
2661 .bc_setup = xprt_setup_bc,
2662 .bc_maxpayload = xs_tcp_bc_maxpayload,
2663 .bc_num_slots = xprt_bc_max_slots,
2664 .bc_free_rqst = xprt_free_bc_rqst,
2665 .bc_destroy = xprt_destroy_bc,
2670 * The rpc_xprt_ops for the server backchannel
2673 static const struct rpc_xprt_ops bc_tcp_ops = {
2674 .reserve_xprt = xprt_reserve_xprt,
2675 .release_xprt = xprt_release_xprt,
2676 .alloc_slot = xprt_alloc_slot,
2677 .free_slot = xprt_free_slot,
2678 .buf_alloc = bc_malloc,
2679 .buf_free = bc_free,
2680 .send_request = bc_send_request,
2681 .wait_for_reply_request = xprt_wait_for_reply_request_def,
2683 .destroy = bc_destroy,
2684 .print_stats = xs_tcp_print_stats,
2685 .enable_swap = xs_enable_swap,
2686 .disable_swap = xs_disable_swap,
2687 .inject_disconnect = xs_inject_disconnect,
2690 static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2692 static const struct sockaddr_in sin = {
2693 .sin_family = AF_INET,
2694 .sin_addr.s_addr = htonl(INADDR_ANY),
2696 static const struct sockaddr_in6 sin6 = {
2697 .sin6_family = AF_INET6,
2698 .sin6_addr = IN6ADDR_ANY_INIT,
2705 memcpy(sap, &sin, sizeof(sin));
2708 memcpy(sap, &sin6, sizeof(sin6));
2711 dprintk("RPC: %s: Bad address family\n", __func__);
2712 return -EAFNOSUPPORT;
2717 static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2718 unsigned int slot_table_size,
2719 unsigned int max_slot_table_size)
2721 struct rpc_xprt *xprt;
2722 struct sock_xprt *new;
2724 if (args->addrlen > sizeof(xprt->addr)) {
2725 dprintk("RPC: xs_setup_xprt: address too large\n");
2726 return ERR_PTR(-EBADF);
2729 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2730 max_slot_table_size);
2732 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2734 return ERR_PTR(-ENOMEM);
2737 new = container_of(xprt, struct sock_xprt, xprt);
2738 mutex_init(&new->recv_mutex);
2739 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2740 xprt->addrlen = args->addrlen;
2742 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2745 err = xs_init_anyaddr(args->dstaddr->sa_family,
2746 (struct sockaddr *)&new->srcaddr);
2749 return ERR_PTR(err);
2756 static const struct rpc_timeout xs_local_default_timeout = {
2757 .to_initval = 10 * HZ,
2758 .to_maxval = 10 * HZ,
2763 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2764 * @args: rpc transport creation arguments
2766 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2768 static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2770 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2771 struct sock_xprt *transport;
2772 struct rpc_xprt *xprt;
2773 struct rpc_xprt *ret;
2775 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2776 xprt_max_tcp_slot_table_entries);
2779 transport = container_of(xprt, struct sock_xprt, xprt);
2782 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2784 xprt->bind_timeout = XS_BIND_TO;
2785 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2786 xprt->idle_timeout = XS_IDLE_DISC_TO;
2788 xprt->ops = &xs_local_ops;
2789 xprt->timeout = &xs_local_default_timeout;
2791 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2792 INIT_WORK(&transport->error_worker, xs_error_handle);
2793 INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);
2795 switch (sun->sun_family) {
2797 if (sun->sun_path[0] != '/') {
2798 dprintk("RPC: bad AF_LOCAL address: %s\n",
2800 ret = ERR_PTR(-EINVAL);
2803 xprt_set_bound(xprt);
2804 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2805 ret = ERR_PTR(xs_local_setup_socket(transport));
2810 ret = ERR_PTR(-EAFNOSUPPORT);
2814 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2815 xprt->address_strings[RPC_DISPLAY_ADDR]);
2817 if (try_module_get(THIS_MODULE))
2819 ret = ERR_PTR(-EINVAL);
2825 static const struct rpc_timeout xs_udp_default_timeout = {
2826 .to_initval = 5 * HZ,
2827 .to_maxval = 30 * HZ,
2828 .to_increment = 5 * HZ,
2833 * xs_setup_udp - Set up transport to use a UDP socket
2834 * @args: rpc transport creation arguments
2837 static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2839 struct sockaddr *addr = args->dstaddr;
2840 struct rpc_xprt *xprt;
2841 struct sock_xprt *transport;
2842 struct rpc_xprt *ret;
2844 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2845 xprt_udp_slot_table_entries);
2848 transport = container_of(xprt, struct sock_xprt, xprt);
2850 xprt->prot = IPPROTO_UDP;
2851 /* XXX: header size can vary due to auth type, IPv6, etc. */
2852 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2854 xprt->bind_timeout = XS_BIND_TO;
2855 xprt->reestablish_timeout = XS_UDP_REEST_TO;
2856 xprt->idle_timeout = XS_IDLE_DISC_TO;
2858 xprt->ops = &xs_udp_ops;
2860 xprt->timeout = &xs_udp_default_timeout;
2862 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
2863 INIT_WORK(&transport->error_worker, xs_error_handle);
2864 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
2866 switch (addr->sa_family) {
2868 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2869 xprt_set_bound(xprt);
2871 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2874 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2875 xprt_set_bound(xprt);
2877 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2880 ret = ERR_PTR(-EAFNOSUPPORT);
2884 if (xprt_bound(xprt))
2885 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2886 xprt->address_strings[RPC_DISPLAY_ADDR],
2887 xprt->address_strings[RPC_DISPLAY_PORT],
2888 xprt->address_strings[RPC_DISPLAY_PROTO]);
2890 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2891 xprt->address_strings[RPC_DISPLAY_ADDR],
2892 xprt->address_strings[RPC_DISPLAY_PROTO]);
2894 if (try_module_get(THIS_MODULE))
2896 ret = ERR_PTR(-EINVAL);
2902 static const struct rpc_timeout xs_tcp_default_timeout = {
2903 .to_initval = 60 * HZ,
2904 .to_maxval = 60 * HZ,
2909 * xs_setup_tcp - Set up transport to use a TCP socket
2910 * @args: rpc transport creation arguments
2913 static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2915 struct sockaddr *addr = args->dstaddr;
2916 struct rpc_xprt *xprt;
2917 struct sock_xprt *transport;
2918 struct rpc_xprt *ret;
2919 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2921 if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2922 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2924 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2925 max_slot_table_size);
2928 transport = container_of(xprt, struct sock_xprt, xprt);
2930 xprt->prot = IPPROTO_TCP;
2931 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2933 xprt->bind_timeout = XS_BIND_TO;
2934 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2935 xprt->idle_timeout = XS_IDLE_DISC_TO;
2937 xprt->ops = &xs_tcp_ops;
2938 xprt->timeout = &xs_tcp_default_timeout;
2940 xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
2941 xprt->connect_timeout = xprt->timeout->to_initval *
2942 (xprt->timeout->to_retries + 1);
2944 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2945 INIT_WORK(&transport->error_worker, xs_error_handle);
2946 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
2948 switch (addr->sa_family) {
2950 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2951 xprt_set_bound(xprt);
2953 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
2956 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2957 xprt_set_bound(xprt);
2959 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2962 ret = ERR_PTR(-EAFNOSUPPORT);
2966 if (xprt_bound(xprt))
2967 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2968 xprt->address_strings[RPC_DISPLAY_ADDR],
2969 xprt->address_strings[RPC_DISPLAY_PORT],
2970 xprt->address_strings[RPC_DISPLAY_PROTO]);
2972 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2973 xprt->address_strings[RPC_DISPLAY_ADDR],
2974 xprt->address_strings[RPC_DISPLAY_PROTO]);
2976 if (try_module_get(THIS_MODULE))
2978 ret = ERR_PTR(-EINVAL);
2985 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
2986 * @args: rpc transport creation arguments
2989 static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2991 struct sockaddr *addr = args->dstaddr;
2992 struct rpc_xprt *xprt;
2993 struct sock_xprt *transport;
2994 struct svc_sock *bc_sock;
2995 struct rpc_xprt *ret;
2997 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2998 xprt_tcp_slot_table_entries);
3001 transport = container_of(xprt, struct sock_xprt, xprt);
3003 xprt->prot = IPPROTO_TCP;
3004 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3005 xprt->timeout = &xs_tcp_default_timeout;
3008 xprt_set_bound(xprt);
3009 xprt->bind_timeout = 0;
3010 xprt->reestablish_timeout = 0;
3011 xprt->idle_timeout = 0;
3013 xprt->ops = &bc_tcp_ops;
3015 switch (addr->sa_family) {
3017 xs_format_peer_addresses(xprt, "tcp",
3021 xs_format_peer_addresses(xprt, "tcp",
3022 RPCBIND_NETID_TCP6);
3025 ret = ERR_PTR(-EAFNOSUPPORT);
3029 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3030 xprt->address_strings[RPC_DISPLAY_ADDR],
3031 xprt->address_strings[RPC_DISPLAY_PORT],
3032 xprt->address_strings[RPC_DISPLAY_PROTO]);
3035 * Once we've associated a backchannel xprt with a connection,
3036 * we want to keep it around as long as the connection lasts,
3037 * in case we need to start using it for a backchannel again;
3038 * this reference won't be dropped until bc_xprt is destroyed.
3041 args->bc_xprt->xpt_bc_xprt = xprt;
3042 xprt->bc_xprt = args->bc_xprt;
3043 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3044 transport->sock = bc_sock->sk_sock;
3045 transport->inet = bc_sock->sk_sk;
3048 * Since we don't want connections for the backchannel, we set
3049 * the xprt status to connected
3051 xprt_set_connected(xprt);
3053 if (try_module_get(THIS_MODULE))
3056 args->bc_xprt->xpt_bc_xprt = NULL;
3057 args->bc_xprt->xpt_bc_xps = NULL;
3059 ret = ERR_PTR(-EINVAL);
3065 static struct xprt_class xs_local_transport = {
3066 .list = LIST_HEAD_INIT(xs_local_transport.list),
3067 .name = "named UNIX socket",
3068 .owner = THIS_MODULE,
3069 .ident = XPRT_TRANSPORT_LOCAL,
3070 .setup = xs_setup_local,
3074 static struct xprt_class xs_udp_transport = {
3075 .list = LIST_HEAD_INIT(xs_udp_transport.list),
3077 .owner = THIS_MODULE,
3078 .ident = XPRT_TRANSPORT_UDP,
3079 .setup = xs_setup_udp,
3080 .netid = { "udp", "udp6", "" },
3083 static struct xprt_class xs_tcp_transport = {
3084 .list = LIST_HEAD_INIT(xs_tcp_transport.list),
3086 .owner = THIS_MODULE,
3087 .ident = XPRT_TRANSPORT_TCP,
3088 .setup = xs_setup_tcp,
3089 .netid = { "tcp", "tcp6", "" },
3092 static struct xprt_class xs_bc_tcp_transport = {
3093 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3094 .name = "tcp NFSv4.1 backchannel",
3095 .owner = THIS_MODULE,
3096 .ident = XPRT_TRANSPORT_BC_TCP,
3097 .setup = xs_setup_bc_tcp,
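/* Once registered (see init_socket_xprt() below), these classes are looked up
 * by the generic transport layer.  A minimal, illustrative caller -- assuming
 * the xprt_create arguments and the xprt_create_transport() entry point from
 * net/sunrpc/xprt.c -- might create a TCP transport like this:
 */
static struct rpc_xprt *example_create_tcp_xprt(struct net *net,
						struct sockaddr *addr,
						size_t addrlen)
{
	struct xprt_create args = {
		.ident		= XPRT_TRANSPORT_TCP,	/* matches xs_tcp_transport.ident */
		.net		= net,
		.dstaddr	= addr,
		.addrlen	= addrlen,
	};

	/* Dispatches to xs_setup_tcp() via the registered xprt_class. */
	return xprt_create_transport(&args);
}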
3102 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3105 int init_socket_xprt(void)
3107 if (!sunrpc_table_header)
3108 sunrpc_table_header = register_sysctl_table(sunrpc_table);
3110 xprt_register_transport(&xs_local_transport);
3111 xprt_register_transport(&xs_udp_transport);
3112 xprt_register_transport(&xs_tcp_transport);
3113 xprt_register_transport(&xs_bc_tcp_transport);
3119 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3122 void cleanup_socket_xprt(void)
3124 if (sunrpc_table_header) {
3125 unregister_sysctl_table(sunrpc_table_header);
3126 sunrpc_table_header = NULL;
3129 xprt_unregister_transport(&xs_local_transport);
3130 xprt_unregister_transport(&xs_udp_transport);
3131 xprt_unregister_transport(&xs_tcp_transport);
3132 xprt_unregister_transport(&xs_bc_tcp_transport);
3135 static int param_set_uint_minmax(const char *val,
3136 const struct kernel_param *kp,
3137 unsigned int min, unsigned int max)
3144 ret = kstrtouint(val, 0, &num);
3147 if (num < min || num > max)
3149 *((unsigned int *)kp->arg) = num;
3153 static int param_set_portnr(const char *val, const struct kernel_param *kp)
3155 return param_set_uint_minmax(val, kp,
3160 static const struct kernel_param_ops param_ops_portnr = {
3161 .set = param_set_portnr,
3162 .get = param_get_uint,
3165 #define param_check_portnr(name, p) \
3166 __param_check(name, p, unsigned int);
3168 module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3169 module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
3171 static int param_set_slot_table_size(const char *val,
3172 const struct kernel_param *kp)
3174 return param_set_uint_minmax(val, kp,
3176 RPC_MAX_SLOT_TABLE);
3179 static const struct kernel_param_ops param_ops_slot_table_size = {
3180 .set = param_set_slot_table_size,
3181 .get = param_get_uint,
3184 #define param_check_slot_table_size(name, p) \
3185 __param_check(name, p, unsigned int);
3187 static int param_set_max_slot_table_size(const char *val,
3188 const struct kernel_param *kp)
3190 return param_set_uint_minmax(val, kp,
3192 RPC_MAX_SLOT_TABLE_LIMIT);
3195 static const struct kernel_param_ops param_ops_max_slot_table_size = {
3196 .set = param_set_max_slot_table_size,
3197 .get = param_get_uint,
3200 #define param_check_max_slot_table_size(name, p) \
3201 __param_check(name, p, unsigned int);
3203 module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3204 slot_table_size, 0644);
3205 module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3206 max_slot_table_size, 0644);
3207 module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3208 slot_table_size, 0644);
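/* The parameters declared above appear under the sunrpc module (assuming
 * xprtsock is built into sunrpc.ko, as in a standard configuration), e.g.
 * /sys/module/sunrpc/parameters/tcp_slot_table_entries.  They can be set at
 * load time ("modprobe sunrpc min_resvport=800") or, since they are 0644,
 * written at runtime by root; out-of-range values are rejected by
 * param_set_uint_minmax().
 */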