/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

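/*
 * Linux-specific socket glue for the socket LND (socklnd): peer address
 * lookup, zero-copy capability probing, iov/kiov send and receive paths,
 * message checksumming, socket option setup, and the sk_* callbacks that
 * feed connection events back to the scheduler.
 */
#include "socklnd.h"
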
int
ksocknal_lib_get_conn_addrs (ksock_conn_t *conn)
{
        int rc = libcfs_sock_getaddr(conn->ksnc_sock, 1,
                                     &conn->ksnc_ipaddr, &conn->ksnc_port);

        /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
        LASSERT (!conn->ksnc_closing);

        if (rc != 0) {
                CERROR ("Error %d getting sock peer IP\n", rc);
                return rc;
        }

        rc = libcfs_sock_getaddr(conn->ksnc_sock, 0,
                                 &conn->ksnc_myipaddr, NULL);
        if (rc != 0) {
                CERROR ("Error %d getting sock local IP\n", rc);
                return rc;
        }

        return 0;
}

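/*
 * Zero-copy send is only a win when the NIC can take scatter/gather
 * fragments and checksum them itself; if software must touch every byte
 * anyway, copying into a contiguous buffer costs nothing extra.
 */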
int
ksocknal_lib_zc_capable(ksock_conn_t *conn)
{
        int caps = conn->ksnc_sock->sk->sk_route_caps;

        if (conn->ksnc_proto == &ksocknal_protocol_v1x)
                return 0;

        /* ZC if the socket supports scatter/gather and doesn't need software
         * checksums */
        return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_ALL_CSUM) != 0);
}

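/*
 * Send from the tx's iovs via kernel_sendmsg().  MSG_MORE is set whenever
 * more data is known to follow (other tx's queued on the conn, or this tx
 * not yet fully sent) so TCP can coalesce rather than push short segments.
 */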
int
ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        int            nob;
        int            rc;

        if (*ksocknal_tunables.ksnd_enable_csum &&         /* checksum enabled */
            conn->ksnc_proto == &ksocknal_protocol_v2x &&  /* V2.x connection */
            tx->tx_nob == tx->tx_resid &&                  /* first sending */
            tx->tx_msg.ksm_csum == 0)                      /* not checksummed */
                ksocknal_lib_csum_tx(tx);

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        {
#if SOCKNAL_SINGLE_FRAG_TX
                struct iovec  scratch;
                struct iovec *scratchiov = &scratch;
                unsigned int  niov = 1;
#else
                struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
                unsigned int  niov = tx->tx_niov;
#endif
                struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
                int           i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i] = tx->tx_iov[i];
                        nob += scratchiov[i].iov_len;
                }

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov,
                                    niov, nob);
        }
        return rc;
}

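/*
 * Send from the tx's page fragments (kiovs).  If the message carries a
 * zero-copy cookie the first fragment's page is handed straight to the
 * protocol's sendpage(); otherwise every page is kmap()ed into a scratch
 * iov and pushed through kernel_sendmsg() as in ksocknal_lib_send_iov().
 */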
int
ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
{
        struct socket *sock = conn->ksnc_sock;
        lnet_kiov_t   *kiov = tx->tx_kiov;
        int            rc;
        int            nob;

        /* Not NOOP message */
        LASSERT (tx->tx_lnetmsg != NULL);

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
                /* Zero copy is enabled */
                struct sock *sk = sock->sk;
                struct page *page = kiov->kiov_page;
                int          offset = kiov->kiov_offset;
                int          fragsize = kiov->kiov_len;
                int          msgflg = MSG_DONTWAIT;

                CDEBUG(D_NET, "page %p + offset %x for %d\n",
                       page, offset, kiov->kiov_len);

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    fragsize < tx->tx_resid)
                        msgflg |= MSG_MORE;

                if (sk->sk_prot->sendpage != NULL) {
                        rc = sk->sk_prot->sendpage(sk, page,
                                                   offset, fragsize, msgflg);
                } else {
                        rc = cfs_tcp_sendpage(sk, page, offset, fragsize,
                                              msgflg);
                }
        } else {
#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK
                struct iovec  scratch;
                struct iovec *scratchiov = &scratch;
                unsigned int  niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
                struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
                unsigned int  niov = tx->tx_nkiov;
#endif
                struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
                int           i;

                for (nob = i = 0; i < niov; i++) {
                        scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                                 kiov[i].kiov_offset;
                        nob += scratchiov[i].iov_len = kiov[i].kiov_len;
                }

                if (!list_empty(&conn->ksnc_tx_queue) ||
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;

                rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov,
                                    niov, nob);

                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].kiov_page);
        }
        return rc;
}

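/*
 * TCP_QUICKACK disables delayed ACK for the moment, which matters for
 * zero-copy: the peer can't complete a ZC send until the data is ACKed.
 * In userspace terms this is the familiar pattern (illustrative only):
 *
 *      int one = 1;
 *      setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
 *
 * done here through sock->ops under set_fs(KERNEL_DS) because the option
 * value lives in kernel space.
 */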
void
ksocknal_lib_eager_ack (ksock_conn_t *conn)
{
        int            opt = 1;
        mm_segment_t   oldmm = get_fs();
        struct socket *sock = conn->ksnc_sock;

        /* Remind the socket to ACK eagerly.  If I don't, the socket might
         * think I'm about to send something it could piggy-back the ACK
         * on, introducing delay in completing zero-copy sends in my
         * peer. */

        set_fs(KERNEL_DS);
        sock->ops->setsockopt (sock, SOL_TCP, TCP_QUICKACK,
                               (char *)&opt, sizeof (opt));
        set_fs(oldmm);
}

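/*
 * Receive into the rx iovs.  ksocknal_csum accumulates fragment by
 * fragment, conceptually:
 *
 *      csum = ksocknal_csum(~0,   frag0, len0);
 *      csum = ksocknal_csum(csum, frag1, len1);
 *
 * so the loop below can fold in exactly the rc bytes that sock_recvmsg()
 * actually delivered, clamping the final fragment to what remains.
 */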
int
ksocknal_lib_recv_iov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX
        struct iovec  scratch;
        struct iovec *scratchiov = &scratch;
        unsigned int  niov = 1;
#else
        struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        unsigned int  niov = conn->ksnc_rx_niov;
#endif
        struct iovec *iov = conn->ksnc_rx_iov;
        struct msghdr msg = {
                .msg_iov    = scratchiov,
                .msg_iovlen = niov,
                .msg_flags  = 0
        };
        mm_segment_t  oldmm = get_fs();
        int           nob;
        int           i;
        int           rc;
        int           fragnob;
        int           sum;
        __u32         saved_csum;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        for (nob = i = 0; i < niov; i++) {
                scratchiov[i] = iov[i];
                nob += scratchiov[i].iov_len;
        }
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

        set_fs (KERNEL_DS);
        rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
        /* NB this is just a boolean..........................^ */
        set_fs (oldmm);

        saved_csum = 0;
        if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
                saved_csum = conn->ksnc_msg.ksm_csum;
                conn->ksnc_msg.ksm_csum = 0;
        }

        if (saved_csum != 0) {
                /* accumulate checksum */
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT (i < niov);

                        fragnob = iov[i].iov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           iov[i].iov_base,
                                                           fragnob);
                }
                conn->ksnc_msg.ksm_csum = saved_csum;
        }

        return rc;
}

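/*
 * Zero-copy receive helpers.  For a large multi-fragment read, vmap()ing
 * the fragment pages into one contiguous virtual range lets a single iov
 * cover the whole read and avoids per-page kmap churn.  The vmap is
 * refused (NULL) when the tunables forbid it, the fragment count is below
 * ksnd_zc_recv_min_nfrags, or the fragments aren't contiguous page-aligned
 * pieces; the caller then falls back to kmap()ing each page.
 */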
static void
ksocknal_lib_kiov_vunmap(void *addr)
{
        if (addr == NULL)
                return;

        vunmap(addr);
}

static void *
ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
                       struct iovec *iov, struct page **pages)
{
        void *addr;
        int   nob;
        int   i;

        if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
                return NULL;

        LASSERT (niov <= LNET_MAX_IOV);

        if (niov < 2 ||
            niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
                return NULL;

        for (nob = i = 0; i < niov; i++) {
                if ((kiov[i].kiov_offset != 0 && i > 0) ||
                    (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
                        return NULL;

                pages[i] = kiov[i].kiov_page;
                nob += kiov[i].kiov_len;
        }

        addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
        if (addr == NULL)
                return NULL;

        iov->iov_base = addr + kiov[0].kiov_offset;
        iov->iov_len = nob;

        return addr;
}

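/*
 * Receive into the rx page fragments: try the vmap fast path first, fall
 * back to per-page kmap, then fold the received bytes into the
 * connection's running checksum when the sender supplied one.
 */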
int
ksocknal_lib_recv_kiov (ksock_conn_t *conn)
{
#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct iovec   scratch;
        struct iovec  *scratchiov = &scratch;
        struct page  **pages = NULL;
        unsigned int   niov = 1;
#else
#ifdef CONFIG_HIGHMEM
#warning "XXX risk of kmap deadlock on multiple frags..."
#endif
        struct iovec  *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
        struct page  **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs;
        unsigned int   niov = conn->ksnc_rx_nkiov;
#endif
        lnet_kiov_t   *kiov = conn->ksnc_rx_kiov;
        struct msghdr  msg = {
                .msg_iov   = scratchiov,
                .msg_flags = 0
        };
        mm_segment_t   oldmm = get_fs();
        int            nob;
        int            i;
        int            rc;
        void          *base;
        void          *addr;
        int            sum;
        int            fragnob;

        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        if ((addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages)) != NULL) {
                nob = scratchiov[0].iov_len;
                msg.msg_iovlen = 1;
        } else {
                for (nob = i = 0; i < niov; i++) {
                        nob += scratchiov[i].iov_len = kiov[i].kiov_len;
                        scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                                 kiov[i].kiov_offset;
                }
                msg.msg_iovlen = niov;
        }

        LASSERT (nob <= conn->ksnc_rx_nob_wanted);

        set_fs (KERNEL_DS);
        rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
        /* NB this is just a boolean.......................^ */
        set_fs (oldmm);

        if (conn->ksnc_msg.ksm_csum != 0) {
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
                        LASSERT (i < niov);

                        /* Dang! have to kmap again because I have nowhere to
                         * stash the mapped address.  But by doing it while the
                         * page is still mapped, the kernel just bumps the map
                         * count and returns me the address it stashed. */
                        base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
                        fragnob = kiov[i].kiov_len;
                        if (fragnob > sum)
                                fragnob = sum;

                        conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
                                                           base, fragnob);
                        kunmap(kiov[i].kiov_page);
                }
        }

        if (addr != NULL) {
                ksocknal_lib_kiov_vunmap(addr);
        } else {
                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].kiov_page);
        }

        return rc;
}

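/*
 * Compute the V2.x message checksum at send time: seed over the message
 * header (always iov[0]), then fold in the payload from either the kiovs
 * or the remaining iovs.  The ksnd_inject_csum_error tunable lets tests
 * corrupt exactly one checksum on demand.
 */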
void
ksocknal_lib_csum_tx(ksock_tx_t *tx)
{
        int    i;
        __u32  csum;
        void  *base;

        LASSERT(tx->tx_iov[0].iov_base == (void *)&tx->tx_msg);
        LASSERT(tx->tx_conn != NULL);
        LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);

        tx->tx_msg.ksm_csum = 0;

        csum = ksocknal_csum(~0, (void *)tx->tx_iov[0].iov_base,
                             tx->tx_iov[0].iov_len);

        if (tx->tx_kiov != NULL) {
                for (i = 0; i < tx->tx_nkiov; i++) {
                        base = kmap(tx->tx_kiov[i].kiov_page) +
                               tx->tx_kiov[i].kiov_offset;

                        csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);

                        kunmap(tx->tx_kiov[i].kiov_page);
                }
        } else {
                for (i = 1; i < tx->tx_niov; i++)
                        csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base,
                                             tx->tx_iov[i].iov_len);
        }

        if (*ksocknal_tunables.ksnd_inject_csum_error) {
                csum++;
                *ksocknal_tunables.ksnd_inject_csum_error = 0;
        }

        tx->tx_msg.ksm_csum = csum;
}

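/*
 * Snapshot a live connection's socket parameters: send/receive buffer
 * sizes and whether Nagle is enabled (*nagle is the inverse of
 * TCP_NODELAY).  Fails with -ESHUTDOWN if the conn is already closing.
 */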
int
ksocknal_lib_get_conn_tunables (ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
{
        mm_segment_t   oldmm = get_fs ();
        struct socket *sock = conn->ksnc_sock;
        int            len;
        int            rc;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
                LASSERT (conn->ksnc_closing);
                *txmem = *rxmem = *nagle = 0;
                return -ESHUTDOWN;
        }

        rc = libcfs_sock_getbuf(sock, txmem, rxmem);
        if (rc == 0) {
                len = sizeof(*nagle);
                set_fs(KERNEL_DS);
                rc = sock->ops->getsockopt(sock, SOL_TCP, TCP_NODELAY,
                                           (char *)nagle, &len);
                set_fs(oldmm);
        }

        ksocknal_connsock_decref(conn);

        if (rc == 0)
                *nagle = !*nagle;
        else
                *txmem = *rxmem = *nagle = 0;

        return rc;
}

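/*
 * Configure a freshly created socket: don't linger on close, optionally
 * disable Nagle, size the buffers from the tunables, and turn on TCP
 * keepalives iff all three keepalive tunables are positive.
 */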
int
ksocknal_lib_setup_sock (struct socket *sock)
{
        mm_segment_t  oldmm = get_fs ();
        int           rc;
        int           option;
        int           keep_idle;
        int           keep_intvl;
        int           keep_count;
        int           do_keepalive;
        struct linger linger;

        sock->sk->sk_allocation = GFP_NOFS;

        /* Ensure this socket aborts active sends immediately when we close
         * it. */
        linger.l_onoff = 0;
        linger.l_linger = 0;

        set_fs (KERNEL_DS);
        rc = sock_setsockopt (sock, SOL_SOCKET, SO_LINGER,
                              (char *)&linger, sizeof (linger));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set SO_LINGER: %d\n", rc);
                return rc;
        }

        option = -1;
        set_fs (KERNEL_DS);
        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_LINGER2,
                                    (char *)&option, sizeof (option));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set TCP_LINGER2: %d\n", rc);
                return rc;
        }

        if (!*ksocknal_tunables.ksnd_nagle) {
                option = 1;
                set_fs (KERNEL_DS);
                rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_NODELAY,
                                            (char *)&option, sizeof (option));
                set_fs (oldmm);
                if (rc != 0) {
                        CERROR ("Can't disable nagle: %d\n", rc);
                        return rc;
                }
        }

        rc = libcfs_sock_setbuf(sock,
                                *ksocknal_tunables.ksnd_tx_buffer_size,
                                *ksocknal_tunables.ksnd_rx_buffer_size);
        if (rc != 0) {
                CERROR ("Can't set buffer tx %d, rx %d buffers: %d\n",
                        *ksocknal_tunables.ksnd_tx_buffer_size,
                        *ksocknal_tunables.ksnd_rx_buffer_size, rc);
                return rc;
        }

/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */

        /* snapshot tunables */
        keep_idle  = *ksocknal_tunables.ksnd_keepalive_idle;
        keep_count = *ksocknal_tunables.ksnd_keepalive_count;
        keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl;

        do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);

        option = (do_keepalive ? 1 : 0);
        set_fs (KERNEL_DS);
        rc = sock_setsockopt (sock, SOL_SOCKET, SO_KEEPALIVE,
                              (char *)&option, sizeof (option));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set SO_KEEPALIVE: %d\n", rc);
                return rc;
        }

        if (!do_keepalive)
                return 0;

        set_fs (KERNEL_DS);
        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPIDLE,
                                    (char *)&keep_idle, sizeof (keep_idle));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set TCP_KEEPIDLE: %d\n", rc);
                return rc;
        }

        set_fs (KERNEL_DS);
        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPINTVL,
                                    (char *)&keep_intvl, sizeof (keep_intvl));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set TCP_KEEPINTVL: %d\n", rc);
                return rc;
        }

        set_fs (KERNEL_DS);
        rc = sock->ops->setsockopt (sock, SOL_TCP, TCP_KEEPCNT,
                                    (char *)&keep_count, sizeof (keep_count));
        set_fs (oldmm);
        if (rc != 0) {
                CERROR ("Can't set TCP_KEEPCNT: %d\n", rc);
                return rc;
        }

        return 0;
}

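/*
 * Nudge TCP into transmitting anything it has buffered by briefly forcing
 * nonagle and poking TCP_NODELAY, then restoring the previous setting.
 */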
void
ksocknal_lib_push_conn (ksock_conn_t *conn)
{
        struct sock     *sk;
        struct tcp_sock *tp;
        int              nonagle;
        int              val = 1;
        int              rc;
        mm_segment_t     oldmm;

        rc = ksocknal_connsock_addref(conn);
        if (rc != 0)                            /* being shut down */
                return;

        sk = conn->ksnc_sock->sk;
        tp = tcp_sk(sk);

        lock_sock(sk);
        nonagle = tp->nonagle;                  /* remember current setting */
        tp->nonagle = 1;
        release_sock(sk);

        oldmm = get_fs();
        set_fs(KERNEL_DS);
        rc = sk->sk_prot->setsockopt (sk, SOL_TCP, TCP_NODELAY,
                                      (char *)&val, sizeof (val));
        LASSERT (rc == 0);
        set_fs(oldmm);

        lock_sock(sk);
        tp->nonagle = nonagle;                  /* restore original setting */
        release_sock(sk);

        ksocknal_connsock_decref(conn);
}

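/*
 * The sk callbacks below take the global lock for reading so that
 * connection teardown, which takes it for writing and NULLs sk_user_data,
 * can serialise against them; a NULL sk_user_data means "raced with
 * close, chain to the socket's saved callback instead".
 */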
extern void ksocknal_read_callback (ksock_conn_t *conn);
extern void ksocknal_write_callback (ksock_conn_t *conn);

/*
 * Socket callbacks (Linux)
 */
static void
ksocknal_data_ready (struct sock *sk, int n)
{
        ksock_conn_t *conn;

        /* interleave correctly with closing sockets... */
        read_lock(&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        if (conn == NULL) {             /* raced with ksocknal_terminate_conn */
                LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
                sk->sk_data_ready (sk, n);
        } else {
                ksocknal_read_callback(conn);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
}

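/*
 * Only wake the scheduler once free space has climbed back above the
 * socket's low-water mark, so a trickle of ACKed bytes doesn't cause a
 * wakeup per returning segment.
 */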
static void
ksocknal_write_space (struct sock *sk)
{
        ksock_conn_t *conn;
        int           wspace;
        int           min_wspace;

        /* interleave correctly with closing sockets... */
        read_lock(&ksocknal_data.ksnd_global_lock);

        conn = sk->sk_user_data;
        wspace = SOCKNAL_WSPACE(sk);
        min_wspace = SOCKNAL_MIN_WSPACE(sk);

        CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
               sk, wspace, min_wspace, conn,
               (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
                                      " ready" : " blocked"),
               (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
                                      " scheduled" : " idle"),
               (conn == NULL) ? "" : (list_empty (&conn->ksnc_tx_queue) ?
                                      " empty" : " queued"));

        if (conn == NULL) {             /* raced with ksocknal_terminate_conn */
                LASSERT (sk->sk_write_space != &ksocknal_write_space);
                sk->sk_write_space (sk);

                read_unlock(&ksocknal_data.ksnd_global_lock);
                return;
        }

        if (wspace >= min_wspace) {     /* got enough space */
                ksocknal_write_callback(conn);

                /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
                 * ENOMEM check in ksocknal_transmit is race-free (think about
                 * it). */
                clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
        }

        read_unlock(&ksocknal_data.ksnd_global_lock);
}

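/*
 * Callback lifecycle: save the socket's original sk_data_ready and
 * sk_write_space before hooking, install ours for the life of the conn,
 * and restore the originals at teardown.
 */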
void
ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
{
        conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
        conn->ksnc_saved_write_space = sock->sk->sk_write_space;
}

void
ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
{
        sock->sk->sk_user_data = conn;
        sock->sk->sk_data_ready = ksocknal_data_ready;
        sock->sk->sk_write_space = ksocknal_write_space;
}

void
ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
{
        /* Remove conn's network callbacks.
         * NB I _have_ to restore the callback, rather than storing a noop,
         * since the socket could survive past this module being unloaded!! */
        sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
        sock->sk->sk_write_space = conn->ksnc_saved_write_space;

        /* A callback could be in progress already; they hold a read lock
         * on ksnd_global_lock (to serialise with me) and NOOP if
         * sk_user_data is NULL. */
        sock->sk->sk_user_data = NULL;
}

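/*
 * Decide, under the scheduler lock, whether a send that hit memory
 * pressure can count on the write_space callback to reschedule it; if
 * not, return -ENOMEM so the caller retries after a timeout.
 */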
int
ksocknal_lib_memory_pressure(ksock_conn_t *conn)
{
        int            rc = 0;
        ksock_sched_t *sched;

        sched = conn->ksnc_scheduler;
        spin_lock_bh(&sched->kss_lock);

        if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
            !conn->ksnc_tx_ready) {
                /* SOCK_NOSPACE is set when the socket fills
                 * and cleared in the write_space callback
                 * (which also sets ksnc_tx_ready).  If
                 * SOCK_NOSPACE and ksnc_tx_ready are BOTH
                 * zero, I didn't fill the socket and
                 * write_space won't reschedule me, so I
                 * return -ENOMEM to get my caller to retry
                 * after a timeout */
                rc = -ENOMEM;
        }

        spin_unlock_bh(&sched->kss_lock);

        return rc;
}