// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  AF_SMC protocol family socket handler keeping the AF_INET sock address type
 *  applies to SOCK_STREAM sockets only
 *  offers an alternative communication option for TCP-protocol sockets
 *  applicable with RoCE-cards only
 *
 *  Initial restrictions:
 *    - support for alternate links postponed
 *
 *  Copyright IBM Corp. 2016, 2018
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *              based on prototype from Frank Blaschka
 */
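/*
 * Illustrative user-space usage (an assumption added for documentation, not
 * code from this file): an AF_SMC socket is created like a TCP socket, only
 * family and protocol differ, e.g.
 *
 *	sd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);	  (IPv4 peers)
 *	sd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC6);  (IPv6 peers)
 *
 * followed by the usual bind()/listen()/connect()/accept() calls, which are
 * served by the proto_ops defined further down in this file.
 */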
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */
58 static void smc_tcp_listen_work(struct work_struct *);
59 static void smc_connect_work(struct work_struct *);
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}
static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};
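/* sk_prot hash/unhash callbacks: AF_SMC keeps its own hash tables
 * (smc_v4_hashinfo/smc_v6_hashinfo) and maintains the protocol's inuse
 * counters via sock_prot_inuse_add(), independently of the internal TCP
 * clcsock.
 */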
int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);
void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);
struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
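/* undo the file/private_data redirection that smc_switch_to_fallback() set
 * up, so the application's struct file points back at the SMC socket
 */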
static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	smc->clcsock->file->private_data = smc->sk.sk_socket;
	smc->clcsock->file = NULL;
}
133 static int __smc_release(struct smc_sock *smc)
135 struct sock *sk = &smc->sk;
138 if (!smc->use_fallback) {
139 rc = smc_close_active(smc);
140 sock_set_flag(sk, SOCK_DEAD);
141 sk->sk_shutdown |= SHUTDOWN_MASK;
143 if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
144 sock_put(sk); /* passive closing */
145 if (sk->sk_state == SMC_LISTEN) {
146 /* wake up clcsock accept */
147 rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
149 sk->sk_state = SMC_CLOSED;
150 sk->sk_state_change(sk);
151 smc_restore_fallback_changes(smc);
154 sk->sk_prot->unhash(sk);
156 if (sk->sk_state == SMC_CLOSED) {
159 smc_clcsock_release(smc);
162 if (!smc->use_fallback)
163 smc_conn_free(&smc->conn);
169 static int smc_release(struct socket *sock)
171 struct sock *sk = sock->sk;
172 struct smc_sock *smc;
178 sock_hold(sk); /* sock_put below */
181 /* cleanup for a dangling non-blocking connect */
182 if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
183 tcp_abort(smc->clcsock->sk, ECONNABORTED);
184 flush_work(&smc->connect_work);
	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);
194 rc = __smc_release(smc);
201 sock_put(sk); /* sock_hold above */
202 sock_put(sk); /* final sock_put */
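/* sk->sk_destruct callback: by the time it may do anything the socket must
 * already be closed and dead, so only refcount debug accounting is left
 */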
207 static void smc_destruct(struct sock *sk)
209 if (sk->sk_state != SMC_CLOSED)
211 if (!sock_flag(sk, SOCK_DEAD))
214 sk_refcnt_debug_dec(sk);
217 static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
220 struct smc_sock *smc;
224 prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
225 sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
229 sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
230 sk->sk_state = SMC_INIT;
231 sk->sk_destruct = smc_destruct;
232 sk->sk_protocol = protocol;
234 INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
235 INIT_WORK(&smc->connect_work, smc_connect_work);
236 INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
237 INIT_LIST_HEAD(&smc->accept_q);
238 spin_lock_init(&smc->accept_q_lock);
239 spin_lock_init(&smc->conn.send_lock);
240 sk->sk_prot->hash(sk);
241 sk_refcnt_debug_inc(sk);
242 mutex_init(&smc->clcsock_release_lock);
247 static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
250 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
251 struct sock *sk = sock->sk;
252 struct smc_sock *smc;
257 /* replicate tests from inet_bind(), to be safe wrt. future changes */
259 if (addr_len < sizeof(struct sockaddr_in))
263 if (addr->sin_family != AF_INET &&
264 addr->sin_family != AF_INET6 &&
265 addr->sin_family != AF_UNSPEC)
267 /* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
268 if (addr->sin_family == AF_UNSPEC &&
269 addr->sin_addr.s_addr != htonl(INADDR_ANY))
274 /* Check if socket is already active */
276 if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
279 smc->clcsock->sk->sk_reuse = sk->sk_reuse;
280 rc = kernel_bind(smc->clcsock, uaddr, addr_len);
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control of via setsockopt */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}
307 #define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
308 (1UL << SOCK_KEEPOPEN) | \
309 (1UL << SOCK_LINGER) | \
310 (1UL << SOCK_BROADCAST) | \
311 (1UL << SOCK_TIMESTAMP) | \
312 (1UL << SOCK_DBG) | \
313 (1UL << SOCK_RCVTSTAMP) | \
314 (1UL << SOCK_RCVTSTAMPNS) | \
315 (1UL << SOCK_LOCALROUTE) | \
316 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
317 (1UL << SOCK_RXQ_OVFL) | \
318 (1UL << SOCK_WIFI_STATUS) | \
319 (1UL << SOCK_NOFCS) | \
320 (1UL << SOCK_FILTER_LOCKED) | \
321 (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}
#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))

/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
340 /* register a new rmb, send confirm_rkey msg to register with peer */
341 static int smcr_link_reg_rmb(struct smc_link *link,
342 struct smc_buf_desc *rmb_desc, bool conf_rkey)
344 if (!rmb_desc->is_reg_mr[link->link_idx]) {
345 /* register memory region for new rmb */
346 if (smc_wr_reg_send(link, rmb_desc->mr_rx[link->link_idx])) {
347 rmb_desc->is_reg_err = true;
350 rmb_desc->is_reg_mr[link->link_idx] = true;
355 /* exchange confirm_rkey msg with peer */
356 if (!rmb_desc->is_conf_rkey) {
357 if (smc_llc_do_confirm_rkey(link, rmb_desc)) {
358 rmb_desc->is_reg_err = true;
361 rmb_desc->is_conf_rkey = true;
/* register the new rmb on all links */
static int smcr_lgr_reg_rmbs(struct smc_link_group *lgr,
			     struct smc_buf_desc *rmb_desc)
{
	int i, rc;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (lgr->lnk[i].state != SMC_LNK_ACTIVE)
			continue;
		rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc, true);
		if (rc)
			return rc;
	}
	return 0;
}
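/* CONFIRM LINK / ADD LINK handshake with the server for the first connection
 * of a new link group (client side)
 */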
382 static int smcr_clnt_conf_first_link(struct smc_sock *smc)
384 struct net *net = sock_net(smc->clcsock->sk);
385 struct smc_link *link = smc->conn.lnk;
389 /* receive CONFIRM LINK request from server over RoCE fabric */
390 rest = wait_for_completion_interruptible_timeout(
392 SMC_LLC_WAIT_FIRST_TIME);
394 struct smc_clc_msg_decline dclc;
396 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
397 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
398 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
401 if (link->llc_confirm_rc)
402 return SMC_CLC_DECL_RMBE_EC;
404 rc = smc_ib_modify_qp_rts(link);
406 return SMC_CLC_DECL_ERR_RDYLNK;
408 smc_wr_remember_qp_attr(link);
410 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc, false))
411 return SMC_CLC_DECL_ERR_REGRMB;
413 /* send CONFIRM LINK response over RoCE fabric */
414 rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
416 return SMC_CLC_DECL_TIMEOUT_CL;
418 /* receive ADD LINK request from server over RoCE fabric */
419 rest = wait_for_completion_interruptible_timeout(&link->llc_add,
422 struct smc_clc_msg_decline dclc;
424 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
425 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
426 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
429 /* send add link reject message, only one link supported for now */
430 rc = smc_llc_send_add_link(link,
431 link->smcibdev->mac[link->ibport - 1],
432 link->gid, SMC_LLC_RESP);
434 return SMC_CLC_DECL_TIMEOUT_AL;
436 smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
441 static void smcr_conn_save_peer_info(struct smc_sock *smc,
442 struct smc_clc_msg_accept_confirm *clc)
444 int bufsize = smc_uncompress_bufsize(clc->rmbe_size);
446 smc->conn.peer_rmbe_idx = clc->rmbe_idx;
447 smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
448 smc->conn.peer_rmbe_size = bufsize;
449 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
450 smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
453 static void smcd_conn_save_peer_info(struct smc_sock *smc,
454 struct smc_clc_msg_accept_confirm *clc)
456 int bufsize = smc_uncompress_bufsize(clc->dmbe_size);
458 smc->conn.peer_rmbe_idx = clc->dmbe_idx;
459 smc->conn.peer_token = clc->token;
460 /* msg header takes up space in the buffer */
461 smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
462 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
463 smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
466 static void smc_conn_save_peer_info(struct smc_sock *smc,
467 struct smc_clc_msg_accept_confirm *clc)
469 if (smc->conn.lgr->is_smcd)
470 smcd_conn_save_peer_info(smc, clc);
472 smcr_conn_save_peer_info(smc, clc);
475 static void smc_link_save_peer_info(struct smc_link *link,
476 struct smc_clc_msg_accept_confirm *clc)
478 link->peer_qpn = ntoh24(clc->qpn);
479 memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
480 memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
481 link->peer_psn = ntoh24(clc->psn);
482 link->peer_mtu = clc->qp_mtu;
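/* switch this socket to TCP fallback: from now on the internal clcsock
 * carries the traffic, so hand the application's struct file (and fasync
 * list) over to the clcsock
 */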
485 static void smc_switch_to_fallback(struct smc_sock *smc)
487 smc->use_fallback = true;
488 if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
489 smc->clcsock->file = smc->sk.sk_socket->file;
490 smc->clcsock->file->private_data = smc->clcsock;
491 smc->clcsock->wq.fasync_list =
492 smc->sk.sk_socket->wq.fasync_list;
496 /* fall back during connect */
497 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
499 smc_switch_to_fallback(smc);
500 smc->fallback_rsn = reason_code;
501 smc_copy_sock_settings_to_clc(smc);
502 smc->connect_nonblock = 0;
503 if (smc->sk.sk_state == SMC_INIT)
504 smc->sk.sk_state = SMC_ACTIVE;
508 /* decline and fall back during connect */
509 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
513 if (reason_code < 0) { /* error, fallback is not possible */
514 if (smc->sk.sk_state == SMC_INIT)
515 sock_put(&smc->sk); /* passive closing */
518 if (reason_code != SMC_CLC_DECL_PEERDECL) {
519 rc = smc_clc_send_decline(smc, reason_code);
521 if (smc->sk.sk_state == SMC_INIT)
522 sock_put(&smc->sk); /* passive closing */
526 return smc_connect_fallback(smc, reason_code);
529 /* abort connecting */
530 static int smc_connect_abort(struct smc_sock *smc, int reason_code,
533 bool is_smcd = smc->conn.lgr->is_smcd;
535 if (local_contact == SMC_FIRST_CONTACT)
536 smc_lgr_cleanup_early(&smc->conn);
538 smc_conn_free(&smc->conn);
	/* there is only one lgr role for SMC-D; use server lock */
	if (is_smcd)
		mutex_unlock(&smc_server_lgr_pending);
	else
		mutex_unlock(&smc_client_lgr_pending);
545 smc->connect_nonblock = 0;
/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}
/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev)
		return SMC_CLC_DECL_NOSMCDDEV;
	return 0;
}
574 /* Check for VLAN ID and register it on ISM device just for CLC handshake */
575 static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
576 struct smc_init_info *ini)
578 if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev, ini->vlan_id))
579 return SMC_CLC_DECL_ISMVLANERR;
/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
					struct smc_init_info *ini)
591 if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev, ini->vlan_id))
592 return SMC_CLC_DECL_CNFERR;
596 /* CLC handshake during connect */
597 static int smc_connect_clc(struct smc_sock *smc, int smc_type,
598 struct smc_clc_msg_accept_confirm *aclc,
599 struct smc_init_info *ini)
603 /* do inband token exchange */
604 rc = smc_clc_send_proposal(smc, smc_type, ini);
607 /* receive SMC Accept CLC message */
608 return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT,
612 /* setup for RDMA connection of client */
613 static int smc_connect_rdma(struct smc_sock *smc,
614 struct smc_clc_msg_accept_confirm *aclc,
615 struct smc_init_info *ini)
617 struct smc_link *link;
620 ini->is_smcd = false;
621 ini->ib_lcl = &aclc->lcl;
622 ini->ib_clcqpn = ntoh24(aclc->qpn);
623 ini->srv_first_contact = aclc->hdr.flag;
625 mutex_lock(&smc_client_lgr_pending);
626 reason_code = smc_conn_create(smc, ini);
628 mutex_unlock(&smc_client_lgr_pending);
631 link = smc->conn.lnk;
633 smc_conn_save_peer_info(smc, aclc);
635 /* create send buffer and rmb */
636 if (smc_buf_create(smc, false))
637 return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
638 ini->cln_first_contact);
640 if (ini->cln_first_contact == SMC_FIRST_CONTACT)
641 smc_link_save_peer_info(link, aclc);
643 if (smc_rmb_rtoken_handling(&smc->conn, link, aclc))
644 return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK,
645 ini->cln_first_contact);
650 if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
651 if (smc_ib_ready_link(link))
652 return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
653 ini->cln_first_contact);
655 if (smcr_lgr_reg_rmbs(smc->conn.lgr, smc->conn.rmb_desc))
656 return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
657 ini->cln_first_contact);
659 smc_rmb_sync_sg_for_device(&smc->conn);
661 reason_code = smc_clc_send_confirm(smc);
663 return smc_connect_abort(smc, reason_code,
664 ini->cln_first_contact);
668 if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
669 /* QP confirmation over RoCE fabric */
670 reason_code = smcr_clnt_conf_first_link(smc);
672 return smc_connect_abort(smc, reason_code,
673 ini->cln_first_contact);
675 mutex_unlock(&smc_client_lgr_pending);
677 smc_copy_sock_settings_to_clc(smc);
678 smc->connect_nonblock = 0;
679 if (smc->sk.sk_state == SMC_INIT)
680 smc->sk.sk_state = SMC_ACTIVE;
685 /* setup for ISM connection of client */
686 static int smc_connect_ism(struct smc_sock *smc,
687 struct smc_clc_msg_accept_confirm *aclc,
688 struct smc_init_info *ini)
693 ini->ism_gid = aclc->gid;
694 ini->srv_first_contact = aclc->hdr.flag;
696 /* there is only one lgr role for SMC-D; use server lock */
697 mutex_lock(&smc_server_lgr_pending);
698 rc = smc_conn_create(smc, ini);
700 mutex_unlock(&smc_server_lgr_pending);
704 /* Create send and receive buffers */
705 if (smc_buf_create(smc, true))
706 return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
707 ini->cln_first_contact);
709 smc_conn_save_peer_info(smc, aclc);
714 rc = smc_clc_send_confirm(smc);
716 return smc_connect_abort(smc, rc, ini->cln_first_contact);
717 mutex_unlock(&smc_server_lgr_pending);
719 smc_copy_sock_settings_to_clc(smc);
720 smc->connect_nonblock = 0;
721 if (smc->sk.sk_state == SMC_INIT)
722 smc->sk.sk_state = SMC_ACTIVE;
727 /* perform steps before actually connecting */
728 static int __smc_connect(struct smc_sock *smc)
730 bool ism_supported = false, rdma_supported = false;
731 struct smc_clc_msg_accept_confirm aclc;
732 struct smc_init_info ini = {0};
736 if (smc->use_fallback)
737 return smc_connect_fallback(smc, smc->fallback_rsn);
739 /* if peer has not signalled SMC-capability, fall back */
740 if (!tcp_sk(smc->clcsock->sk)->syn_smc)
741 return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
743 /* IPSec connections opt out of SMC-R optimizations */
744 if (using_ipsec(smc))
745 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);
747 /* get vlan id from IP device */
748 if (smc_vlan_by_tcpsk(smc->clcsock, &ini))
749 return smc_connect_decline_fallback(smc,
750 SMC_CLC_DECL_GETVLANERR);
752 /* check if there is an ism device available */
753 if (!smc_find_ism_device(smc, &ini) &&
754 !smc_connect_ism_vlan_setup(smc, &ini)) {
755 /* ISM is supported for this connection */
756 ism_supported = true;
757 smc_type = SMC_TYPE_D;
760 /* check if there is a rdma device available */
761 if (!smc_find_rdma_device(smc, &ini)) {
762 /* RDMA is supported for this connection */
763 rdma_supported = true;
765 smc_type = SMC_TYPE_B; /* both */
767 smc_type = SMC_TYPE_R; /* only RDMA */
770 /* if neither ISM nor RDMA are supported, fallback */
771 if (!rdma_supported && !ism_supported)
772 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV);
774 /* perform CLC handshake */
775 rc = smc_connect_clc(smc, smc_type, &aclc, &ini);
777 smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
778 return smc_connect_decline_fallback(smc, rc);
781 /* depending on previous steps, connect using rdma or ism */
782 if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
783 rc = smc_connect_rdma(smc, &aclc, &ini);
784 else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
785 rc = smc_connect_ism(smc, &aclc, &ini);
787 rc = SMC_CLC_DECL_MODEUNSUPP;
789 smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
790 return smc_connect_decline_fallback(smc, rc);
793 smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
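/* worker for non-blocking connect: wait for the TCP handshake on the clcsock
 * to complete, then run the SMC handshake via __smc_connect()
 */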
797 static void smc_connect_work(struct work_struct *work)
799 struct smc_sock *smc = container_of(work, struct smc_sock,
801 long timeo = smc->sk.sk_sndtimeo;
805 timeo = MAX_SCHEDULE_TIMEOUT;
806 lock_sock(smc->clcsock->sk);
807 if (smc->clcsock->sk->sk_err) {
808 smc->sk.sk_err = smc->clcsock->sk->sk_err;
809 } else if ((1 << smc->clcsock->sk->sk_state) &
		   (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
811 rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
812 if ((rc == -EPIPE) &&
813 ((1 << smc->clcsock->sk->sk_state) &
814 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
817 release_sock(smc->clcsock->sk);
819 if (rc != 0 || smc->sk.sk_err) {
820 smc->sk.sk_state = SMC_CLOSED;
821 if (rc == -EPIPE || rc == -EAGAIN)
822 smc->sk.sk_err = EPIPE;
823 else if (signal_pending(current))
824 smc->sk.sk_err = -sock_intr_errno(timeo);
825 sock_put(&smc->sk); /* passive closing */
829 rc = __smc_connect(smc);
831 smc->sk.sk_err = -rc;
834 if (!sock_flag(&smc->sk, SOCK_DEAD)) {
835 if (smc->sk.sk_err) {
836 smc->sk.sk_state_change(&smc->sk);
837 } else { /* allow polling before and after fallback decision */
838 smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
839 smc->sk.sk_write_space(&smc->sk);
842 release_sock(&smc->sk);
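/* connect() entry point: replicate the address checks of the clcsock, mark
 * the connection SMC-capable (syn_smc), and run the SMC handshake either
 * inline or deferred to smc_connect_work() for non-blocking sockets
 */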
845 static int smc_connect(struct socket *sock, struct sockaddr *addr,
848 struct sock *sk = sock->sk;
849 struct smc_sock *smc;
854 /* separate smc parameter checking to be safe */
855 if (alen < sizeof(addr->sa_family))
857 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
861 switch (sk->sk_state) {
872 smc_copy_sock_settings_to_clc(smc);
873 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
874 if (smc->connect_nonblock) {
878 rc = kernel_connect(smc->clcsock, addr, alen, flags);
879 if (rc && rc != -EINPROGRESS)
882 sock_hold(&smc->sk); /* sock put in passive closing */
883 if (smc->use_fallback)
885 if (flags & O_NONBLOCK) {
886 if (schedule_work(&smc->connect_work))
887 smc->connect_nonblock = 1;
890 rc = __smc_connect(smc);
894 rc = 0; /* success cases including fallback */
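/* accept a connection on the internal clcsock and allocate a new SMC socket
 * for it; called from the tcp listen worker
 */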
903 static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
905 struct socket *new_clcsock = NULL;
906 struct sock *lsk = &lsmc->sk;
911 new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
914 lsk->sk_err = ENOMEM;
919 *new_smc = smc_sk(new_sk);
921 mutex_lock(&lsmc->clcsock_release_lock);
923 rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
924 mutex_unlock(&lsmc->clcsock_release_lock);
928 if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
929 new_sk->sk_prot->unhash(new_sk);
931 sock_release(new_clcsock);
932 new_sk->sk_state = SMC_CLOSED;
933 sock_set_flag(new_sk, SOCK_DEAD);
934 sock_put(new_sk); /* final */
939 (*new_smc)->clcsock = new_clcsock;
/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
949 struct smc_sock *par = smc_sk(parent);
951 sock_hold(sk); /* sock_put in smc_accept_unlink () */
952 spin_lock(&par->accept_q_lock);
953 list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
954 spin_unlock(&par->accept_q_lock);
955 sk_acceptq_added(parent);
958 /* remove a socket from the accept queue of its parental listening socket */
959 static void smc_accept_unlink(struct sock *sk)
961 struct smc_sock *par = smc_sk(sk)->listen_smc;
963 spin_lock(&par->accept_q_lock);
964 list_del_init(&smc_sk(sk)->accept_q);
965 spin_unlock(&par->accept_q_lock);
966 sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
967 sock_put(sk); /* sock_hold in smc_accept_enqueue */
/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
974 struct socket *new_sock)
976 struct smc_sock *isk, *n;
979 list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
980 new_sk = (struct sock *)isk;
982 smc_accept_unlink(new_sk);
983 if (new_sk->sk_state == SMC_CLOSED) {
984 new_sk->sk_prot->unhash(new_sk);
986 sock_release(isk->clcsock);
989 sock_put(new_sk); /* final */
993 sock_graft(new_sk, new_sock);
994 if (isk->use_fallback) {
995 smc_sk(new_sk)->clcsock->file = new_sock->file;
996 isk->clcsock->file->private_data = isk->clcsock;
1004 /* clean up for a created but never accepted sock */
1005 void smc_close_non_accepted(struct sock *sk)
1007 struct smc_sock *smc = smc_sk(sk);
1009 sock_hold(sk); /* sock_put below */
1011 if (!sk->sk_lingertime)
1012 /* wait for peer closing */
1013 sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
1016 sock_put(sk); /* sock_hold above */
1017 sock_put(sk); /* final sock_put */
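/* CONFIRM LINK / ADD LINK handshake with the client for the first connection
 * of a new link group (server side)
 */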
1020 static int smcr_serv_conf_first_link(struct smc_sock *smc)
1022 struct net *net = sock_net(smc->clcsock->sk);
1023 struct smc_link *link = smc->conn.lnk;
1027 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc, false))
1028 return SMC_CLC_DECL_ERR_REGRMB;
1030 /* send CONFIRM LINK request to client over the RoCE fabric */
1031 rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
1033 return SMC_CLC_DECL_TIMEOUT_CL;
1035 /* receive CONFIRM LINK response from client over the RoCE fabric */
1036 rest = wait_for_completion_interruptible_timeout(
1037 &link->llc_confirm_resp,
1038 SMC_LLC_WAIT_FIRST_TIME);
1040 struct smc_clc_msg_decline dclc;
1042 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
1043 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
1044 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
1047 if (link->llc_confirm_resp_rc)
1048 return SMC_CLC_DECL_RMBE_EC;
1050 /* send ADD LINK request to client over the RoCE fabric */
1051 rc = smc_llc_send_add_link(link,
1052 link->smcibdev->mac[link->ibport - 1],
1053 link->gid, SMC_LLC_REQ);
1055 return SMC_CLC_DECL_TIMEOUT_AL;
1057 /* receive ADD LINK response from client over the RoCE fabric */
1058 rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
1061 struct smc_clc_msg_decline dclc;
1063 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
1064 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
1065 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
1068 smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
1073 /* listen worker: finish */
1074 static void smc_listen_out(struct smc_sock *new_smc)
1076 struct smc_sock *lsmc = new_smc->listen_smc;
1077 struct sock *newsmcsk = &new_smc->sk;
1079 if (lsmc->sk.sk_state == SMC_LISTEN) {
1080 lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
1081 smc_accept_enqueue(&lsmc->sk, newsmcsk);
1082 release_sock(&lsmc->sk);
1083 } else { /* no longer listening */
1084 smc_close_non_accepted(newsmcsk);
1087 /* Wake up accept */
1088 lsmc->sk.sk_data_ready(&lsmc->sk);
1089 sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
1092 /* listen worker: finish in state connected */
1093 static void smc_listen_out_connected(struct smc_sock *new_smc)
1095 struct sock *newsmcsk = &new_smc->sk;
1097 sk_refcnt_debug_inc(newsmcsk);
1098 if (newsmcsk->sk_state == SMC_INIT)
1099 newsmcsk->sk_state = SMC_ACTIVE;
1101 smc_listen_out(new_smc);
1104 /* listen worker: finish in error state */
1105 static void smc_listen_out_err(struct smc_sock *new_smc)
1107 struct sock *newsmcsk = &new_smc->sk;
1109 if (newsmcsk->sk_state == SMC_INIT)
1110 sock_put(&new_smc->sk); /* passive closing */
1111 newsmcsk->sk_state = SMC_CLOSED;
1113 smc_listen_out(new_smc);
1116 /* listen worker: decline and fall back if possible */
1117 static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
1120 /* RDMA setup failed, switch back to TCP */
1121 if (local_contact == SMC_FIRST_CONTACT)
1122 smc_lgr_cleanup_early(&new_smc->conn);
1124 smc_conn_free(&new_smc->conn);
1125 if (reason_code < 0) { /* error, no fallback possible */
1126 smc_listen_out_err(new_smc);
1129 smc_switch_to_fallback(new_smc);
1130 new_smc->fallback_rsn = reason_code;
1131 if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
1132 if (smc_clc_send_decline(new_smc, reason_code) < 0) {
1133 smc_listen_out_err(new_smc);
1137 smc_listen_out_connected(new_smc);
1140 /* listen worker: check prefixes */
1141 static int smc_listen_prfx_check(struct smc_sock *new_smc,
1142 struct smc_clc_msg_proposal *pclc)
1144 struct smc_clc_msg_proposal_prefix *pclc_prfx;
1145 struct socket *newclcsock = new_smc->clcsock;
1147 pclc_prfx = smc_clc_proposal_get_prefix(pclc);
1148 if (smc_clc_prfx_match(newclcsock, pclc_prfx))
1149 return SMC_CLC_DECL_DIFFPREFIX;
1154 /* listen worker: initialize connection and buffers */
1155 static int smc_listen_rdma_init(struct smc_sock *new_smc,
1156 struct smc_init_info *ini)
1160 /* allocate connection / link group */
1161 rc = smc_conn_create(new_smc, ini);
1165 /* create send buffer and rmb */
1166 if (smc_buf_create(new_smc, false))
1167 return SMC_CLC_DECL_MEM;
1172 /* listen worker: initialize connection and buffers for SMC-D */
1173 static int smc_listen_ism_init(struct smc_sock *new_smc,
1174 struct smc_clc_msg_proposal *pclc,
1175 struct smc_init_info *ini)
1177 struct smc_clc_msg_smcd *pclc_smcd;
1180 pclc_smcd = smc_get_clc_msg_smcd(pclc);
1181 ini->ism_gid = pclc_smcd->gid;
1182 rc = smc_conn_create(new_smc, ini);
1186 /* Check if peer can be reached via ISM device */
1187 if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
1188 new_smc->conn.lgr->vlan_id,
1189 new_smc->conn.lgr->smcd)) {
1190 if (ini->cln_first_contact == SMC_FIRST_CONTACT)
1191 smc_lgr_cleanup_early(&new_smc->conn);
1193 smc_conn_free(&new_smc->conn);
1194 return SMC_CLC_DECL_SMCDNOTALK;
1197 /* Create send and receive buffers */
1198 if (smc_buf_create(new_smc, true)) {
1199 if (ini->cln_first_contact == SMC_FIRST_CONTACT)
1200 smc_lgr_cleanup_early(&new_smc->conn);
1202 smc_conn_free(&new_smc->conn);
1203 return SMC_CLC_DECL_MEM;
1209 /* listen worker: register buffers */
1210 static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
1212 struct smc_connection *conn = &new_smc->conn;
1214 if (local_contact != SMC_FIRST_CONTACT) {
1215 if (smcr_lgr_reg_rmbs(conn->lgr, conn->rmb_desc))
1216 return SMC_CLC_DECL_ERR_REGRMB;
1218 smc_rmb_sync_sg_for_device(&new_smc->conn);
1223 /* listen worker: finish RDMA setup */
1224 static int smc_listen_rdma_finish(struct smc_sock *new_smc,
1225 struct smc_clc_msg_accept_confirm *cclc,
1228 struct smc_link *link = new_smc->conn.lnk;
1229 int reason_code = 0;
1231 if (local_contact == SMC_FIRST_CONTACT)
1232 smc_link_save_peer_info(link, cclc);
1234 if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc)) {
1235 reason_code = SMC_CLC_DECL_ERR_RTOK;
1239 if (local_contact == SMC_FIRST_CONTACT) {
1240 if (smc_ib_ready_link(link)) {
1241 reason_code = SMC_CLC_DECL_ERR_RDYLNK;
1244 /* QP confirmation over RoCE fabric */
1245 reason_code = smcr_serv_conf_first_link(new_smc);
1252 smc_listen_decline(new_smc, reason_code, local_contact);
1256 /* setup for RDMA connection of server */
1257 static void smc_listen_work(struct work_struct *work)
1259 struct smc_sock *new_smc = container_of(work, struct smc_sock,
1261 struct socket *newclcsock = new_smc->clcsock;
1262 struct smc_clc_msg_accept_confirm cclc;
1263 struct smc_clc_msg_proposal *pclc;
1264 struct smc_init_info ini = {0};
1265 bool ism_supported = false;
1266 u8 buf[SMC_CLC_MAX_LEN];
1269 if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
1270 return smc_listen_out_err(new_smc);
1272 if (new_smc->use_fallback) {
1273 smc_listen_out_connected(new_smc);
1277 /* check if peer is smc capable */
1278 if (!tcp_sk(newclcsock->sk)->syn_smc) {
1279 smc_switch_to_fallback(new_smc);
1280 new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
1281 smc_listen_out_connected(new_smc);
	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	pclc = (struct smc_clc_msg_proposal *)&buf;
1289 rc = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
1290 SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
1294 /* IPSec connections opt out of SMC-R optimizations */
1295 if (using_ipsec(new_smc)) {
1296 rc = SMC_CLC_DECL_IPSEC;
1300 /* check for matching IP prefix and subnet length */
1301 rc = smc_listen_prfx_check(new_smc, pclc);
1305 /* get vlan id from IP device */
1306 if (smc_vlan_by_tcpsk(new_smc->clcsock, &ini)) {
1307 rc = SMC_CLC_DECL_GETVLANERR;
1311 mutex_lock(&smc_server_lgr_pending);
1312 smc_close_init(new_smc);
1313 smc_rx_init(new_smc);
1314 smc_tx_init(new_smc);
1316 /* check if ISM is available */
1317 if (pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) {
1318 ini.is_smcd = true; /* prepare ISM check */
1319 rc = smc_find_ism_device(new_smc, &ini);
1321 rc = smc_listen_ism_init(new_smc, pclc, &ini);
1323 ism_supported = true;
1324 else if (pclc->hdr.path == SMC_TYPE_D)
1325 goto out_unlock; /* skip RDMA and decline */
1328 /* check if RDMA is available */
1329 if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
1330 /* prepare RDMA check */
1331 ini.is_smcd = false;
1333 ini.ib_lcl = &pclc->lcl;
1334 rc = smc_find_rdma_device(new_smc, &ini);
1336 /* no RDMA device found */
1337 if (pclc->hdr.path == SMC_TYPE_B)
1338 /* neither ISM nor RDMA device found */
1339 rc = SMC_CLC_DECL_NOSMCDEV;
1342 rc = smc_listen_rdma_init(new_smc, &ini);
1345 rc = smc_listen_rdma_reg(new_smc, ini.cln_first_contact);
1350 /* send SMC Accept CLC message */
1351 rc = smc_clc_send_accept(new_smc, ini.cln_first_contact);
1355 /* SMC-D does not need this lock any more */
1357 mutex_unlock(&smc_server_lgr_pending);
1359 /* receive SMC Confirm CLC message */
1360 rc = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
1361 SMC_CLC_CONFIRM, CLC_WAIT_TIME);
1369 if (!ism_supported) {
1370 rc = smc_listen_rdma_finish(new_smc, &cclc,
1371 ini.cln_first_contact);
1372 mutex_unlock(&smc_server_lgr_pending);
1376 smc_conn_save_peer_info(new_smc, &cclc);
1377 smc_listen_out_connected(new_smc);
1381 mutex_unlock(&smc_server_lgr_pending);
1383 smc_listen_decline(new_smc, rc, ini.cln_first_contact);
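/* worker of a listening SMC socket: accept incoming clcsock connections and
 * schedule smc_listen_work() for each new child socket
 */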
1386 static void smc_tcp_listen_work(struct work_struct *work)
1388 struct smc_sock *lsmc = container_of(work, struct smc_sock,
1390 struct sock *lsk = &lsmc->sk;
1391 struct smc_sock *new_smc;
1395 while (lsk->sk_state == SMC_LISTEN) {
1396 rc = smc_clcsock_accept(lsmc, &new_smc);
1402 new_smc->listen_smc = lsmc;
1403 new_smc->use_fallback = lsmc->use_fallback;
1404 new_smc->fallback_rsn = lsmc->fallback_rsn;
1405 sock_hold(lsk); /* sock_put in smc_listen_work */
1406 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
1407 smc_copy_sock_settings_to_smc(new_smc);
1408 new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
1409 new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
1410 sock_hold(&new_smc->sk); /* sock_put in passive closing */
1411 if (!schedule_work(&new_smc->smc_listen_work))
1412 sock_put(&new_smc->sk);
1417 sock_put(&lsmc->sk); /* sock_hold in smc_listen */
1420 static int smc_listen(struct socket *sock, int backlog)
1422 struct sock *sk = sock->sk;
1423 struct smc_sock *smc;
1430 if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
1431 smc->connect_nonblock)
1435 if (sk->sk_state == SMC_LISTEN) {
1436 sk->sk_max_ack_backlog = backlog;
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
1443 if (!smc->use_fallback)
1444 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1446 rc = kernel_listen(smc->clcsock, backlog);
1449 sk->sk_max_ack_backlog = backlog;
1450 sk->sk_ack_backlog = 0;
1451 sk->sk_state = SMC_LISTEN;
1452 sock_hold(sk); /* sock_hold in tcp_listen_worker */
1453 if (!schedule_work(&smc->tcp_listen_work))
1461 static int smc_accept(struct socket *sock, struct socket *new_sock,
1462 int flags, bool kern)
1464 struct sock *sk = sock->sk, *nsk;
1465 DECLARE_WAITQUEUE(wait, current);
1466 struct smc_sock *lsmc;
1471 sock_hold(sk); /* sock_put below */
1474 if (lsmc->sk.sk_state != SMC_LISTEN) {
1480 /* Wait for an incoming connection */
1481 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1482 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1483 while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
1484 set_current_state(TASK_INTERRUPTIBLE);
1490 timeo = schedule_timeout(timeo);
1491 /* wakeup by sk_data_ready in smc_listen_work() */
1492 sched_annotate_sleep();
1494 if (signal_pending(current)) {
1495 rc = sock_intr_errno(timeo);
1499 set_current_state(TASK_RUNNING);
1500 remove_wait_queue(sk_sleep(sk), &wait);
1503 rc = sock_error(nsk);
1508 if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
1509 /* wait till data arrives on the socket */
1510 timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
1512 if (smc_sk(nsk)->use_fallback) {
1513 struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
1516 if (skb_queue_empty(&clcsk->sk_receive_queue))
1517 sk_wait_data(clcsk, &timeo, NULL);
1518 release_sock(clcsk);
1519 } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
1521 smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
1527 sock_put(sk); /* sock_hold above */
1531 static int smc_getname(struct socket *sock, struct sockaddr *addr,
1534 struct smc_sock *smc;
1536 if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
1537 (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
1540 smc = smc_sk(sock->sk);
1542 return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
1545 static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1547 struct sock *sk = sock->sk;
1548 struct smc_sock *smc;
1553 if ((sk->sk_state != SMC_ACTIVE) &&
1554 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
1555 (sk->sk_state != SMC_INIT))
1558 if (msg->msg_flags & MSG_FASTOPEN) {
1559 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
1560 smc_switch_to_fallback(smc);
1561 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
1568 if (smc->use_fallback)
1569 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
1571 rc = smc_tx_sendmsg(smc, msg, len);
1577 static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1580 struct sock *sk = sock->sk;
1581 struct smc_sock *smc;
1586 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
1587 /* socket was connected before, no more data to read */
1591 if ((sk->sk_state == SMC_INIT) ||
1592 (sk->sk_state == SMC_LISTEN) ||
1593 (sk->sk_state == SMC_CLOSED))
1596 if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
1601 if (smc->use_fallback) {
1602 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
1604 msg->msg_namelen = 0;
1605 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
1613 static __poll_t smc_accept_poll(struct sock *parent)
1615 struct smc_sock *isk = smc_sk(parent);
1618 spin_lock(&isk->accept_q_lock);
1619 if (!list_empty(&isk->accept_q))
1620 mask = EPOLLIN | EPOLLRDNORM;
1621 spin_unlock(&isk->accept_q_lock);
1626 static __poll_t smc_poll(struct file *file, struct socket *sock,
1629 struct sock *sk = sock->sk;
1630 struct smc_sock *smc;
1636 smc = smc_sk(sock->sk);
1637 if (smc->use_fallback) {
1638 /* delegate to CLC child sock */
1639 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
1640 sk->sk_err = smc->clcsock->sk->sk_err;
1642 if (sk->sk_state != SMC_CLOSED)
1643 sock_poll_wait(file, sock, wait);
1646 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
1647 (sk->sk_state == SMC_CLOSED))
1649 if (sk->sk_state == SMC_LISTEN) {
1650 /* woken up by sk_data_ready in smc_listen_work() */
1651 mask |= smc_accept_poll(sk);
1652 } else if (smc->use_fallback) { /* as result of connect_work()*/
1653 mask |= smc->clcsock->ops->poll(file, smc->clcsock,
1655 sk->sk_err = smc->clcsock->sk->sk_err;
1657 if ((sk->sk_state != SMC_INIT &&
1658 atomic_read(&smc->conn.sndbuf_space)) ||
1659 sk->sk_shutdown & SEND_SHUTDOWN) {
1660 mask |= EPOLLOUT | EPOLLWRNORM;
1662 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1663 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1665 if (atomic_read(&smc->conn.bytes_to_rcv))
1666 mask |= EPOLLIN | EPOLLRDNORM;
1667 if (sk->sk_shutdown & RCV_SHUTDOWN)
1668 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
1669 if (sk->sk_state == SMC_APPCLOSEWAIT1)
1671 if (smc->conn.urg_state == SMC_URG_VALID)
1679 static int smc_shutdown(struct socket *sock, int how)
1681 struct sock *sk = sock->sk;
1682 struct smc_sock *smc;
1688 if ((how < SHUT_RD) || (how > SHUT_RDWR))
1694 if ((sk->sk_state != SMC_ACTIVE) &&
1695 (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
1696 (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
1697 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
1698 (sk->sk_state != SMC_APPCLOSEWAIT2) &&
1699 (sk->sk_state != SMC_APPFINCLOSEWAIT))
1701 if (smc->use_fallback) {
1702 rc = kernel_sock_shutdown(smc->clcsock, how);
1703 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
1704 if (sk->sk_shutdown == SHUTDOWN_MASK)
1705 sk->sk_state = SMC_CLOSED;
1709 case SHUT_RDWR: /* shutdown in both directions */
1710 rc = smc_close_active(smc);
1713 rc = smc_close_shutdown_write(smc);
1717 /* nothing more to do because peer is not involved */
1721 rc1 = kernel_sock_shutdown(smc->clcsock, how);
1722 /* map sock_shutdown_cmd constants to sk_shutdown value range */
1723 sk->sk_shutdown |= how + 1;
1727 return rc ? rc : rc1;
1730 static int smc_setsockopt(struct socket *sock, int level, int optname,
1731 char __user *optval, unsigned int optlen)
1733 struct sock *sk = sock->sk;
1734 struct smc_sock *smc;
	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
					   optval, optlen);
1744 if (smc->clcsock->sk->sk_err) {
1745 sk->sk_err = smc->clcsock->sk->sk_err;
1746 sk->sk_error_report(sk);
1749 if (optlen < sizeof(int))
1751 if (get_user(val, (int __user *)optval))
1755 if (rc || smc->use_fallback)
1760 case TCP_FASTOPEN_CONNECT:
1761 case TCP_FASTOPEN_KEY:
1762 case TCP_FASTOPEN_NO_COOKIE:
1763 /* option not supported by SMC */
1764 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
1765 smc_switch_to_fallback(smc);
1766 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
1772 if (sk->sk_state != SMC_INIT &&
1773 sk->sk_state != SMC_LISTEN &&
1774 sk->sk_state != SMC_CLOSED) {
1776 mod_delayed_work(system_wq, &smc->conn.tx_work,
1781 if (sk->sk_state != SMC_INIT &&
1782 sk->sk_state != SMC_LISTEN &&
1783 sk->sk_state != SMC_CLOSED) {
1785 mod_delayed_work(system_wq, &smc->conn.tx_work,
1789 case TCP_DEFER_ACCEPT:
1790 smc->sockopt_defer_accept = val;
1801 static int smc_getsockopt(struct socket *sock, int level, int optname,
1802 char __user *optval, int __user *optlen)
1804 struct smc_sock *smc;
1806 smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}
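/* ioctl handler: SIOCINQ/SIOCOUTQ and related requests are answered from the
 * SMC connection cursors; in fallback mode they are passed to the clcsock
 */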
1812 static int smc_ioctl(struct socket *sock, unsigned int cmd,
1815 union smc_host_cursor cons, urg;
1816 struct smc_connection *conn;
1817 struct smc_sock *smc;
1820 smc = smc_sk(sock->sk);
1822 lock_sock(&smc->sk);
1823 if (smc->use_fallback) {
1824 if (!smc->clcsock) {
1825 release_sock(&smc->sk);
1828 answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
1829 release_sock(&smc->sk);
1833 case SIOCINQ: /* same as FIONREAD */
1834 if (smc->sk.sk_state == SMC_LISTEN) {
1835 release_sock(&smc->sk);
1838 if (smc->sk.sk_state == SMC_INIT ||
1839 smc->sk.sk_state == SMC_CLOSED)
1842 answ = atomic_read(&smc->conn.bytes_to_rcv);
1845 /* output queue size (not send + not acked) */
1846 if (smc->sk.sk_state == SMC_LISTEN) {
1847 release_sock(&smc->sk);
1850 if (smc->sk.sk_state == SMC_INIT ||
1851 smc->sk.sk_state == SMC_CLOSED)
1854 answ = smc->conn.sndbuf_desc->len -
1855 atomic_read(&smc->conn.sndbuf_space);
1858 /* output queue size (not send only) */
1859 if (smc->sk.sk_state == SMC_LISTEN) {
1860 release_sock(&smc->sk);
1863 if (smc->sk.sk_state == SMC_INIT ||
1864 smc->sk.sk_state == SMC_CLOSED)
1867 answ = smc_tx_prepared_sends(&smc->conn);
1870 if (smc->sk.sk_state == SMC_LISTEN) {
1871 release_sock(&smc->sk);
1874 if (smc->sk.sk_state == SMC_INIT ||
1875 smc->sk.sk_state == SMC_CLOSED) {
1878 smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
1879 smc_curs_copy(&urg, &conn->urg_curs, conn);
1880 answ = smc_curs_diff(conn->rmb_desc->len,
1885 release_sock(&smc->sk);
1886 return -ENOIOCTLCMD;
1888 release_sock(&smc->sk);
1890 return put_user(answ, (int __user *)arg);
1893 static ssize_t smc_sendpage(struct socket *sock, struct page *page,
1894 int offset, size_t size, int flags)
1896 struct sock *sk = sock->sk;
1897 struct smc_sock *smc;
1902 if (sk->sk_state != SMC_ACTIVE) {
1907 if (smc->use_fallback)
1908 rc = kernel_sendpage(smc->clcsock, page, offset,
1911 rc = sock_no_sendpage(sock, page, offset, size, flags);
/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
1923 static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
1924 struct pipe_inode_info *pipe, size_t len,
1927 struct sock *sk = sock->sk;
1928 struct smc_sock *smc;
1933 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
1934 /* socket was connected before, no more data to read */
1938 if (sk->sk_state == SMC_INIT ||
1939 sk->sk_state == SMC_LISTEN ||
1940 sk->sk_state == SMC_CLOSED)
1943 if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
1948 if (smc->use_fallback) {
1949 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
1956 if (flags & SPLICE_F_NONBLOCK)
1957 flags = MSG_DONTWAIT;
1960 rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
1968 /* must look like tcp */
1969 static const struct proto_ops smc_sock_ops = {
1971 .owner = THIS_MODULE,
1972 .release = smc_release,
1974 .connect = smc_connect,
1975 .socketpair = sock_no_socketpair,
1976 .accept = smc_accept,
1977 .getname = smc_getname,
1980 .listen = smc_listen,
1981 .shutdown = smc_shutdown,
1982 .setsockopt = smc_setsockopt,
1983 .getsockopt = smc_getsockopt,
1984 .sendmsg = smc_sendmsg,
1985 .recvmsg = smc_recvmsg,
1986 .mmap = sock_no_mmap,
1987 .sendpage = smc_sendpage,
	.splice_read	= smc_splice_read,
};
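/* create() handler of the AF_SMC family: allocates the SMC socket and the
 * internal TCP clcsock used for the CLC handshake and for fallback
 */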
1991 static int smc_create(struct net *net, struct socket *sock, int protocol,
1994 int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
1995 struct smc_sock *smc;
1999 rc = -ESOCKTNOSUPPORT;
2000 if (sock->type != SOCK_STREAM)
2003 rc = -EPROTONOSUPPORT;
2004 if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
2008 sock->ops = &smc_sock_ops;
2009 sk = smc_sock_alloc(net, sock, protocol);
2013 /* create internal TCP socket for CLC handshake and fallback */
2015 smc->use_fallback = false; /* assume rdma capability first */
2016 smc->fallback_rsn = 0;
2017 rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
2020 sk_common_release(sk);
2023 smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
2024 smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
2030 static const struct net_proto_family smc_sock_family_ops = {
2032 .owner = THIS_MODULE,
2033 .create = smc_create,
2036 unsigned int smc_net_id;
2038 static __net_init int smc_net_init(struct net *net)
2040 return smc_pnet_net_init(net);
2043 static void __net_exit smc_net_exit(struct net *net)
2045 smc_pnet_net_exit(net);
2048 static struct pernet_operations smc_net_ops = {
2049 .init = smc_net_init,
2050 .exit = smc_net_exit,
2052 .size = sizeof(struct smc_net),
2055 static int __init smc_init(void)
2059 rc = register_pernet_subsys(&smc_net_ops);
2063 rc = smc_pnet_init();
2065 goto out_pernet_subsys;
2067 rc = smc_core_init();
2069 pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
2073 rc = smc_llc_init();
2075 pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
2079 rc = smc_cdc_init();
2081 pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
2085 rc = proto_register(&smc_proto, 1);
2087 pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
2091 rc = proto_register(&smc_proto6, 1);
2093 pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
2097 rc = sock_register(&smc_sock_family_ops);
2099 pr_err("%s: sock_register fails with %d\n", __func__, rc);
2102 INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
2103 INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);
2105 rc = smc_ib_register_client();
2107 pr_err("%s: ib_register fails with %d\n", __func__, rc);
2111 static_branch_enable(&tcp_have_smc);
2115 sock_unregister(PF_SMC);
2117 proto_unregister(&smc_proto6);
2119 proto_unregister(&smc_proto);
2125 unregister_pernet_subsys(&smc_net_ops);
2130 static void __exit smc_exit(void)
2132 static_branch_disable(&tcp_have_smc);
2133 sock_unregister(PF_SMC);
2135 smc_ib_unregister_client();
2136 proto_unregister(&smc_proto6);
2137 proto_unregister(&smc_proto);
2139 unregister_pernet_subsys(&smc_net_ops);
2143 module_init(smc_init);
2144 module_exit(smc_exit);
2146 MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
2147 MODULE_DESCRIPTION("smc socket address family");
2148 MODULE_LICENSE("GPL");
2149 MODULE_ALIAS_NETPROTO(PF_SMC);