// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  AF_SMC protocol family socket handler keeping the AF_INET sock address type
 *  applies to SOCK_STREAM sockets only
 *  offers an alternative communication option for TCP-protocol sockets
 *  applicable with RoCE-cards only
 *
 *  Initial restrictions:
 *    - support for alternate links postponed
 *
 *  Copyright IBM Corp. 2016, 2018
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *              based on prototype from Frank Blaschka
 */
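/*
 * Usage sketch (userspace, not part of this file; a minimal sketch assuming a
 * kernel built with CONFIG_SMC): an AF_SMC socket is created like a TCP
 * socket, only the address family and protocol differ; bind()/connect()/
 * listen()/accept() then take ordinary sockaddr_in/sockaddr_in6 arguments.
 *
 *	int fd  = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);	// IPv4
 *	int fd6 = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC6);	// IPv6
 *
 * If no SMC-capable peer or device is found, the connection transparently
 * falls back to TCP on the internal clcsock (see smc_switch_to_fallback()).
 */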
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>
#include <linux/ctype.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"
static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */
struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
struct workqueue_struct	*smc_close_wq;	/* wq for close work */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}
static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};
int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	write_unlock_bh(&h->lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);
void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);
struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);
struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
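/* smc_proto and smc_proto6 differ only in name and hash table; the variant
 * is chosen in smc_sock_alloc() below based on SMCPROTO_SMC vs SMCPROTO_SMC6.
 */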
static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
		smc->clcsock->file->private_data = smc->sk.sk_socket;
		smc->clcsock->file = NULL;
	}
}
static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_CLOSED) {
			if (sk->sk_state != SMC_LISTEN &&
			    sk->sk_state != SMC_INIT)
				sock_put(sk); /* passive closing */
			if (sk->sk_state == SMC_LISTEN) {
				/* wake up clcsock accept */
				rc = kernel_sock_shutdown(smc->clcsock,
							  SHUT_RDWR);
			}
			sk->sk_state = SMC_CLOSED;
			sk->sk_state_change(sk);
		}
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	sock_hold(sk); /* sock_put below */
	smc = smc_sk(sk);

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);

	if (cancel_work_sync(&smc->connect_work))
		sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
out:
	return rc;
}
static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);
	mutex_init(&smc->clcsock_release_lock);

	return sk;
}
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}
#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))

/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}
#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))

/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
/* register the new rmb on all links */
static int smcr_lgr_reg_rmbs(struct smc_link *link,
			     struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = link->lgr;
	int i, rc = 0;

	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	if (rc)
		return rc;
	/* protect against parallel smc_llc_cli_rkey_exchange() and
	 * parallel smcr_link_reg_rmb()
	 */
	mutex_lock(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
		if (rc)
			goto out;
	}

	/* exchange confirm_rkey msg with peer */
	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
	if (rc) {
		rc = SMC_CLC_DECL_ERR_RTOK;
		goto out;
	}
	rmb_desc->is_conf_rkey = true;
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
	return rc;
}
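/* First-contact link confirmation on the client side (a summary of the
 * function below): wait for the server's CONFIRM LINK LLC request, evaluate
 * it, move the QP to RTS, register the connection's RMB, send the CONFIRM
 * LINK response, and optionally accept an ADD LINK request for a second link.
 */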
static int smcr_clnt_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	/* receive CONFIRM LINK request from server over RoCE fabric */
	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	/* optional 2nd link, receive ADD LINK request from server */
	qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
			      SMC_LLC_ADD_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		if (rc == -EAGAIN)
			rc = 0; /* no DECLINE received, go with one link */
		return rc;
	}
	smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
	smc_llc_cli_add_link(link, qentry);
	return 0;
}
static bool smc_isascii(char *hostname)
{
	int i;

	for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
		if (!isascii(hostname[i]))
			return false;
	return true;
}
static void smc_conn_save_peer_info_fce(struct smc_sock *smc,
					struct smc_clc_msg_accept_confirm *clc)
{
	struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
		(struct smc_clc_msg_accept_confirm_v2 *)clc;
	struct smc_clc_first_contact_ext *fce;
	int clc_v2_len;

	if (clc->hdr.version == SMC_V1 ||
	    !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK))
		return;

	if (smc->conn.lgr->is_smcd) {
		memcpy(smc->conn.lgr->negotiated_eid, clc_v2->d1.eid,
		       SMC_MAX_EID_LEN);
		clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2,
					 d1);
	} else {
		memcpy(smc->conn.lgr->negotiated_eid, clc_v2->r1.eid,
		       SMC_MAX_EID_LEN);
		clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2,
					 r1);
	}
	fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc_v2) + clc_v2_len);
	smc->conn.lgr->peer_os = fce->os_type;
	smc->conn.lgr->peer_smc_release = fce->release;
	if (smc_isascii(fce->hostname))
		memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
		       SMC_MAX_HOSTNAME_LEN);
}
static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);

	smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}
static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);

	smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
	smc->conn.peer_token = clc->d0.token;
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
}
static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
	smc_conn_save_peer_info_fce(smc, clc);
}
static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc,
				    struct smc_init_info *ini)
{
	link->peer_qpn = ntoh24(clc->r0.qpn);
	memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->r0.psn);
	link->peer_mtu = clc->r0.qp_mtu;
}
static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
				       struct smc_stats_fback *fback_arr)
{
	int cnt;

	for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
		if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
			fback_arr[cnt].count++;
			break;
		}
		if (!fback_arr[cnt].fback_code) {
			fback_arr[cnt].fback_code = smc->fallback_rsn;
			fback_arr[cnt].count++;
			break;
		}
	}
}
static void smc_stat_fallback(struct smc_sock *smc)
{
	struct net *net = sock_net(&smc->sk);

	mutex_lock(&net->smc.mutex_fback_rsn);
	if (smc->listen_smc) {
		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
		net->smc.fback_rsn->srv_fback_cnt++;
	} else {
		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
		net->smc.fback_rsn->clnt_fback_cnt++;
	}
	mutex_unlock(&net->smc.mutex_fback_rsn);
}
/* must be called under rcu read lock */
static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key)
{
	struct socket_wq *wq;
	__poll_t flags;

	wq = rcu_dereference(smc->sk.sk_wq);
	if (!skwq_has_sleeper(wq))
		return;

	/* wake up smc sk->sk_wq */
	if (!key) {
		/* sk_state_change */
		wake_up_interruptible_all(&wq->wait);
	} else {
		flags = key_to_poll(key);
		if (flags & (EPOLLIN | EPOLLOUT))
			/* sk_data_ready or sk_write_space */
			wake_up_interruptible_sync_poll(&wq->wait, flags);
		else if (flags & EPOLLERR)
			/* sk_error_report */
			wake_up_interruptible_poll(&wq->wait, flags);
	}
}
struct smc_mark_woken {
	bool woken;
	void *key;
	wait_queue_entry_t wait_entry;
};

static int smc_fback_mark_woken(wait_queue_entry_t *wait,
				unsigned int mode, int sync, void *key)
{
	struct smc_mark_woken *mark =
		container_of(wait, struct smc_mark_woken, wait_entry);

	mark->woken = true;
	mark->key = key;
	return 0;
}
static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk,
				     void (*clcsock_callback)(struct sock *sk))
{
	struct smc_mark_woken mark = { .woken = false };
	struct socket_wq *wq;

	init_waitqueue_func_entry(&mark.wait_entry,
				  smc_fback_mark_woken);
	rcu_read_lock();
	wq = rcu_dereference(clcsk->sk_wq);
	if (!wq)
		goto out;
	add_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
	clcsock_callback(clcsk);
	remove_wait_queue(sk_sleep(clcsk), &mark.wait_entry);

	if (mark.woken)
		smc_fback_wakeup_waitqueue(smc, mark.key);
out:
	rcu_read_unlock();
}
static void smc_fback_state_change(struct sock *clcsk)
{
	struct smc_sock *smc =
		smc_clcsock_user_data(clcsk);

	if (!smc)
		return;
	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_state_change);
}

static void smc_fback_data_ready(struct sock *clcsk)
{
	struct smc_sock *smc =
		smc_clcsock_user_data(clcsk);

	if (!smc)
		return;
	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_data_ready);
}

static void smc_fback_write_space(struct sock *clcsk)
{
	struct smc_sock *smc =
		smc_clcsock_user_data(clcsk);

	if (!smc)
		return;
	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_write_space);
}

static void smc_fback_error_report(struct sock *clcsk)
{
	struct smc_sock *smc =
		smc_clcsock_user_data(clcsk);

	if (!smc)
		return;
	smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_error_report);
}
static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
	struct sock *clcsk;
	int rc = 0;

	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		rc = -EBADF;
		goto out;
	}
	clcsk = smc->clcsock->sk;

	if (smc->use_fallback)
		goto out;
	smc->use_fallback = true;
	smc->fallback_rsn = reason_code;
	smc_stat_fallback(smc);
	trace_smc_switch_to_fallback(smc, reason_code);
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
		smc->clcsock->wq.fasync_list =
			smc->sk.sk_socket->wq.fasync_list;

		/* There might be some wait entries remaining
		 * in smc sk->sk_wq and they should be woken up
		 * as clcsock's wait queue is woken up.
		 */
		smc->clcsk_state_change = clcsk->sk_state_change;
		smc->clcsk_data_ready = clcsk->sk_data_ready;
		smc->clcsk_write_space = clcsk->sk_write_space;
		smc->clcsk_error_report = clcsk->sk_error_report;

		clcsk->sk_state_change = smc_fback_state_change;
		clcsk->sk_data_ready = smc_fback_data_ready;
		clcsk->sk_write_space = smc_fback_write_space;
		clcsk->sk_error_report = smc_fback_error_report;

		smc->clcsock->sk->sk_user_data =
			(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
	}
out:
	mutex_unlock(&smc->clcsock_release_lock);
	return rc;
}
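/* After a successful smc_switch_to_fallback() all further socket calls are
 * delegated to the internal TCP clcsock; the saved clcsk_* callbacks keep
 * forwarding wakeups to any waiters still sleeping on the smc socket's
 * own wait queue.
 */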
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	int rc = 0;

	rc = smc_switch_to_fallback(smc, reason_code);
	if (rc) { /* fallback fails */
		struct net *net = sock_net(&smc->sk);

		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return rc;
	}
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}
/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
					u8 version)
{
	struct net *net = sock_net(&smc->sk);
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code, version);
		if (rc < 0) {
			this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}
static void smc_conn_abort(struct smc_sock *smc, int local_first)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	bool lgr_valid = false;

	if (smc_conn_lgr_valid(conn))
		lgr_valid = true;

	smc_conn_free(conn);
	if (local_first && lgr_valid)
		smc_lgr_cleanup_early(lgr);
}
/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->check_smcrv2 && !ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}
/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev[0])
		return SMC_CLC_DECL_NOSMCDDEV;

	ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
	return 0;
}
/* is chid unique for the ism devices that are already determined? */
static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
					   int cnt)
{
	int i = (!ini->ism_dev[0]) ? 1 : 0;

	for (; i < cnt; i++)
		if (ini->ism_chid[i] == chid)
			return false;
	return true;
}
/* determine possible V2 ISM devices (either without PNETID or with PNETID plus
 * PNETID matching net_device)
 */
static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
				       struct smc_init_info *ini)
{
	int rc = SMC_CLC_DECL_NOSMCDDEV;
	struct smcd_dev *smcd;
	int i = 1;
	u16 chid;

	if (smcd_indicated(ini->smc_type_v1))
		rc = 0;		/* already initialized for V1 */
	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away || smcd == ini->ism_dev[0])
			continue;
		chid = smc_ism_get_chid(smcd);
		if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
			continue;
		if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
		    smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
			ini->ism_dev[i] = smcd;
			ini->ism_chid[i] = chid;
			ini->is_smcd = true;
			rc = 0;
			i++;
			if (i > SMC_MAX_ISM_DEVS)
				break;
		}
	}
	mutex_unlock(&smcd_dev_list.mutex);
	ini->ism_offered_cnt = i - 1;
	if (!ini->ism_dev[0] && !ini->ism_dev[1])
		ini->smcd_version = 0;

	return rc;
}
/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}
static int smc_find_proposal_devices(struct smc_sock *smc,
				     struct smc_init_info *ini)
{
	int rc = 0;

	/* check if there is an ism device available */
	if (!(ini->smcd_version & SMC_V1) ||
	    smc_find_ism_device(smc, ini) ||
	    smc_connect_ism_vlan_setup(smc, ini))
		ini->smcd_version &= ~SMC_V1;
	/* else ISM V1 is supported for this connection */

	/* check if there is an rdma device available */
	if (!(ini->smcr_version & SMC_V1) ||
	    smc_find_rdma_device(smc, ini))
		ini->smcr_version &= ~SMC_V1;
	/* else RDMA is supported for this connection */
	ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1,
					      ini->smcr_version & SMC_V1);

	/* check if there is an ism v2 device available */
	if (!(ini->smcd_version & SMC_V2) ||
	    !smc_ism_is_v2_capable() ||
	    smc_find_ism_v2_device_clnt(smc, ini))
		ini->smcd_version &= ~SMC_V2;

	/* check if there is an rdma v2 device available */
	ini->check_smcrv2 = true;
	ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr;
	if (!(ini->smcr_version & SMC_V2) ||
	    smc->clcsock->sk->sk_family != AF_INET ||
	    !smc_clc_ueid_count() ||
	    smc_find_rdma_device(smc, ini))
		ini->smcr_version &= ~SMC_V2;
	ini->check_smcrv2 = false;

	ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2,
					      ini->smcr_version & SMC_V2);

	/* if neither ISM nor RDMA are supported, fallback */
	if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
		rc = SMC_CLC_DECL_NOSMCDEV;

	return rc;
}
/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
					struct smc_init_info *ini)
{
	if (!smcd_indicated(ini->smc_type_v1))
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}
#define SMC_CLC_MAX_ACCEPT_LEN \
	(sizeof(struct smc_clc_msg_accept_confirm_v2) + \
	 sizeof(struct smc_clc_first_contact_ext) + \
	 sizeof(struct smc_clc_msg_trail))
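/* The receive buffer for the accept message is sized for the worst case:
 * a V2 accept with the first-contact extension plus trailer; smaller V1
 * accepts fit in the same buffer.
 */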
/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm_v2 *aclc2,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
				SMC_CLC_ACCEPT, CLC_WAIT_TIME);
}
void smc_fill_gid_list(struct smc_link_group *lgr,
		       struct smc_gidlist *gidlist,
		       struct smc_ib_device *known_dev, u8 *known_gid)
{
	struct smc_init_info *alt_ini = NULL;

	memset(gidlist, 0, sizeof(*gidlist));
	memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE);

	alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL);
	if (!alt_ini)
		goto out;

	alt_ini->vlan_id = lgr->vlan_id;
	alt_ini->check_smcrv2 = true;
	alt_ini->smcrv2.saddr = lgr->saddr;
	smc_pnet_find_alt_roce(lgr, alt_ini, known_dev);

	if (!alt_ini->smcrv2.ib_dev_v2)
		goto out;

	memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2,
	       SMC_GID_SIZE);

out:
	kfree(alt_ini);
}
static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
				       struct smc_clc_msg_accept_confirm *aclc,
				       struct smc_init_info *ini)
{
	struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
		(struct smc_clc_msg_accept_confirm_v2 *)aclc;
	struct smc_clc_first_contact_ext *fce =
		(struct smc_clc_first_contact_ext *)
			(((u8 *)clc_v2) + sizeof(*clc_v2));

	if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
		return 0;

	if (fce->v2_direct) {
		memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
		ini->smcrv2.uses_gateway = false;
	} else {
		if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr,
				      smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
				      ini->smcrv2.nexthop_mac,
				      &ini->smcrv2.uses_gateway))
			return SMC_CLC_DECL_NOROUTE;
		if (!ini->smcrv2.uses_gateway) {
			/* mismatch: peer claims indirect, but it's direct */
			return SMC_CLC_DECL_NOINDIRECT;
		}
	}
	return 0;
}
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	int i, reason_code = 0;
	struct smc_link *link;
	u8 *eid = NULL;

	ini->is_smcd = false;
	ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
	memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN);
	memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE);
	memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN);

	reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini);
	if (reason_code)
		return reason_code;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}

	smc_conn_save_peer_info(smc, aclc);

	if (ini->first_contact_local) {
		link = smc->conn.lnk;
	} else {
		/* set link that was assigned by server */
		link = NULL;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *l = &smc->conn.lgr->lnk[i];

			if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
			    !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
				    SMC_GID_SIZE) &&
			    (aclc->hdr.version > SMC_V1 ||
			     !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
				     sizeof(l->peer_mac)))) {
				link = l;
				break;
			}
		}
		if (!link) {
			reason_code = SMC_CLC_DECL_NOSRVLINK;
			goto connect_abort;
		}
		smc_switch_link_and_count(&smc->conn, link);
	}

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false)) {
		reason_code = SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	if (ini->first_contact_local)
		smc_link_save_peer_info(link, aclc, ini);

	if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto connect_abort;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (ini->first_contact_local) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto connect_abort;
		}
	} else {
		if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
			reason_code = SMC_CLC_DECL_ERR_REGRMB;
			goto connect_abort;
		}
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	if (aclc->hdr.version > SMC_V1) {
		struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)aclc;

		eid = clc_v2->r1.eid;
		if (ini->first_contact_local)
			smc_fill_gid_list(link->lgr, &ini->smcrv2.gidlist,
					  link->smcibdev, link->gid);
	}

	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
					   aclc->hdr.version, eid, ini);
	if (reason_code)
		goto connect_abort;

	smc_tx_init(smc);

	if (ini->first_contact_local) {
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_clnt_conf_first_link(smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
		if (reason_code)
			goto connect_abort;
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_conn_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_client_lgr_pending);
	smc->connect_nonblock = 0;

	return reason_code;
}
/* The server has chosen one of the proposed ISM devices for the communication.
 * Determine from the CHID of the received CLC ACCEPT the ISM device chosen.
 */
static int
smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc,
			       struct smc_init_info *ini)
{
	int i;

	for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
		if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) {
			ini->ism_selected = i;
			return 0;
		}
	}

	return -EPROTO;
}
/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	u8 *eid = NULL;
	int rc = 0;

	ini->is_smcd = true;
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;

	if (aclc->hdr.version == SMC_V2) {
		struct smc_clc_msg_accept_confirm_v2 *aclc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)aclc;

		rc = smc_v2_determine_accepted_chid(aclc_v2, ini);
		if (rc)
			return rc;
	}
	ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;

	/* there is only one lgr role for SMC-D; use server lock */
	mutex_lock(&smc_server_lgr_pending);
	rc = smc_conn_create(smc, ini);
	if (rc) {
		mutex_unlock(&smc_server_lgr_pending);
		return rc;
	}

	/* Create send and receive buffers */
	rc = smc_buf_create(smc, true);
	if (rc) {
		rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	smc_conn_save_peer_info(smc, aclc);
	smc_close_init(smc);
	smc_rx_init(smc);
	smc_tx_init(smc);

	if (aclc->hdr.version > SMC_V1) {
		struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
			(struct smc_clc_msg_accept_confirm_v2 *)aclc;

		eid = clc_v2->d1.eid;
	}

	rc = smc_clc_send_confirm(smc, ini->first_contact_local,
				  aclc->hdr.version, eid, NULL);
	if (rc)
		goto connect_abort;
	mutex_unlock(&smc_server_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_conn_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_server_lgr_pending);
	smc->connect_nonblock = 0;

	return rc;
}
/* check if received accept type and version matches a proposed one */
static int smc_connect_check_aclc(struct smc_init_info *ini,
				  struct smc_clc_msg_accept_confirm *aclc)
{
	if (aclc->hdr.typev1 != SMC_TYPE_R &&
	    aclc->hdr.typev1 != SMC_TYPE_D)
		return SMC_CLC_DECL_MODEUNSUPP;

	if (aclc->hdr.version >= SMC_V2) {
		if ((aclc->hdr.typev1 == SMC_TYPE_R &&
		     !smcr_indicated(ini->smc_type_v2)) ||
		    (aclc->hdr.typev1 == SMC_TYPE_D &&
		     !smcd_indicated(ini->smc_type_v2)))
			return SMC_CLC_DECL_MODEUNSUPP;
	} else {
		if ((aclc->hdr.typev1 == SMC_TYPE_R &&
		     !smcr_indicated(ini->smc_type_v1)) ||
		    (aclc->hdr.typev1 == SMC_TYPE_D &&
		     !smcd_indicated(ini->smc_type_v1)))
			return SMC_CLC_DECL_MODEUNSUPP;
	}

	return 0;
}
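/* Client-side CLC handshake as implemented by __smc_connect() below
 * (a summary, not normative): send a CLC PROPOSAL listing the usable
 * ISM/RDMA devices, wait for the server's CLC ACCEPT, validate that its
 * type and version match the proposal, then finish setup via
 * smc_connect_rdma() or smc_connect_ism(), which send the CLC CONFIRM.
 * Any failure declines and/or falls back to TCP.
 */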
/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
	struct smc_clc_msg_accept_confirm_v2 *aclc2;
	struct smc_clc_msg_accept_confirm *aclc;
	struct smc_init_info *ini = NULL;
	u8 *buf = NULL;
	int rc = 0;

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
						    version);

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
						    version);

	ini->smcd_version = SMC_V1 | SMC_V2;
	ini->smcr_version = SMC_V1 | SMC_V2;
	ini->smc_type_v1 = SMC_TYPE_B;
	ini->smc_type_v2 = SMC_TYPE_B;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
		ini->smcd_version &= ~SMC_V1;
		ini->smcr_version = 0;
		ini->smc_type_v1 = SMC_TYPE_N;
		if (!ini->smcd_version) {
			rc = SMC_CLC_DECL_GETVLANERR;
			goto fallback;
		}
	}

	rc = smc_find_proposal_devices(smc, ini);
	if (rc)
		goto fallback;

	buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto fallback;
	}
	aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf;
	aclc = (struct smc_clc_msg_accept_confirm *)aclc2;

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, aclc2, ini);
	if (rc)
		goto vlan_cleanup;

	/* check if smc modes and versions of CLC proposal and accept match */
	rc = smc_connect_check_aclc(ini, aclc);
	version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
	if (rc)
		goto vlan_cleanup;

	/* depending on previous steps, connect using rdma or ism */
	if (aclc->hdr.typev1 == SMC_TYPE_R) {
		ini->smcr_version = version;
		rc = smc_connect_rdma(smc, aclc, ini);
	} else if (aclc->hdr.typev1 == SMC_TYPE_D) {
		ini->smcd_version = version;
		rc = smc_connect_ism(smc, aclc, ini);
	}
	if (rc)
		goto vlan_cleanup;

	SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
	kfree(ini);
	return 0;

vlan_cleanup:
	smc_connect_ism_vlan_cleanup(smc, ini);
	kfree(buf);
fallback:
	kfree(ini);
	return smc_connect_decline_fallback(smc, rc, version);
}
static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	long timeo = smc->sk.sk_sndtimeo;
	int rc = 0;

	if (!timeo)
		timeo = MAX_SCHEDULE_TIMEOUT;
	lock_sock(smc->clcsock->sk);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
	} else if ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
		if ((rc == -EPIPE) &&
		    ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
			rc = 0;
	}
	release_sock(smc->clcsock->sk);
	lock_sock(&smc->sk);
	if (rc != 0 || smc->sk.sk_err) {
		smc->sk.sk_state = SMC_CLOSED;
		if (rc == -EPIPE || rc == -EAGAIN)
			smc->sk.sk_err = EPIPE;
		else if (signal_pending(current))
			smc->sk.sk_err = -sock_intr_errno(timeo);
		sock_put(&smc->sk); /* passive closing */
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
		if (smc->sk.sk_err) {
			smc->sk.sk_state_change(&smc->sk);
		} else { /* allow polling before and after fallback decision */
			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
			smc->sk.sk_write_space(&smc->sk);
		}
	}
	release_sock(&smc->sk);
}
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (smc->connect_nonblock) {
		rc = -EALREADY;
		goto out;
	}
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc && rc != -EINPROGRESS)
		goto out;

	sock_hold(&smc->sk); /* sock put in passive closing */
	if (smc->use_fallback)
		goto out;
	if (flags & O_NONBLOCK) {
		if (queue_work(smc_hs_wq, &smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;
	} else {
		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc = -EINVAL;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	mutex_lock(&lsmc->clcsock_release_lock);
	if (lsmc->clcsock)
		rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
	mutex_unlock(&lsmc->clcsock_release_lock);
	lock_sock(lsk);
	if (rc < 0 && rc != -EAGAIN)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		new_sk->sk_prot->unhash(new_sk);
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	/* new clcsock has inherited the smc listen-specific sk_data_ready
	 * function; switch it back to the original sk_data_ready function
	 */
	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}
/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink() */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}
/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}
/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock) {
			sock_graft(new_sk, new_sock);
			if (isk->use_fallback) {
				smc_sk(new_sk)->clcsock->file = new_sock->file;
				isk->clcsock->file->private_data = isk->clcsock;
			}
		}
		return new_sk;
	}
	return NULL;
}
/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	sock_hold(sk); /* sock_put below */
	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	__smc_release(smc);
	release_sock(sk);
	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
}
static int smcr_serv_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	/* initial contact - try to establish second link */
	smc_llc_srv_add_link(link, NULL);
	return 0;
}
/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	if (lsmc->sk.sk_state == SMC_LISTEN) {
		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
		release_sock(&lsmc->sk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}
/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}
/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;
	struct net *net = sock_net(newsmcsk);

	this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;

	smc_listen_out(new_smc);
}
/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_first, u8 version)
{
	/* RDMA setup failed, switch back to TCP */
	smc_conn_abort(new_smc, local_first);
	if (reason_code < 0 ||
	    smc_switch_to_fallback(new_smc, reason_code)) {
		/* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}
/* listen worker: version checking */
static int smc_listen_v2_check(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
	struct smc_clc_v2_extension *pclc_v2_ext;
	int rc = SMC_CLC_DECL_PEERNOSMC;

	ini->smc_type_v1 = pclc->hdr.typev1;
	ini->smc_type_v2 = pclc->hdr.typev2;
	ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
	ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
	if (pclc->hdr.version > SMC_V1) {
		if (smcd_indicated(ini->smc_type_v2))
			ini->smcd_version |= SMC_V2;
		if (smcr_indicated(ini->smc_type_v2))
			ini->smcr_version |= SMC_V2;
	}
	if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) {
		rc = SMC_CLC_DECL_PEERNOSMC;
		goto out;
	}
	pclc_v2_ext = smc_get_clc_v2_ext(pclc);
	if (!pclc_v2_ext) {
		ini->smcd_version &= ~SMC_V2;
		ini->smcr_version &= ~SMC_V2;
		rc = SMC_CLC_DECL_NOV2EXT;
		goto out;
	}
	pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
	if (ini->smcd_version & SMC_V2) {
		if (!smc_ism_is_v2_capable()) {
			ini->smcd_version &= ~SMC_V2;
			rc = SMC_CLC_DECL_NOISM2SUPP;
		} else if (!pclc_smcd_v2_ext) {
			ini->smcd_version &= ~SMC_V2;
			rc = SMC_CLC_DECL_NOV2DEXT;
		} else if (!pclc_v2_ext->hdr.eid_cnt &&
			   !pclc_v2_ext->hdr.flag.seid) {
			ini->smcd_version &= ~SMC_V2;
			rc = SMC_CLC_DECL_NOUEID;
		}
	}
	if (ini->smcr_version & SMC_V2) {
		if (!pclc_v2_ext->hdr.eid_cnt) {
			ini->smcr_version &= ~SMC_V2;
			rc = SMC_CLC_DECL_NOUEID;
		}
	}

out:
	if (!ini->smcd_version && !ini->smcr_version)
		return rc;

	return 0;
}
/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	if (pclc->hdr.typev1 == SMC_TYPE_N)
		return 0;
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}
/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false))
		return SMC_CLC_DECL_MEM;

	return 0;
}
/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_init_info *ini)
{
	int rc;

	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Create send and receive buffers */
	rc = smc_buf_create(new_smc, true);
	if (rc) {
		smc_conn_abort(new_smc, ini->first_contact_local);
		return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
					 SMC_CLC_DECL_MEM;
	}

	return 0;
}
static bool smc_is_already_selected(struct smcd_dev *smcd,
				    struct smc_init_info *ini,
				    int matches)
{
	int i;

	for (i = 0; i < matches; i++)
		if (smcd == ini->ism_dev[i])
			return true;

	return false;
}
/* check for ISM devices matching proposed ISM devices */
static void smc_check_ism_v2_match(struct smc_init_info *ini,
				   u16 proposed_chid, u64 proposed_gid,
				   unsigned int *matches)
{
	struct smcd_dev *smcd;

	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away)
			continue;
		if (smc_is_already_selected(smcd, ini, *matches))
			continue;
		if (smc_ism_get_chid(smcd) == proposed_chid &&
		    !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
			ini->ism_peer_gid[*matches] = proposed_gid;
			ini->ism_dev[*matches] = smcd;
			(*matches)++;
			break;
		}
	}
}
static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
{
	if (!ini->rc)
		ini->rc = rc;
}
static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_smcd_v2_extension *smcd_v2_ext;
	struct smc_clc_v2_extension *smc_v2_ext;
	struct smc_clc_msg_smcd *pclc_smcd;
	unsigned int matches = 0;
	u8 smcd_version;
	u8 *eid = NULL;
	int i, rc;

	if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
		goto not_found;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	smc_v2_ext = smc_get_clc_v2_ext(pclc);
	smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);

	mutex_lock(&smcd_dev_list.mutex);
	if (pclc_smcd->ism.chid)
		/* check for ISM device matching proposed native ISM device */
		smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
				       ntohll(pclc_smcd->ism.gid), &matches);
	for (i = 1; i <= smc_v2_ext->hdr.ism_gid_cnt; i++) {
		/* check for ISM devices matching proposed non-native ISM
		 * devices
		 */
		smc_check_ism_v2_match(ini,
				       ntohs(smcd_v2_ext->gidchid[i - 1].chid),
				       ntohll(smcd_v2_ext->gidchid[i - 1].gid),
				       &matches);
	}
	mutex_unlock(&smcd_dev_list.mutex);

	if (!ini->ism_dev[0]) {
		smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini);
		goto not_found;
	}

	smc_ism_get_system_eid(&eid);
	if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext,
			       smcd_v2_ext->system_eid, eid))
		goto not_found;

	/* separate - outside the smcd_dev_list.lock */
	smcd_version = ini->smcd_version;
	for (i = 0; i < matches; i++) {
		ini->smcd_version = SMC_V2;
		ini->is_smcd = true;
		ini->ism_selected = i;
		rc = smc_listen_ism_init(new_smc, ini);
		if (rc) {
			smc_find_ism_store_rc(rc, ini);
			/* try next active ISM device */
			continue;
		}
		return; /* matching and usable V2 ISM device found */
	}
	/* no V2 ISM device could be initialized */
	ini->smcd_version = smcd_version;	/* restore original value */
	ini->negotiated_eid[0] = 0;

not_found:
	ini->smcd_version &= ~SMC_V2;
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}
static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
	int rc = 0;

	/* check if ISM V1 is available */
	if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
		goto not_found;
	ini->is_smcd = true; /* prepare ISM check */
	ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid);
	rc = smc_find_ism_device(new_smc, ini);
	if (rc)
		goto not_found;
	ini->ism_selected = 0;
	rc = smc_listen_ism_init(new_smc, ini);
	if (!rc)
		return;		/* V1 ISM device found */

not_found:
	smc_find_ism_store_rc(rc, ini);
	ini->smcd_version &= ~SMC_V1;
	ini->ism_dev[0] = NULL;
	ini->is_smcd = false;
}
/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
{
	struct smc_connection *conn = &new_smc->conn;

	if (!local_first) {
		if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
			return SMC_CLC_DECL_ERR_REGRMB;
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}
static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
					 struct smc_clc_msg_proposal *pclc,
					 struct smc_init_info *ini)
{
	struct smc_clc_v2_extension *smc_v2_ext;
	u8 smcr_version;
	int rc;

	if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2))
		goto not_found;

	smc_v2_ext = smc_get_clc_v2_ext(pclc);
	if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
		goto not_found;

	/* prepare RDMA check */
	memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
	memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE);
	memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
	ini->check_smcrv2 = true;
	ini->smcrv2.clc_sk = new_smc->clcsock->sk;
	ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr;
	ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce);
	rc = smc_find_rdma_device(new_smc, ini);
	if (rc) {
		smc_find_ism_store_rc(rc, ini);
		goto not_found;
	}
	if (!ini->smcrv2.uses_gateway)
		memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN);

	smcr_version = ini->smcr_version;
	ini->smcr_version = SMC_V2;
	rc = smc_listen_rdma_init(new_smc, ini);
	if (!rc)
		rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
	if (!rc)
		return;
	ini->smcr_version = smcr_version;
	smc_find_ism_store_rc(rc, ini);

not_found:
	ini->smcr_version &= ~SMC_V2;
	ini->check_smcrv2 = false;
}
static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
					struct smc_clc_msg_proposal *pclc,
					struct smc_init_info *ini)
{
	int rc;

	if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1))
		return SMC_CLC_DECL_NOSMCDEV;

	/* prepare RDMA check */
	memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
	memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE);
	memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
	rc = smc_find_rdma_device(new_smc, ini);
	if (rc) {
		/* no RDMA device found */
		return SMC_CLC_DECL_NOSMCDEV;
	}
	rc = smc_listen_rdma_init(new_smc, ini);
	if (rc)
		return rc;
	return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
}
/* determine the local device matching to proposal */
static int smc_listen_find_device(struct smc_sock *new_smc,
				  struct smc_clc_msg_proposal *pclc,
				  struct smc_init_info *ini)
{
	int prfx_rc;

	/* check for ISM device matching V2 proposed device */
	smc_find_ism_v2_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	/* check for matching IP prefix and subnet length (V1) */
	prfx_rc = smc_listen_prfx_check(new_smc, pclc);
	if (prfx_rc)
		smc_find_ism_store_rc(prfx_rc, ini);

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
		return ini->rc ?: SMC_CLC_DECL_GETVLANERR;

	/* check for ISM device matching V1 proposed device */
	if (!prfx_rc)
		smc_find_ism_v1_device_serv(new_smc, pclc, ini);
	if (ini->ism_dev[0])
		return 0;

	if (!smcr_indicated(pclc->hdr.typev1) &&
	    !smcr_indicated(pclc->hdr.typev2))
		/* skip RDMA and decline */
		return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;

	/* check if RDMA V2 is available */
	smc_find_rdma_v2_device_serv(new_smc, pclc, ini);
	if (ini->smcrv2.ib_dev_v2)
		return 0;

	/* check if RDMA V1 is available */
	if (!prfx_rc) {
		int rc;

		rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
		smc_find_ism_store_rc(rc, ini);
		return (!rc) ? 0 : ini->rc;
	}
	return SMC_CLC_DECL_NOSMCDEV;
}
/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  bool local_first,
				  struct smc_init_info *ini)
{
	struct smc_link *link = new_smc->conn.lnk;
	int reason_code = 0;

	if (local_first)
		smc_link_save_peer_info(link, cclc, ini);

	if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
		return SMC_CLC_DECL_ERR_RTOK;

	if (local_first) {
		if (smc_ib_ready_link(link))
			return SMC_CLC_DECL_ERR_RDYLNK;
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_serv_conf_first_link(new_smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
	}
	return reason_code;
}
/* setup for connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm *cclc;
	struct smc_clc_msg_proposal_area *buf;
	struct smc_clc_msg_proposal *pclc;
	struct smc_init_info *ini = NULL;
	u8 proposal_version = SMC_V1;
	u8 accept_version;
	int rc = 0;

	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
		return smc_listen_out_err(new_smc);

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
		if (rc)
			smc_listen_out_err(new_smc);
		else
			smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}
	pclc = (struct smc_clc_msg_proposal *)buf;
	rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
	if (rc)
		goto out_decl;

	if (pclc->hdr.version > SMC_V1)
		proposal_version = SMC_V2;

	/* IPSec connections opt out of SMC optimizations */
	if (using_ipsec(new_smc)) {
		rc = SMC_CLC_DECL_IPSEC;
		goto out_decl;
	}

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = SMC_CLC_DECL_MEM;
		goto out_decl;
	}

	/* initial version checking */
	rc = smc_listen_v2_check(new_smc, pclc, ini);
	if (rc)
		goto out_decl;

	mutex_lock(&smc_server_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* determine ISM or RoCE device used for connection */
	rc = smc_listen_find_device(new_smc, pclc, ini);
	if (rc)
		goto out_unlock;

	/* send SMC Accept CLC message */
	accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version;
	rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
				 accept_version, ini->negotiated_eid);
	if (rc)
		goto out_unlock;

	/* SMC-D does not need this lock any more */
	if (ini->is_smcd)
		mutex_unlock(&smc_server_lgr_pending);

	/* receive SMC Confirm CLC message */
	memset(buf, 0, sizeof(*buf));
	cclc = (struct smc_clc_msg_accept_confirm *)buf;
	rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
	if (rc) {
		if (!ini->is_smcd)
			goto out_unlock;
		goto out_decl;
	}

	/* finish worker */
	if (!ini->is_smcd) {
		rc = smc_listen_rdma_finish(new_smc, cclc,
					    ini->first_contact_local, ini);
		if (rc)
			goto out_unlock;
		mutex_unlock(&smc_server_lgr_pending);
	}
	smc_conn_save_peer_info(new_smc, cclc);
	smc_listen_out_connected(new_smc);
	SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
	goto out_free;

out_unlock:
	mutex_unlock(&smc_server_lgr_pending);
out_decl:
	smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
			   proposal_version);
out_free:
	kfree(ini);
	kfree(buf);
}
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc) /* clcsock accept queue empty or error */
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
}
static void smc_clcsock_data_ready(struct sock *listen_clcsock)
{
	struct smc_sock *lsmc =
		smc_clcsock_user_data(listen_clcsock);

	if (!lsmc)
		return;
	lsmc->clcsk_data_ready(listen_clcsock);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
		if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
			sock_put(&lsmc->sk);
	}
}
static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
	    smc->connect_nonblock)
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	/* save original sk_data_ready function and establish
	 * smc-specific sk_data_ready function
	 */
	smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
	smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
	smc->clcsock->sk->sk_user_data =
		(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
	rc = kernel_listen(smc->clcsock, backlog);
	if (rc) {
		smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
		goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;

out:
	release_sock(sk);
	return rc;
}
static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
								MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}
static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

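/* send path: fallback sockets forward to the clc socket's sendmsg;
 * MSG_FASTOPEN has no SMC equivalent and forces a fallback to TCP,
 * which is only possible before the handshake has started.
 */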
static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;

	if (msg->msg_flags & MSG_FASTOPEN) {
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
			if (rc)
				goto out;
		} else {
			rc = -EINVAL;
			goto out;
		}
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	} else {
		rc = smc_tx_sendmsg(smc, msg, len);
		SMC_STAT_TX_PAYLOAD(smc, len, rc);
	}
out:
	release_sock(sk);
	return rc;
}

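/* receive path: a CLOSED socket with RCV_SHUTDOWN set has already seen
 * an orderly peer close, so report end-of-file (rc = 0) instead of
 * -ENOTCONN.
 */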
static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
		SMC_STAT_RX_PAYLOAD(smc, rc, rc);
	}

out:
	release_sock(sk);
	return rc;
}

static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}

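/* poll: fallback sockets delegate entirely to the internal clc socket;
 * for native SMC, writability follows conn.sndbuf_space and readability
 * follows conn.bytes_to_rcv, with EPOLLPRI signalling valid urgent data.
 */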
static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	__poll_t mask = 0;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		} else if (smc->use_fallback) { /* as result of connect_work() */
			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
							wait);
			sk->sk_err = smc->clcsock->sk->sk_err;
		} else {
			if ((sk->sk_state != SMC_INIT &&
			     atomic_read(&smc->conn.sndbuf_space)) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}

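/* shutdown maps SHUT_RD/SHUT_WR/SHUT_RDWR onto the SMC close state
 * machine and, unless the active close already moved the socket to
 * SMC_PEERCLOSEWAIT1, also shuts down the internal clc socket.
 */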
static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	bool do_shutdown = true;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int old_state;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			sk->sk_state = SMC_CLOSED;
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		old_state = sk->sk_state;
		rc = smc_close_active(smc);
		if (old_state == SMC_ACTIVE &&
		    sk->sk_state == SMC_PEERCLOSEWAIT1)
			do_shutdown = false;
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (do_shutdown && smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}

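/* setsockopt is always mirrored to the internal clc socket first, so a
 * later fallback to TCP keeps every option the application has set; a
 * few TCP options additionally get SMC-specific treatment below.
 */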
static int smc_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	if (level == SOL_TCP && optname == TCP_ULP)
		return -EOPNOTSUPP;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EBADF;
	}
	if (unlikely(!smc->clcsock->ops->setsockopt))
		rc = -EOPNOTSUPP;
	else
		rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
						   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk_error_report(sk);
	}
	mutex_unlock(&smc->clcsock_release_lock);

	if (optlen < sizeof(int))
		return -EINVAL;
	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	lock_sock(sk);
	if (rc || smc->use_fallback)
		goto out;
	switch (optname) {
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
			rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
		} else {
			rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (val) {
				SMC_STAT_INC(smc, ndly_cnt);
				mod_delayed_work(smc->conn.lgr->tx_wq,
						 &smc->conn.tx_work, 0);
			}
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT &&
		    sk->sk_state != SMC_LISTEN &&
		    sk->sk_state != SMC_CLOSED) {
			if (!val) {
				SMC_STAT_INC(smc, cork_cnt);
				mod_delayed_work(smc->conn.lgr->tx_wq,
						 &smc->conn.tx_work, 0);
			}
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
out:
	release_sock(sk);

	return rc;
}

static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sock->sk);
	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EBADF;
	}
	/* socket options apply to the CLC socket */
	if (unlikely(!smc->clcsock->ops->getsockopt)) {
		mutex_unlock(&smc->clcsock_release_lock);
		return -EOPNOTSUPP;
	}
	rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					   optval, optlen);
	mutex_unlock(&smc->clcsock_release_lock);
	return rc;
}

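/* ioctl: fallback sockets delegate to the clc socket; for native SMC the
 * queue-size ioctls (SIOCINQ/SIOCOUTQ/SIOCOUTQNSD/SIOCATMARK) are
 * answered from the connection's buffer counters and cursors.
 */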
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
					atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}

static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback) {
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	} else {
		SMC_STAT_INC(smc, sendpage_cnt);
		rc = sock_no_sendpage(sock, page, offset, size, flags);
	}

out:
	return rc;
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * is done.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		SMC_STAT_INC(smc, splice_cnt);
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}

/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};

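/* create an smc socket together with the internal TCP socket used for
 * the CLC handshake and for fallback; the ULP path hands in an existing
 * TCP socket as clcsock instead of creating a fresh one.
 */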
static int __smc_create(struct net *net, struct socket *sock, int protocol,
			int kern, struct socket *clcsock)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;

	rc = 0;
	if (!clcsock) {
		rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
				      &smc->clcsock);
		if (rc) {
			sk_common_release(sk);
			goto out;
		}
	} else {
		smc->clcsock = clcsock;
	}

	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}

static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	return __smc_create(net, sock, protocol, kern, NULL);
}

static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

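/* tcp_ulp_ops init: convert an unconnected TCP socket in place into an
 * SMC socket; the TCP socket becomes the new smc sock's clc socket and
 * the existing struct file is repointed to the new SMC socket.
 */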
static int smc_ulp_init(struct sock *sk)
{
	struct socket *tcp = sk->sk_socket;
	struct net *net = sock_net(sk);
	struct socket *smcsock;
	int protocol, ret;

	/* only TCP can be replaced */
	if (tcp->type != SOCK_STREAM || sk->sk_protocol != IPPROTO_TCP ||
	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6))
		return -ESOCKTNOSUPPORT;
	/* don't handle wq now */
	if (tcp->state != SS_UNCONNECTED || !tcp->file || tcp->wq.fasync_list)
		return -ENOTCONN;

	if (sk->sk_family == AF_INET)
		protocol = SMCPROTO_SMC;
	else
		protocol = SMCPROTO_SMC6;

	smcsock = sock_alloc();
	if (!smcsock)
		return -ENFILE;

	smcsock->type = SOCK_STREAM;
	__module_get(THIS_MODULE); /* tried in __tcp_ulp_find_autoload */
	ret = __smc_create(net, smcsock, protocol, 1, tcp);
	if (ret) {
		sock_release(smcsock); /* module_put() in sock_release(), ops won't be NULL */
		return ret;
	}

	/* replace the tcp socket with the smc socket */
	smcsock->file = tcp->file;
	smcsock->file->private_data = smcsock;
	smcsock->file->f_inode = SOCK_INODE(smcsock); /* replace inode when sock_close */
	smcsock->file->f_path.dentry->d_inode = SOCK_INODE(smcsock); /* dput() in __fput */
	tcp->file = NULL;

	return ret;
}

static void smc_ulp_clone(const struct request_sock *req, struct sock *newsk,
			  const gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(newsk);

	/* don't inherit ulp ops to child sockets cloned from a listen socket */
	icsk->icsk_ulp_ops = NULL;
}

static struct tcp_ulp_ops smc_ulp_ops __read_mostly = {
	.name		= "smc",
	.owner		= THIS_MODULE,
	.init		= smc_ulp_init,
	.clone		= smc_ulp_clone,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_pnet_net_exit(net);
}

static __net_init int smc_net_stat_init(struct net *net)
{
	return smc_stats_init(net);
}

static void __net_exit smc_net_stat_exit(struct net *net)
{
	smc_stats_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};

static struct pernet_operations smc_net_stat_ops = {
	.init = smc_net_stat_init,
	.exit = smc_net_stat_exit,
};

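/* module init: pernet subsystems come first, then the SMC infrastructure
 * (netlink, pnet table, workqueues, core, llc, cdc), then the protocols,
 * the socket family, the IB client and the TCP ULP; any failure unwinds
 * everything registered so far in reverse order.
 */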
static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	rc = register_pernet_subsys(&smc_net_stat_ops);
	if (rc)
		return rc;

	smc_ism_init();
	smc_clc_init();

	rc = smc_nl_init();
	if (rc)
		goto out_pernet_subsys;

	rc = smc_pnet_init();
	if (rc)
		goto out_nl;

	rc = -ENOMEM;
	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
	if (!smc_hs_wq)
		goto out_pnet;

	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
	if (!smc_close_wq)
		goto out_alloc_hs_wq;

	rc = smc_core_init();
	if (rc) {
		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
		goto out_alloc_wqs;
	}

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	rc = tcp_register_ulp(&smc_ulp_ops);
	if (rc) {
		pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
		goto out_ib;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_ib:
	smc_ib_unregister_client();
out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_core:
	smc_core_exit();
out_alloc_wqs:
	destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
	destroy_workqueue(smc_hs_wq);
out_pnet:
	smc_pnet_exit();
out_nl:
	smc_nl_exit();
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}

static void __exit smc_exit(void)
{
	static_branch_disable(&tcp_have_smc);
	tcp_unregister_ulp(&smc_ulp_ops);
	sock_unregister(PF_SMC);
	smc_core_exit();
	smc_ib_unregister_client();
	destroy_workqueue(smc_close_wq);
	destroy_workqueue(smc_hs_wq);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	smc_nl_exit();
	unregister_pernet_subsys(&smc_net_stat_ops);
	unregister_pernet_subsys(&smc_net_ops);
	rcu_barrier();
}

module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);
MODULE_ALIAS_TCP_ULP("smc");