// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - support for alternate links postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>
#include <linux/ctype.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"
static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */

struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
struct workqueue_struct	*smc_close_wq;	/* wq for close work */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}

static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};
int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	write_unlock_bh(&h->lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);
void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);
struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
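/* Minimal usage sketch (userspace, not part of this file): an AF_SMC socket
 * is created like a TCP socket; the protocol argument selects smc_proto or
 * smc_proto6 above:
 *
 *	int fd  = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);	// IPv4 clcsock
 *	int fd6 = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC6);	// IPv6 clcsock
 *
 * bind/listen/connect then behave as for TCP; the handshake code below
 * decides whether data flows over SMC or falls back to the TCP clcsock.
 */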
static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
		smc->clcsock->file->private_data = smc->sk.sk_socket;
		smc->clcsock->file = NULL;
	}
}
static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_CLOSED) {
			if (sk->sk_state != SMC_LISTEN &&
			    sk->sk_state != SMC_INIT)
				sock_put(sk); /* passive closing */
			if (sk->sk_state == SMC_LISTEN) {
				/* wake up clcsock accept */
				rc = kernel_sock_shutdown(smc->clcsock,
							  SHUT_RDWR);
			}
			sk->sk_state = SMC_CLOSED;
			sk->sk_state_change(sk);
		}
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}
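/* Summary of __smc_release() above: for a native SMC socket it runs the
 * active close state machine, marks the sock dead and shuts down both
 * directions; for a fallback socket it only moves to SMC_CLOSED, wakes a
 * possibly blocked clcsock accept and hands the file back to the SMC socket.
 * Once the sock has reached SMC_CLOSED, the clcsock and (for native SMC) the
 * connection resources are released.
 */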
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	sock_hold(sk); /* sock_put below */
	smc = smc_sk(sk);

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);

	if (cancel_work_sync(&smc->connect_work))
		sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
out:
	return rc;
}
static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);
	mutex_init(&smc->clcsock_release_lock);

	return sk;
}
262 static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
265 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
266 struct sock *sk = sock->sk;
267 struct smc_sock *smc;
272 /* replicate tests from inet_bind(), to be safe wrt. future changes */
274 if (addr_len < sizeof(struct sockaddr_in))
278 if (addr->sin_family != AF_INET &&
279 addr->sin_family != AF_INET6 &&
280 addr->sin_family != AF_UNSPEC)
282 /* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
283 if (addr->sin_family == AF_UNSPEC &&
284 addr->sin_addr.s_addr != htonl(INADDR_ANY))
289 /* Check if socket is already active */
291 if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
294 smc->clcsock->sk->sk_reuse = sk->sk_reuse;
295 rc = kernel_bind(smc->clcsock, uaddr, addr_len);
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
307 nsk->sk_type = osk->sk_type;
308 nsk->sk_sndbuf = osk->sk_sndbuf;
309 nsk->sk_rcvbuf = osk->sk_rcvbuf;
310 nsk->sk_sndtimeo = osk->sk_sndtimeo;
311 nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
312 nsk->sk_mark = osk->sk_mark;
313 nsk->sk_priority = osk->sk_priority;
314 nsk->sk_rcvlowat = osk->sk_rcvlowat;
315 nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
316 nsk->sk_err = osk->sk_err;
318 nsk->sk_flags &= ~mask;
319 nsk->sk_flags |= osk->sk_flags & mask;
322 #define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
323 (1UL << SOCK_KEEPOPEN) | \
324 (1UL << SOCK_LINGER) | \
325 (1UL << SOCK_BROADCAST) | \
326 (1UL << SOCK_TIMESTAMP) | \
327 (1UL << SOCK_DBG) | \
328 (1UL << SOCK_RCVTSTAMP) | \
329 (1UL << SOCK_RCVTSTAMPNS) | \
330 (1UL << SOCK_LOCALROUTE) | \
331 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
332 (1UL << SOCK_RXQ_OVFL) | \
333 (1UL << SOCK_WIFI_STATUS) | \
334 (1UL << SOCK_NOFCS) | \
335 (1UL << SOCK_FILTER_LOCKED) | \
336 (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}
#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
355 /* register the new rmb on all links */
356 static int smcr_lgr_reg_rmbs(struct smc_link *link,
357 struct smc_buf_desc *rmb_desc)
359 struct smc_link_group *lgr = link->lgr;
362 rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	/* protect against parallel smc_llc_cli_rkey_exchange() and
	 * parallel smcr_link_reg_rmb()
	 */
	mutex_lock(&lgr->llc_conf_mutex);
369 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
370 if (!smc_link_active(&lgr->lnk[i]))
372 rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
377 /* exchange confirm_rkey msg with peer */
378 rc = smc_llc_do_confirm_rkey(link, rmb_desc);
383 rmb_desc->is_conf_rkey = true;
385 mutex_unlock(&lgr->llc_conf_mutex);
386 smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
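/* The sequence above is, roughly: take the RKEY LLC flow, register the RMB
 * on every active link of the link group under llc_conf_mutex, then confirm
 * the rkey with the peer via an LLC CONFIRM_RKEY exchange before releasing
 * the flow again.
 */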
390 static int smcr_clnt_conf_first_link(struct smc_sock *smc)
392 struct smc_link *link = smc->conn.lnk;
393 struct smc_llc_qentry *qentry;
396 /* receive CONFIRM LINK request from server over RoCE fabric */
397 qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
398 SMC_LLC_CONFIRM_LINK);
400 struct smc_clc_msg_decline dclc;
402 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
403 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
404 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
406 smc_llc_save_peer_uid(qentry);
407 rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
408 smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
410 return SMC_CLC_DECL_RMBE_EC;
412 rc = smc_ib_modify_qp_rts(link);
414 return SMC_CLC_DECL_ERR_RDYLNK;
416 smc_wr_remember_qp_attr(link);
418 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
419 return SMC_CLC_DECL_ERR_REGRMB;
421 /* confirm_rkey is implicit on 1st contact */
422 smc->conn.rmb_desc->is_conf_rkey = true;
424 /* send CONFIRM LINK response over RoCE fabric */
425 rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
427 return SMC_CLC_DECL_TIMEOUT_CL;
429 smc_llc_link_active(link);
430 smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
432 /* optional 2nd link, receive ADD LINK request from server */
433 qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
436 struct smc_clc_msg_decline dclc;
438 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
439 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
441 rc = 0; /* no DECLINE received, go with one link */
444 smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
445 smc_llc_cli_add_link(link, qentry);
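/* smcr_clnt_conf_first_link() above implements the client side of first
 * link setup: wait for the server's CONFIRM_LINK LLC message (a CLC DECLINE
 * received while waiting maps to a decline reason), move the QP to RTS,
 * register the connection's RMB, answer with a CONFIRM_LINK response, and
 * finally handle an optional ADD_LINK request for a second link.
 */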
449 static bool smc_isascii(char *hostname)
453 for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
454 if (!isascii(hostname[i]))
459 static void smc_conn_save_peer_info_fce(struct smc_sock *smc,
460 struct smc_clc_msg_accept_confirm *clc)
462 struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
463 (struct smc_clc_msg_accept_confirm_v2 *)clc;
464 struct smc_clc_first_contact_ext *fce;
	if (clc->hdr.version == SMC_V1 ||
	    !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK))
		return;
471 if (smc->conn.lgr->is_smcd) {
472 memcpy(smc->conn.lgr->negotiated_eid, clc_v2->d1.eid,
474 clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2,
477 memcpy(smc->conn.lgr->negotiated_eid, clc_v2->r1.eid,
479 clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm_v2,
482 fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc_v2) + clc_v2_len);
483 smc->conn.lgr->peer_os = fce->os_type;
484 smc->conn.lgr->peer_smc_release = fce->release;
485 if (smc_isascii(fce->hostname))
486 memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
487 SMC_MAX_HOSTNAME_LEN);
490 static void smcr_conn_save_peer_info(struct smc_sock *smc,
491 struct smc_clc_msg_accept_confirm *clc)
493 int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);
495 smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
496 smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
497 smc->conn.peer_rmbe_size = bufsize;
498 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
499 smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
502 static void smcd_conn_save_peer_info(struct smc_sock *smc,
503 struct smc_clc_msg_accept_confirm *clc)
505 int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);
507 smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
508 smc->conn.peer_token = clc->d0.token;
509 /* msg header takes up space in the buffer */
510 smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
511 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
512 smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
515 static void smc_conn_save_peer_info(struct smc_sock *smc,
516 struct smc_clc_msg_accept_confirm *clc)
518 if (smc->conn.lgr->is_smcd)
519 smcd_conn_save_peer_info(smc, clc);
521 smcr_conn_save_peer_info(smc, clc);
522 smc_conn_save_peer_info_fce(smc, clc);
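/* Note on the offsets computed above: peer_rmbe_size is the usable size of
 * one element of the peer's receive buffer, and tx_off is where this
 * connection's data starts inside the peer's RMB. As an illustration with
 * assumed numbers only: for a 64 KiB RMBE and peer_rmbe_idx == 2, SMC-R
 * writes at offset 64 KiB (bufsize * (idx - 1)), while SMC-D additionally
 * reserves sizeof(struct smcd_cdc_msg) of each element for the CDC header.
 */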
525 static void smc_link_save_peer_info(struct smc_link *link,
526 struct smc_clc_msg_accept_confirm *clc,
527 struct smc_init_info *ini)
529 link->peer_qpn = ntoh24(clc->r0.qpn);
530 memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE);
531 memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac));
532 link->peer_psn = ntoh24(clc->r0.psn);
533 link->peer_mtu = clc->r0.qp_mtu;
536 static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
537 struct smc_stats_fback *fback_arr)
541 for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
542 if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
543 fback_arr[cnt].count++;
546 if (!fback_arr[cnt].fback_code) {
547 fback_arr[cnt].fback_code = smc->fallback_rsn;
548 fback_arr[cnt].count++;
554 static void smc_stat_fallback(struct smc_sock *smc)
556 struct net *net = sock_net(&smc->sk);
558 mutex_lock(&net->smc.mutex_fback_rsn);
559 if (smc->listen_smc) {
560 smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
561 net->smc.fback_rsn->srv_fback_cnt++;
563 smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
564 net->smc.fback_rsn->clnt_fback_cnt++;
566 mutex_unlock(&net->smc.mutex_fback_rsn);
569 static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
571 wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
572 wait_queue_head_t *clc_wait;
575 mutex_lock(&smc->clcsock_release_lock);
577 mutex_unlock(&smc->clcsock_release_lock);
580 smc->use_fallback = true;
581 smc->fallback_rsn = reason_code;
582 smc_stat_fallback(smc);
583 trace_smc_switch_to_fallback(smc, reason_code);
584 if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
585 smc->clcsock->file = smc->sk.sk_socket->file;
586 smc->clcsock->file->private_data = smc->clcsock;
587 smc->clcsock->wq.fasync_list =
588 smc->sk.sk_socket->wq.fasync_list;
		/* There may be some entries remaining in
		 * smc socket->wq, which should be removed
		 * to clcsocket->wq during the fallback.
		 */
		clc_wait = sk_sleep(smc->clcsock->sk);
595 spin_lock_irqsave(&smc_wait->lock, flags);
596 spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
597 list_splice_init(&smc_wait->head, &clc_wait->head);
598 spin_unlock(&clc_wait->lock);
599 spin_unlock_irqrestore(&smc_wait->lock, flags);
601 mutex_unlock(&smc->clcsock_release_lock);
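/* After the switch above the clcsock owns the file (private_data and the
 * fasync list point to it) and any waiters queued on the SMC socket's wait
 * queue have been spliced over to the clcsock's wait queue, so TCP wakeups
 * reach them directly.
 */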
605 /* fall back during connect */
606 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
608 struct net *net = sock_net(&smc->sk);
611 rc = smc_switch_to_fallback(smc, reason_code);
612 if (rc) { /* fallback fails */
613 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
614 if (smc->sk.sk_state == SMC_INIT)
615 sock_put(&smc->sk); /* passive closing */
618 smc_copy_sock_settings_to_clc(smc);
619 smc->connect_nonblock = 0;
620 if (smc->sk.sk_state == SMC_INIT)
621 smc->sk.sk_state = SMC_ACTIVE;
625 /* decline and fall back during connect */
626 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
629 struct net *net = sock_net(&smc->sk);
632 if (reason_code < 0) { /* error, fallback is not possible */
633 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
634 if (smc->sk.sk_state == SMC_INIT)
635 sock_put(&smc->sk); /* passive closing */
638 if (reason_code != SMC_CLC_DECL_PEERDECL) {
639 rc = smc_clc_send_decline(smc, reason_code, version);
641 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
642 if (smc->sk.sk_state == SMC_INIT)
643 sock_put(&smc->sk); /* passive closing */
647 return smc_connect_fallback(smc, reason_code);
650 static void smc_conn_abort(struct smc_sock *smc, int local_first)
652 struct smc_connection *conn = &smc->conn;
653 struct smc_link_group *lgr = conn->lgr;
654 bool lgr_valid = false;
656 if (smc_conn_lgr_valid(conn))
660 if (local_first && lgr_valid)
661 smc_lgr_cleanup_early(lgr);
664 /* check if there is a rdma device available for this connection. */
665 /* called for connect and listen */
666 static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
668 /* PNET table look up: search active ib_device and port
669 * within same PNETID that also contains the ethernet device
670 * used for the internal TCP socket
672 smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
673 if (!ini->check_smcrv2 && !ini->ib_dev)
674 return SMC_CLC_DECL_NOSMCRDEV;
675 if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2)
676 return SMC_CLC_DECL_NOSMCRDEV;
680 /* check if there is an ISM device available for this connection. */
681 /* called for connect and listen */
682 static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
684 /* Find ISM device with same PNETID as connecting interface */
685 smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
686 if (!ini->ism_dev[0])
687 return SMC_CLC_DECL_NOSMCDDEV;
689 ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
693 /* is chid unique for the ism devices that are already determined? */
694 static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
697 int i = (!ini->ism_dev[0]) ? 1 : 0;
700 if (ini->ism_chid[i] == chid)
/* determine possible V2 ISM devices (either without PNETID or with PNETID plus
 * PNETID matching net_device)
 */
708 static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
709 struct smc_init_info *ini)
711 int rc = SMC_CLC_DECL_NOSMCDDEV;
712 struct smcd_dev *smcd;
716 if (smcd_indicated(ini->smc_type_v1))
717 rc = 0; /* already initialized for V1 */
718 mutex_lock(&smcd_dev_list.mutex);
719 list_for_each_entry(smcd, &smcd_dev_list.list, list) {
720 if (smcd->going_away || smcd == ini->ism_dev[0])
722 chid = smc_ism_get_chid(smcd);
723 if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
725 if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
726 smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
727 ini->ism_dev[i] = smcd;
728 ini->ism_chid[i] = chid;
732 if (i > SMC_MAX_ISM_DEVS)
736 mutex_unlock(&smcd_dev_list.mutex);
737 ini->ism_offered_cnt = i - 1;
738 if (!ini->ism_dev[0] && !ini->ism_dev[1])
739 ini->smcd_version = 0;
744 /* Check for VLAN ID and register it on ISM device just for CLC handshake */
745 static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
746 struct smc_init_info *ini)
748 if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
749 return SMC_CLC_DECL_ISMVLANERR;
753 static int smc_find_proposal_devices(struct smc_sock *smc,
754 struct smc_init_info *ini)
758 /* check if there is an ism device available */
759 if (!(ini->smcd_version & SMC_V1) ||
760 smc_find_ism_device(smc, ini) ||
761 smc_connect_ism_vlan_setup(smc, ini))
762 ini->smcd_version &= ~SMC_V1;
763 /* else ISM V1 is supported for this connection */
765 /* check if there is an rdma device available */
766 if (!(ini->smcr_version & SMC_V1) ||
767 smc_find_rdma_device(smc, ini))
768 ini->smcr_version &= ~SMC_V1;
769 /* else RDMA is supported for this connection */
771 ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1,
772 ini->smcr_version & SMC_V1);
774 /* check if there is an ism v2 device available */
775 if (!(ini->smcd_version & SMC_V2) ||
776 !smc_ism_is_v2_capable() ||
777 smc_find_ism_v2_device_clnt(smc, ini))
778 ini->smcd_version &= ~SMC_V2;
780 /* check if there is an rdma v2 device available */
781 ini->check_smcrv2 = true;
782 ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr;
783 if (!(ini->smcr_version & SMC_V2) ||
784 smc->clcsock->sk->sk_family != AF_INET ||
785 !smc_clc_ueid_count() ||
786 smc_find_rdma_device(smc, ini))
787 ini->smcr_version &= ~SMC_V2;
788 ini->check_smcrv2 = false;
790 ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2,
791 ini->smcr_version & SMC_V2);
793 /* if neither ISM nor RDMA are supported, fallback */
794 if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
795 rc = SMC_CLC_DECL_NOSMCDEV;
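/* smc_find_proposal_devices() above clears the V1/V2 ISM and RDMA bits in
 * ini for every variant without a usable device (or with a failed ISM VLAN
 * setup), and only reports SMC_CLC_DECL_NOSMCDEV when neither an ISM nor an
 * RDMA device remains, which makes the caller fall back to TCP.
 */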
/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
803 static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
804 struct smc_init_info *ini)
806 if (!smcd_indicated(ini->smc_type_v1))
808 if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
809 return SMC_CLC_DECL_CNFERR;
813 #define SMC_CLC_MAX_ACCEPT_LEN \
814 (sizeof(struct smc_clc_msg_accept_confirm_v2) + \
815 sizeof(struct smc_clc_first_contact_ext) + \
816 sizeof(struct smc_clc_msg_trail))
818 /* CLC handshake during connect */
819 static int smc_connect_clc(struct smc_sock *smc,
820 struct smc_clc_msg_accept_confirm_v2 *aclc2,
821 struct smc_init_info *ini)
825 /* do inband token exchange */
826 rc = smc_clc_send_proposal(smc, ini);
829 /* receive SMC Accept CLC message */
830 return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
831 SMC_CLC_ACCEPT, CLC_WAIT_TIME);
834 void smc_fill_gid_list(struct smc_link_group *lgr,
835 struct smc_gidlist *gidlist,
836 struct smc_ib_device *known_dev, u8 *known_gid)
838 struct smc_init_info *alt_ini = NULL;
840 memset(gidlist, 0, sizeof(*gidlist));
841 memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE);
843 alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL);
847 alt_ini->vlan_id = lgr->vlan_id;
848 alt_ini->check_smcrv2 = true;
849 alt_ini->smcrv2.saddr = lgr->saddr;
850 smc_pnet_find_alt_roce(lgr, alt_ini, known_dev);
852 if (!alt_ini->smcrv2.ib_dev_v2)
855 memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2,
862 static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
863 struct smc_clc_msg_accept_confirm *aclc,
864 struct smc_init_info *ini)
866 struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
867 (struct smc_clc_msg_accept_confirm_v2 *)aclc;
868 struct smc_clc_first_contact_ext *fce =
869 (struct smc_clc_first_contact_ext *)
870 (((u8 *)clc_v2) + sizeof(*clc_v2));
872 if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
875 if (fce->v2_direct) {
876 memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
877 ini->smcrv2.uses_gateway = false;
879 if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr,
880 smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
881 ini->smcrv2.nexthop_mac,
882 &ini->smcrv2.uses_gateway))
883 return SMC_CLC_DECL_NOROUTE;
		if (!ini->smcrv2.uses_gateway) {
			/* mismatch: peer claims indirect, but it's direct */
			return SMC_CLC_DECL_NOINDIRECT;
		}
892 /* setup for RDMA connection of client */
893 static int smc_connect_rdma(struct smc_sock *smc,
894 struct smc_clc_msg_accept_confirm *aclc,
895 struct smc_init_info *ini)
897 int i, reason_code = 0;
898 struct smc_link *link;
901 ini->is_smcd = false;
902 ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
903 ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
904 memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN);
905 memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE);
906 memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN);
908 reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini);
912 mutex_lock(&smc_client_lgr_pending);
913 reason_code = smc_conn_create(smc, ini);
915 mutex_unlock(&smc_client_lgr_pending);
919 smc_conn_save_peer_info(smc, aclc);
921 if (ini->first_contact_local) {
922 link = smc->conn.lnk;
924 /* set link that was assigned by server */
926 for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
927 struct smc_link *l = &smc->conn.lgr->lnk[i];
929 if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
930 !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
932 (aclc->hdr.version > SMC_V1 ||
933 !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
934 sizeof(l->peer_mac)))) {
940 reason_code = SMC_CLC_DECL_NOSRVLINK;
943 smc_switch_link_and_count(&smc->conn, link);
946 /* create send buffer and rmb */
947 if (smc_buf_create(smc, false)) {
948 reason_code = SMC_CLC_DECL_MEM;
952 if (ini->first_contact_local)
953 smc_link_save_peer_info(link, aclc, ini);
955 if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
956 reason_code = SMC_CLC_DECL_ERR_RTOK;
963 if (ini->first_contact_local) {
964 if (smc_ib_ready_link(link)) {
965 reason_code = SMC_CLC_DECL_ERR_RDYLNK;
969 if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
970 reason_code = SMC_CLC_DECL_ERR_REGRMB;
974 smc_rmb_sync_sg_for_device(&smc->conn);
976 if (aclc->hdr.version > SMC_V1) {
977 struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
978 (struct smc_clc_msg_accept_confirm_v2 *)aclc;
980 eid = clc_v2->r1.eid;
981 if (ini->first_contact_local)
982 smc_fill_gid_list(link->lgr, &ini->smcrv2.gidlist,
983 link->smcibdev, link->gid);
986 reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
987 aclc->hdr.version, eid, ini);
993 if (ini->first_contact_local) {
994 /* QP confirmation over RoCE fabric */
995 smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
996 reason_code = smcr_clnt_conf_first_link(smc);
997 smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
1001 mutex_unlock(&smc_client_lgr_pending);
1003 smc_copy_sock_settings_to_clc(smc);
1004 smc->connect_nonblock = 0;
1005 if (smc->sk.sk_state == SMC_INIT)
1006 smc->sk.sk_state = SMC_ACTIVE;
1010 smc_conn_abort(smc, ini->first_contact_local);
1011 mutex_unlock(&smc_client_lgr_pending);
1012 smc->connect_nonblock = 0;
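/* Client-side SMC-R connect, as implemented above: create or reuse a link
 * group under smc_client_lgr_pending, pick the link the server assigned,
 * allocate send buffer and RMB, save the peer's rtoken, send the CLC
 * CONFIRM message, and on first contact run the CONFIRM_LINK exchange
 * before declaring the socket active. Errors abort the connection and are
 * turned into CLC decline reason codes.
 */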
/* The server has chosen one of the proposed ISM devices for the communication.
 * Determine from the CHID of the received CLC ACCEPT the ISM device chosen.
 */
1021 smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc,
1022 struct smc_init_info *ini)
1026 for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
1027 if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) {
1028 ini->ism_selected = i;
1036 /* setup for ISM connection of client */
1037 static int smc_connect_ism(struct smc_sock *smc,
1038 struct smc_clc_msg_accept_confirm *aclc,
1039 struct smc_init_info *ini)
1044 ini->is_smcd = true;
1045 ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
1047 if (aclc->hdr.version == SMC_V2) {
1048 struct smc_clc_msg_accept_confirm_v2 *aclc_v2 =
1049 (struct smc_clc_msg_accept_confirm_v2 *)aclc;
1051 rc = smc_v2_determine_accepted_chid(aclc_v2, ini);
1055 ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;
1057 /* there is only one lgr role for SMC-D; use server lock */
1058 mutex_lock(&smc_server_lgr_pending);
1059 rc = smc_conn_create(smc, ini);
1061 mutex_unlock(&smc_server_lgr_pending);
1065 /* Create send and receive buffers */
1066 rc = smc_buf_create(smc, true);
1068 rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
1072 smc_conn_save_peer_info(smc, aclc);
1073 smc_close_init(smc);
1077 if (aclc->hdr.version > SMC_V1) {
1078 struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
1079 (struct smc_clc_msg_accept_confirm_v2 *)aclc;
1081 eid = clc_v2->d1.eid;
1084 rc = smc_clc_send_confirm(smc, ini->first_contact_local,
1085 aclc->hdr.version, eid, NULL);
1088 mutex_unlock(&smc_server_lgr_pending);
1090 smc_copy_sock_settings_to_clc(smc);
1091 smc->connect_nonblock = 0;
1092 if (smc->sk.sk_state == SMC_INIT)
1093 smc->sk.sk_state = SMC_ACTIVE;
1097 smc_conn_abort(smc, ini->first_contact_local);
1098 mutex_unlock(&smc_server_lgr_pending);
1099 smc->connect_nonblock = 0;
1104 /* check if received accept type and version matches a proposed one */
1105 static int smc_connect_check_aclc(struct smc_init_info *ini,
1106 struct smc_clc_msg_accept_confirm *aclc)
1108 if (aclc->hdr.typev1 != SMC_TYPE_R &&
1109 aclc->hdr.typev1 != SMC_TYPE_D)
1110 return SMC_CLC_DECL_MODEUNSUPP;
1112 if (aclc->hdr.version >= SMC_V2) {
1113 if ((aclc->hdr.typev1 == SMC_TYPE_R &&
1114 !smcr_indicated(ini->smc_type_v2)) ||
1115 (aclc->hdr.typev1 == SMC_TYPE_D &&
1116 !smcd_indicated(ini->smc_type_v2)))
1117 return SMC_CLC_DECL_MODEUNSUPP;
1119 if ((aclc->hdr.typev1 == SMC_TYPE_R &&
1120 !smcr_indicated(ini->smc_type_v1)) ||
1121 (aclc->hdr.typev1 == SMC_TYPE_D &&
1122 !smcd_indicated(ini->smc_type_v1)))
1123 return SMC_CLC_DECL_MODEUNSUPP;
1129 /* perform steps before actually connecting */
1130 static int __smc_connect(struct smc_sock *smc)
1132 u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
1133 struct smc_clc_msg_accept_confirm_v2 *aclc2;
1134 struct smc_clc_msg_accept_confirm *aclc;
1135 struct smc_init_info *ini = NULL;
1139 if (smc->use_fallback)
1140 return smc_connect_fallback(smc, smc->fallback_rsn);
1142 /* if peer has not signalled SMC-capability, fall back */
1143 if (!tcp_sk(smc->clcsock->sk)->syn_smc)
1144 return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
1146 /* IPSec connections opt out of SMC optimizations */
1147 if (using_ipsec(smc))
1148 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
1151 ini = kzalloc(sizeof(*ini), GFP_KERNEL);
1153 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
1156 ini->smcd_version = SMC_V1 | SMC_V2;
1157 ini->smcr_version = SMC_V1 | SMC_V2;
1158 ini->smc_type_v1 = SMC_TYPE_B;
1159 ini->smc_type_v2 = SMC_TYPE_B;
1161 /* get vlan id from IP device */
1162 if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
1163 ini->smcd_version &= ~SMC_V1;
1164 ini->smcr_version = 0;
1165 ini->smc_type_v1 = SMC_TYPE_N;
1166 if (!ini->smcd_version) {
1167 rc = SMC_CLC_DECL_GETVLANERR;
1172 rc = smc_find_proposal_devices(smc, ini);
1176 buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
1178 rc = SMC_CLC_DECL_MEM;
1181 aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf;
1182 aclc = (struct smc_clc_msg_accept_confirm *)aclc2;
1184 /* perform CLC handshake */
1185 rc = smc_connect_clc(smc, aclc2, ini);
1189 /* check if smc modes and versions of CLC proposal and accept match */
1190 rc = smc_connect_check_aclc(ini, aclc);
1191 version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
1195 /* depending on previous steps, connect using rdma or ism */
1196 if (aclc->hdr.typev1 == SMC_TYPE_R) {
1197 ini->smcr_version = version;
1198 rc = smc_connect_rdma(smc, aclc, ini);
1199 } else if (aclc->hdr.typev1 == SMC_TYPE_D) {
1200 ini->smcd_version = version;
1201 rc = smc_connect_ism(smc, aclc, ini);
1206 SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
1207 smc_connect_ism_vlan_cleanup(smc, ini);
1213 smc_connect_ism_vlan_cleanup(smc, ini);
1217 return smc_connect_decline_fallback(smc, rc, version);
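/* __smc_connect() above drives active connection setup: it checks the
 * peer's SMC capability, collects candidate ISM and RDMA devices for V1 and
 * V2, runs the CLC proposal/accept exchange, and then branches into
 * smc_connect_rdma() or smc_connect_ism() depending on the accepted type.
 * Any failure ends in smc_connect_decline_fallback(), i.e. plain TCP.
 */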
1220 static void smc_connect_work(struct work_struct *work)
1222 struct smc_sock *smc = container_of(work, struct smc_sock,
1224 long timeo = smc->sk.sk_sndtimeo;
1228 timeo = MAX_SCHEDULE_TIMEOUT;
1229 lock_sock(smc->clcsock->sk);
1230 if (smc->clcsock->sk->sk_err) {
1231 smc->sk.sk_err = smc->clcsock->sk->sk_err;
1232 } else if ((1 << smc->clcsock->sk->sk_state) &
1233 (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1234 rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
1235 if ((rc == -EPIPE) &&
1236 ((1 << smc->clcsock->sk->sk_state) &
1237 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
1240 release_sock(smc->clcsock->sk);
1241 lock_sock(&smc->sk);
1242 if (rc != 0 || smc->sk.sk_err) {
1243 smc->sk.sk_state = SMC_CLOSED;
1244 if (rc == -EPIPE || rc == -EAGAIN)
1245 smc->sk.sk_err = EPIPE;
1246 else if (signal_pending(current))
1247 smc->sk.sk_err = -sock_intr_errno(timeo);
1248 sock_put(&smc->sk); /* passive closing */
1252 rc = __smc_connect(smc);
1254 smc->sk.sk_err = -rc;
1257 if (!sock_flag(&smc->sk, SOCK_DEAD)) {
1258 if (smc->sk.sk_err) {
1259 smc->sk.sk_state_change(&smc->sk);
1260 } else { /* allow polling before and after fallback decision */
1261 smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
1262 smc->sk.sk_write_space(&smc->sk);
1265 release_sock(&smc->sk);
1268 static int smc_connect(struct socket *sock, struct sockaddr *addr,
1269 int alen, int flags)
1271 struct sock *sk = sock->sk;
1272 struct smc_sock *smc;
1277 /* separate smc parameter checking to be safe */
1278 if (alen < sizeof(addr->sa_family))
1280 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
1284 switch (sk->sk_state) {
1294 smc_copy_sock_settings_to_clc(smc);
1295 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1296 if (smc->connect_nonblock) {
1300 rc = kernel_connect(smc->clcsock, addr, alen, flags);
1301 if (rc && rc != -EINPROGRESS)
1304 sock_hold(&smc->sk); /* sock put in passive closing */
1305 if (smc->use_fallback)
1307 if (flags & O_NONBLOCK) {
1308 if (queue_work(smc_hs_wq, &smc->connect_work))
1309 smc->connect_nonblock = 1;
1312 rc = __smc_connect(smc);
1316 rc = 0; /* success cases including fallback */
1325 static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
1327 struct socket *new_clcsock = NULL;
1328 struct sock *lsk = &lsmc->sk;
1329 struct sock *new_sk;
1333 new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
1336 lsk->sk_err = ENOMEM;
1341 *new_smc = smc_sk(new_sk);
1343 mutex_lock(&lsmc->clcsock_release_lock);
1345 rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
1346 mutex_unlock(&lsmc->clcsock_release_lock);
1348 if (rc < 0 && rc != -EAGAIN)
1350 if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
1351 new_sk->sk_prot->unhash(new_sk);
1353 sock_release(new_clcsock);
1354 new_sk->sk_state = SMC_CLOSED;
1355 sock_set_flag(new_sk, SOCK_DEAD);
1356 sock_put(new_sk); /* final */
	/* new clcsock has inherited the smc listen-specific sk_data_ready
	 * function; switch it back to the original sk_data_ready function
	 */
	new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
1365 (*new_smc)->clcsock = new_clcsock;
/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
1373 static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
1375 struct smc_sock *par = smc_sk(parent);
	sock_hold(sk); /* sock_put in smc_accept_unlink() */
1378 spin_lock(&par->accept_q_lock);
1379 list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
1380 spin_unlock(&par->accept_q_lock);
1381 sk_acceptq_added(parent);
1384 /* remove a socket from the accept queue of its parental listening socket */
1385 static void smc_accept_unlink(struct sock *sk)
1387 struct smc_sock *par = smc_sk(sk)->listen_smc;
1389 spin_lock(&par->accept_q_lock);
1390 list_del_init(&smc_sk(sk)->accept_q);
1391 spin_unlock(&par->accept_q_lock);
1392 sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
1393 sock_put(sk); /* sock_hold in smc_accept_enqueue */
/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
1399 struct sock *smc_accept_dequeue(struct sock *parent,
1400 struct socket *new_sock)
1402 struct smc_sock *isk, *n;
1403 struct sock *new_sk;
1405 list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
1406 new_sk = (struct sock *)isk;
1408 smc_accept_unlink(new_sk);
1409 if (new_sk->sk_state == SMC_CLOSED) {
1410 new_sk->sk_prot->unhash(new_sk);
1412 sock_release(isk->clcsock);
1413 isk->clcsock = NULL;
1415 sock_put(new_sk); /* final */
1419 sock_graft(new_sk, new_sock);
1420 if (isk->use_fallback) {
1421 smc_sk(new_sk)->clcsock->file = new_sock->file;
1422 isk->clcsock->file->private_data = isk->clcsock;
1430 /* clean up for a created but never accepted sock */
1431 void smc_close_non_accepted(struct sock *sk)
1433 struct smc_sock *smc = smc_sk(sk);
1435 sock_hold(sk); /* sock_put below */
1437 if (!sk->sk_lingertime)
1438 /* wait for peer closing */
1439 sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
1442 sock_put(sk); /* sock_hold above */
1443 sock_put(sk); /* final sock_put */
1446 static int smcr_serv_conf_first_link(struct smc_sock *smc)
1448 struct smc_link *link = smc->conn.lnk;
1449 struct smc_llc_qentry *qentry;
1452 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
1453 return SMC_CLC_DECL_ERR_REGRMB;
1455 /* send CONFIRM LINK request to client over the RoCE fabric */
1456 rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
1458 return SMC_CLC_DECL_TIMEOUT_CL;
1460 /* receive CONFIRM LINK response from client over the RoCE fabric */
1461 qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
1462 SMC_LLC_CONFIRM_LINK);
1464 struct smc_clc_msg_decline dclc;
1466 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
1467 SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
1468 return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
1470 smc_llc_save_peer_uid(qentry);
1471 rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
1472 smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
1474 return SMC_CLC_DECL_RMBE_EC;
1476 /* confirm_rkey is implicit on 1st contact */
1477 smc->conn.rmb_desc->is_conf_rkey = true;
1479 smc_llc_link_active(link);
1480 smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);
1482 /* initial contact - try to establish second link */
1483 smc_llc_srv_add_link(link, NULL);
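/* smcr_serv_conf_first_link() above mirrors the client side: register the
 * RMB, send CONFIRM_LINK as a request, wait for the client's response (or a
 * CLC DECLINE), mark the link active, and then try to establish a second
 * link via ADD_LINK.
 */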
1487 /* listen worker: finish */
1488 static void smc_listen_out(struct smc_sock *new_smc)
1490 struct smc_sock *lsmc = new_smc->listen_smc;
1491 struct sock *newsmcsk = &new_smc->sk;
1493 if (lsmc->sk.sk_state == SMC_LISTEN) {
1494 lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
1495 smc_accept_enqueue(&lsmc->sk, newsmcsk);
1496 release_sock(&lsmc->sk);
1497 } else { /* no longer listening */
1498 smc_close_non_accepted(newsmcsk);
1501 /* Wake up accept */
1502 lsmc->sk.sk_data_ready(&lsmc->sk);
1503 sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
1506 /* listen worker: finish in state connected */
1507 static void smc_listen_out_connected(struct smc_sock *new_smc)
1509 struct sock *newsmcsk = &new_smc->sk;
1511 sk_refcnt_debug_inc(newsmcsk);
1512 if (newsmcsk->sk_state == SMC_INIT)
1513 newsmcsk->sk_state = SMC_ACTIVE;
1515 smc_listen_out(new_smc);
1518 /* listen worker: finish in error state */
1519 static void smc_listen_out_err(struct smc_sock *new_smc)
1521 struct sock *newsmcsk = &new_smc->sk;
1522 struct net *net = sock_net(newsmcsk);
1524 this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
1525 if (newsmcsk->sk_state == SMC_INIT)
1526 sock_put(&new_smc->sk); /* passive closing */
1527 newsmcsk->sk_state = SMC_CLOSED;
1529 smc_listen_out(new_smc);
1532 /* listen worker: decline and fall back if possible */
1533 static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
1534 int local_first, u8 version)
1536 /* RDMA setup failed, switch back to TCP */
1537 smc_conn_abort(new_smc, local_first);
1538 if (reason_code < 0 ||
1539 smc_switch_to_fallback(new_smc, reason_code)) {
1540 /* error, no fallback possible */
1541 smc_listen_out_err(new_smc);
1544 if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
1545 if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
1546 smc_listen_out_err(new_smc);
1550 smc_listen_out_connected(new_smc);
1553 /* listen worker: version checking */
1554 static int smc_listen_v2_check(struct smc_sock *new_smc,
1555 struct smc_clc_msg_proposal *pclc,
1556 struct smc_init_info *ini)
1558 struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
1559 struct smc_clc_v2_extension *pclc_v2_ext;
1560 int rc = SMC_CLC_DECL_PEERNOSMC;
1562 ini->smc_type_v1 = pclc->hdr.typev1;
1563 ini->smc_type_v2 = pclc->hdr.typev2;
1564 ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
1565 ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
1566 if (pclc->hdr.version > SMC_V1) {
1567 if (smcd_indicated(ini->smc_type_v2))
1568 ini->smcd_version |= SMC_V2;
1569 if (smcr_indicated(ini->smc_type_v2))
1570 ini->smcr_version |= SMC_V2;
1572 if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) {
1573 rc = SMC_CLC_DECL_PEERNOSMC;
1576 pclc_v2_ext = smc_get_clc_v2_ext(pclc);
1578 ini->smcd_version &= ~SMC_V2;
1579 ini->smcr_version &= ~SMC_V2;
1580 rc = SMC_CLC_DECL_NOV2EXT;
1583 pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
1584 if (ini->smcd_version & SMC_V2) {
1585 if (!smc_ism_is_v2_capable()) {
1586 ini->smcd_version &= ~SMC_V2;
1587 rc = SMC_CLC_DECL_NOISM2SUPP;
1588 } else if (!pclc_smcd_v2_ext) {
1589 ini->smcd_version &= ~SMC_V2;
1590 rc = SMC_CLC_DECL_NOV2DEXT;
1591 } else if (!pclc_v2_ext->hdr.eid_cnt &&
1592 !pclc_v2_ext->hdr.flag.seid) {
1593 ini->smcd_version &= ~SMC_V2;
1594 rc = SMC_CLC_DECL_NOUEID;
1597 if (ini->smcr_version & SMC_V2) {
1598 if (!pclc_v2_ext->hdr.eid_cnt) {
1599 ini->smcr_version &= ~SMC_V2;
1600 rc = SMC_CLC_DECL_NOUEID;
1605 if (!ini->smcd_version && !ini->smcr_version)
1611 /* listen worker: check prefixes */
1612 static int smc_listen_prfx_check(struct smc_sock *new_smc,
1613 struct smc_clc_msg_proposal *pclc)
1615 struct smc_clc_msg_proposal_prefix *pclc_prfx;
1616 struct socket *newclcsock = new_smc->clcsock;
1618 if (pclc->hdr.typev1 == SMC_TYPE_N)
1620 pclc_prfx = smc_clc_proposal_get_prefix(pclc);
1621 if (smc_clc_prfx_match(newclcsock, pclc_prfx))
1622 return SMC_CLC_DECL_DIFFPREFIX;
1627 /* listen worker: initialize connection and buffers */
1628 static int smc_listen_rdma_init(struct smc_sock *new_smc,
1629 struct smc_init_info *ini)
1633 /* allocate connection / link group */
1634 rc = smc_conn_create(new_smc, ini);
1638 /* create send buffer and rmb */
1639 if (smc_buf_create(new_smc, false))
1640 return SMC_CLC_DECL_MEM;
1645 /* listen worker: initialize connection and buffers for SMC-D */
1646 static int smc_listen_ism_init(struct smc_sock *new_smc,
1647 struct smc_init_info *ini)
1651 rc = smc_conn_create(new_smc, ini);
1655 /* Create send and receive buffers */
1656 rc = smc_buf_create(new_smc, true);
1658 smc_conn_abort(new_smc, ini->first_contact_local);
1659 return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
1666 static bool smc_is_already_selected(struct smcd_dev *smcd,
1667 struct smc_init_info *ini,
1672 for (i = 0; i < matches; i++)
1673 if (smcd == ini->ism_dev[i])
1679 /* check for ISM devices matching proposed ISM devices */
1680 static void smc_check_ism_v2_match(struct smc_init_info *ini,
1681 u16 proposed_chid, u64 proposed_gid,
1682 unsigned int *matches)
1684 struct smcd_dev *smcd;
1686 list_for_each_entry(smcd, &smcd_dev_list.list, list) {
1687 if (smcd->going_away)
1689 if (smc_is_already_selected(smcd, ini, *matches))
1691 if (smc_ism_get_chid(smcd) == proposed_chid &&
1692 !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
1693 ini->ism_peer_gid[*matches] = proposed_gid;
1694 ini->ism_dev[*matches] = smcd;
1701 static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
1707 static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
1708 struct smc_clc_msg_proposal *pclc,
1709 struct smc_init_info *ini)
1711 struct smc_clc_smcd_v2_extension *smcd_v2_ext;
1712 struct smc_clc_v2_extension *smc_v2_ext;
1713 struct smc_clc_msg_smcd *pclc_smcd;
1714 unsigned int matches = 0;
1719 if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
1722 pclc_smcd = smc_get_clc_msg_smcd(pclc);
1723 smc_v2_ext = smc_get_clc_v2_ext(pclc);
1724 smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
1726 mutex_lock(&smcd_dev_list.mutex);
1727 if (pclc_smcd->ism.chid)
1728 /* check for ISM device matching proposed native ISM device */
1729 smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
1730 ntohll(pclc_smcd->ism.gid), &matches);
1731 for (i = 1; i <= smc_v2_ext->hdr.ism_gid_cnt; i++) {
		/* check for ISM devices matching proposed non-native ISM
		 * devices
		 */
		smc_check_ism_v2_match(ini,
1736 ntohs(smcd_v2_ext->gidchid[i - 1].chid),
1737 ntohll(smcd_v2_ext->gidchid[i - 1].gid),
1740 mutex_unlock(&smcd_dev_list.mutex);
1742 if (!ini->ism_dev[0]) {
1743 smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini);
1747 smc_ism_get_system_eid(&eid);
1748 if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext,
1749 smcd_v2_ext->system_eid, eid))
1752 /* separate - outside the smcd_dev_list.lock */
1753 smcd_version = ini->smcd_version;
1754 for (i = 0; i < matches; i++) {
1755 ini->smcd_version = SMC_V2;
1756 ini->is_smcd = true;
1757 ini->ism_selected = i;
1758 rc = smc_listen_ism_init(new_smc, ini);
1760 smc_find_ism_store_rc(rc, ini);
1761 /* try next active ISM device */
1764 return; /* matching and usable V2 ISM device found */
1766 /* no V2 ISM device could be initialized */
1767 ini->smcd_version = smcd_version; /* restore original value */
1768 ini->negotiated_eid[0] = 0;
1771 ini->smcd_version &= ~SMC_V2;
1772 ini->ism_dev[0] = NULL;
1773 ini->is_smcd = false;
1776 static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
1777 struct smc_clc_msg_proposal *pclc,
1778 struct smc_init_info *ini)
1780 struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
1783 /* check if ISM V1 is available */
1784 if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
1786 ini->is_smcd = true; /* prepare ISM check */
1787 ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid);
1788 rc = smc_find_ism_device(new_smc, ini);
1791 ini->ism_selected = 0;
1792 rc = smc_listen_ism_init(new_smc, ini);
1794 return; /* V1 ISM device found */
1797 smc_find_ism_store_rc(rc, ini);
1798 ini->smcd_version &= ~SMC_V1;
1799 ini->ism_dev[0] = NULL;
1800 ini->is_smcd = false;
1803 /* listen worker: register buffers */
1804 static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
1806 struct smc_connection *conn = &new_smc->conn;
1809 if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
1810 return SMC_CLC_DECL_ERR_REGRMB;
1812 smc_rmb_sync_sg_for_device(&new_smc->conn);
1817 static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
1818 struct smc_clc_msg_proposal *pclc,
1819 struct smc_init_info *ini)
1821 struct smc_clc_v2_extension *smc_v2_ext;
1825 if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2))
1828 smc_v2_ext = smc_get_clc_v2_ext(pclc);
1829 if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
1832 /* prepare RDMA check */
1833 memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
1834 memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE);
1835 memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
1836 ini->check_smcrv2 = true;
1837 ini->smcrv2.clc_sk = new_smc->clcsock->sk;
1838 ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr;
1839 ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce);
1840 rc = smc_find_rdma_device(new_smc, ini);
1842 smc_find_ism_store_rc(rc, ini);
1845 if (!ini->smcrv2.uses_gateway)
1846 memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN);
1848 smcr_version = ini->smcr_version;
1849 ini->smcr_version = SMC_V2;
1850 rc = smc_listen_rdma_init(new_smc, ini);
1852 rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
1855 ini->smcr_version = smcr_version;
1856 smc_find_ism_store_rc(rc, ini);
1859 ini->smcr_version &= ~SMC_V2;
1860 ini->check_smcrv2 = false;
1863 static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
1864 struct smc_clc_msg_proposal *pclc,
1865 struct smc_init_info *ini)
1869 if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1))
1870 return SMC_CLC_DECL_NOSMCDEV;
1872 /* prepare RDMA check */
1873 memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
1874 memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE);
1875 memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
1876 rc = smc_find_rdma_device(new_smc, ini);
1878 /* no RDMA device found */
1879 return SMC_CLC_DECL_NOSMCDEV;
1881 rc = smc_listen_rdma_init(new_smc, ini);
1884 return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
1887 /* determine the local device matching to proposal */
1888 static int smc_listen_find_device(struct smc_sock *new_smc,
1889 struct smc_clc_msg_proposal *pclc,
1890 struct smc_init_info *ini)
1894 /* check for ISM device matching V2 proposed device */
1895 smc_find_ism_v2_device_serv(new_smc, pclc, ini);
1896 if (ini->ism_dev[0])
1899 /* check for matching IP prefix and subnet length (V1) */
1900 prfx_rc = smc_listen_prfx_check(new_smc, pclc);
1902 smc_find_ism_store_rc(prfx_rc, ini);
1904 /* get vlan id from IP device */
1905 if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
1906 return ini->rc ?: SMC_CLC_DECL_GETVLANERR;
1908 /* check for ISM device matching V1 proposed device */
1910 smc_find_ism_v1_device_serv(new_smc, pclc, ini);
1911 if (ini->ism_dev[0])
1914 if (!smcr_indicated(pclc->hdr.typev1) &&
1915 !smcr_indicated(pclc->hdr.typev2))
1916 /* skip RDMA and decline */
1917 return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;
1919 /* check if RDMA V2 is available */
1920 smc_find_rdma_v2_device_serv(new_smc, pclc, ini);
1921 if (ini->smcrv2.ib_dev_v2)
1924 /* check if RDMA V1 is available */
1928 rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
1929 smc_find_ism_store_rc(rc, ini);
1930 return (!rc) ? 0 : ini->rc;
1932 return SMC_CLC_DECL_NOSMCDEV;
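/* Device selection order on the listen side, per the code above: ISM V2
 * first, then the V1 IP prefix check, ISM V1, RDMA V2 and finally RDMA V1;
 * if nothing matches, a previously stored decline reason (or NOSMCDEV) is
 * returned.
 */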
1935 /* listen worker: finish RDMA setup */
1936 static int smc_listen_rdma_finish(struct smc_sock *new_smc,
1937 struct smc_clc_msg_accept_confirm *cclc,
1939 struct smc_init_info *ini)
1941 struct smc_link *link = new_smc->conn.lnk;
1942 int reason_code = 0;
1945 smc_link_save_peer_info(link, cclc, ini);
1947 if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
1948 return SMC_CLC_DECL_ERR_RTOK;
1951 if (smc_ib_ready_link(link))
1952 return SMC_CLC_DECL_ERR_RDYLNK;
1953 /* QP confirmation over RoCE fabric */
1954 smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
1955 reason_code = smcr_serv_conf_first_link(new_smc);
1956 smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
1961 /* setup for connection of server */
1962 static void smc_listen_work(struct work_struct *work)
1964 struct smc_sock *new_smc = container_of(work, struct smc_sock,
1966 struct socket *newclcsock = new_smc->clcsock;
1967 struct smc_clc_msg_accept_confirm *cclc;
1968 struct smc_clc_msg_proposal_area *buf;
1969 struct smc_clc_msg_proposal *pclc;
1970 struct smc_init_info *ini = NULL;
1971 u8 proposal_version = SMC_V1;
1975 if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
1976 return smc_listen_out_err(new_smc);
1978 if (new_smc->use_fallback) {
1979 smc_listen_out_connected(new_smc);
1983 /* check if peer is smc capable */
1984 if (!tcp_sk(newclcsock->sk)->syn_smc) {
1985 rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
1987 smc_listen_out_err(new_smc);
1989 smc_listen_out_connected(new_smc);
	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
1998 rc = SMC_CLC_DECL_MEM;
2001 pclc = (struct smc_clc_msg_proposal *)buf;
2002 rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
2003 SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
2007 if (pclc->hdr.version > SMC_V1)
2008 proposal_version = SMC_V2;
2010 /* IPSec connections opt out of SMC optimizations */
2011 if (using_ipsec(new_smc)) {
2012 rc = SMC_CLC_DECL_IPSEC;
2016 ini = kzalloc(sizeof(*ini), GFP_KERNEL);
2018 rc = SMC_CLC_DECL_MEM;
2022 /* initial version checking */
2023 rc = smc_listen_v2_check(new_smc, pclc, ini);
2027 mutex_lock(&smc_server_lgr_pending);
2028 smc_close_init(new_smc);
2029 smc_rx_init(new_smc);
2030 smc_tx_init(new_smc);
2032 /* determine ISM or RoCE device used for connection */
2033 rc = smc_listen_find_device(new_smc, pclc, ini);
2037 /* send SMC Accept CLC message */
2038 accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version;
2039 rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
2040 accept_version, ini->negotiated_eid);
2044 /* SMC-D does not need this lock any more */
2046 mutex_unlock(&smc_server_lgr_pending);
2048 /* receive SMC Confirm CLC message */
2049 memset(buf, 0, sizeof(*buf));
2050 cclc = (struct smc_clc_msg_accept_confirm *)buf;
2051 rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
2052 SMC_CLC_CONFIRM, CLC_WAIT_TIME);
2060 if (!ini->is_smcd) {
2061 rc = smc_listen_rdma_finish(new_smc, cclc,
2062 ini->first_contact_local, ini);
2065 mutex_unlock(&smc_server_lgr_pending);
2067 smc_conn_save_peer_info(new_smc, cclc);
2068 smc_listen_out_connected(new_smc);
2069 SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
2073 mutex_unlock(&smc_server_lgr_pending);
2075 smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
2082 static void smc_tcp_listen_work(struct work_struct *work)
2084 struct smc_sock *lsmc = container_of(work, struct smc_sock,
2086 struct sock *lsk = &lsmc->sk;
2087 struct smc_sock *new_smc;
2091 while (lsk->sk_state == SMC_LISTEN) {
2092 rc = smc_clcsock_accept(lsmc, &new_smc);
2093 if (rc) /* clcsock accept queue empty or error */
2098 new_smc->listen_smc = lsmc;
2099 new_smc->use_fallback = lsmc->use_fallback;
2100 new_smc->fallback_rsn = lsmc->fallback_rsn;
2101 sock_hold(lsk); /* sock_put in smc_listen_work */
2102 INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
2103 smc_copy_sock_settings_to_smc(new_smc);
2104 new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
2105 new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
2106 sock_hold(&new_smc->sk); /* sock_put in passive closing */
2107 if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
2108 sock_put(&new_smc->sk);
2113 sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
2116 static void smc_clcsock_data_ready(struct sock *listen_clcsock)
2118 struct smc_sock *lsmc;
2120 lsmc = (struct smc_sock *)
2121 ((uintptr_t)listen_clcsock->sk_user_data & ~SK_USER_DATA_NOCOPY);
2124 lsmc->clcsk_data_ready(listen_clcsock);
2125 if (lsmc->sk.sk_state == SMC_LISTEN) {
2126 sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
2127 if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
2128 sock_put(&lsmc->sk);
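/* smc_clcsock_data_ready() above is installed as the listen clcsock's
 * sk_data_ready callback by smc_listen() below: it recovers the SMC socket
 * from sk_user_data, chains to the saved TCP callback and then schedules
 * smc_tcp_listen_work() on the handshake workqueue to accept the new
 * clcsock connection.
 */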
2132 static int smc_listen(struct socket *sock, int backlog)
2134 struct sock *sk = sock->sk;
2135 struct smc_sock *smc;
2142 if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
2143 smc->connect_nonblock)
2147 if (sk->sk_state == SMC_LISTEN) {
2148 sk->sk_max_ack_backlog = backlog;
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
2155 if (!smc->use_fallback)
2156 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	/* save original sk_data_ready function and establish
	 * smc-specific sk_data_ready function
	 */
	smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
2162 smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
2163 smc->clcsock->sk->sk_user_data =
2164 (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
2165 rc = kernel_listen(smc->clcsock, backlog);
2167 smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
2170 sk->sk_max_ack_backlog = backlog;
2171 sk->sk_ack_backlog = 0;
2172 sk->sk_state = SMC_LISTEN;
2179 static int smc_accept(struct socket *sock, struct socket *new_sock,
2180 int flags, bool kern)
2182 struct sock *sk = sock->sk, *nsk;
2183 DECLARE_WAITQUEUE(wait, current);
2184 struct smc_sock *lsmc;
2189 sock_hold(sk); /* sock_put below */
2192 if (lsmc->sk.sk_state != SMC_LISTEN) {
2198 /* Wait for an incoming connection */
2199 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2200 add_wait_queue_exclusive(sk_sleep(sk), &wait);
2201 while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
2202 set_current_state(TASK_INTERRUPTIBLE);
2208 timeo = schedule_timeout(timeo);
2209 /* wakeup by sk_data_ready in smc_listen_work() */
2210 sched_annotate_sleep();
2212 if (signal_pending(current)) {
2213 rc = sock_intr_errno(timeo);
2217 set_current_state(TASK_RUNNING);
2218 remove_wait_queue(sk_sleep(sk), &wait);
2221 rc = sock_error(nsk);
2226 if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
2227 /* wait till data arrives on the socket */
2228 timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
2230 if (smc_sk(nsk)->use_fallback) {
2231 struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
2234 if (skb_queue_empty(&clcsk->sk_receive_queue))
2235 sk_wait_data(clcsk, &timeo, NULL);
2236 release_sock(clcsk);
2237 } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
2239 smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
2245 sock_put(sk); /* sock_hold above */
2249 static int smc_getname(struct socket *sock, struct sockaddr *addr,
2252 struct smc_sock *smc;
2254 if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
2255 (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
2258 smc = smc_sk(sock->sk);
2260 return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
2263 static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2265 struct sock *sk = sock->sk;
2266 struct smc_sock *smc;
2271 if ((sk->sk_state != SMC_ACTIVE) &&
2272 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2273 (sk->sk_state != SMC_INIT))
2276 if (msg->msg_flags & MSG_FASTOPEN) {
2277 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2278 rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
2287 if (smc->use_fallback) {
2288 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
2290 rc = smc_tx_sendmsg(smc, msg, len);
2291 SMC_STAT_TX_PAYLOAD(smc, len, rc);
2298 static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
2301 struct sock *sk = sock->sk;
2302 struct smc_sock *smc;
2307 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
2308 /* socket was connected before, no more data to read */
2312 if ((sk->sk_state == SMC_INIT) ||
2313 (sk->sk_state == SMC_LISTEN) ||
2314 (sk->sk_state == SMC_CLOSED))
2317 if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
2322 if (smc->use_fallback) {
2323 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
2325 msg->msg_namelen = 0;
2326 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
2327 SMC_STAT_RX_PAYLOAD(smc, rc, rc);
2335 static __poll_t smc_accept_poll(struct sock *parent)
2337 struct smc_sock *isk = smc_sk(parent);
2340 spin_lock(&isk->accept_q_lock);
2341 if (!list_empty(&isk->accept_q))
2342 mask = EPOLLIN | EPOLLRDNORM;
2343 spin_unlock(&isk->accept_q_lock);
2348 static __poll_t smc_poll(struct file *file, struct socket *sock,
2351 struct sock *sk = sock->sk;
2352 struct smc_sock *smc;
2358 smc = smc_sk(sock->sk);
2359 if (smc->use_fallback) {
2360 /* delegate to CLC child sock */
2361 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
2362 sk->sk_err = smc->clcsock->sk->sk_err;
2364 if (sk->sk_state != SMC_CLOSED)
2365 sock_poll_wait(file, sock, wait);
2368 if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
2369 (sk->sk_state == SMC_CLOSED))
2371 if (sk->sk_state == SMC_LISTEN) {
2372 /* woken up by sk_data_ready in smc_listen_work() */
2373 mask |= smc_accept_poll(sk);
2374 } else if (smc->use_fallback) { /* as result of connect_work() */
2375 mask |= smc->clcsock->ops->poll(file, smc->clcsock,
2377 sk->sk_err = smc->clcsock->sk->sk_err;
2379 if ((sk->sk_state != SMC_INIT &&
2380 atomic_read(&smc->conn.sndbuf_space)) ||
2381 sk->sk_shutdown & SEND_SHUTDOWN) {
2382 mask |= EPOLLOUT | EPOLLWRNORM;
2384 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2385 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2387 if (atomic_read(&smc->conn.bytes_to_rcv))
2388 mask |= EPOLLIN | EPOLLRDNORM;
2389 if (sk->sk_shutdown & RCV_SHUTDOWN)
2390 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2391 if (sk->sk_state == SMC_APPCLOSEWAIT1)
2393 if (smc->conn.urg_state == SMC_URG_VALID)
2401 static int smc_shutdown(struct socket *sock, int how)
2403 struct sock *sk = sock->sk;
2404 bool do_shutdown = true;
2405 struct smc_sock *smc;
2412 if ((how < SHUT_RD) || (how > SHUT_RDWR))
2418 if ((sk->sk_state != SMC_ACTIVE) &&
2419 (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
2420 (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
2421 (sk->sk_state != SMC_APPCLOSEWAIT1) &&
2422 (sk->sk_state != SMC_APPCLOSEWAIT2) &&
2423 (sk->sk_state != SMC_APPFINCLOSEWAIT))
2425 if (smc->use_fallback) {
2426 rc = kernel_sock_shutdown(smc->clcsock, how);
2427 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
2428 if (sk->sk_shutdown == SHUTDOWN_MASK)
2429 sk->sk_state = SMC_CLOSED;
2433 case SHUT_RDWR: /* shutdown in both directions */
2434 old_state = sk->sk_state;
2435 rc = smc_close_active(smc);
2436 if (old_state == SMC_ACTIVE &&
2437 sk->sk_state == SMC_PEERCLOSEWAIT1)
2438 do_shutdown = false;
2441 rc = smc_close_shutdown_write(smc);
2445 /* nothing more to do because peer is not involved */
2448 if (do_shutdown && smc->clcsock)
2449 rc1 = kernel_sock_shutdown(smc->clcsock, how);
2450 /* map sock_shutdown_cmd constants to sk_shutdown value range */
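/* concretely: SHUT_RD(0) + 1 == RCV_SHUTDOWN(1), SHUT_WR(1) + 1 ==
 * SEND_SHUTDOWN(2), SHUT_RDWR(2) + 1 == SHUTDOWN_MASK(3)
 */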
2451 sk->sk_shutdown |= how + 1;
2455 return rc ? rc : rc1;
2458 static int smc_setsockopt(struct socket *sock, int level, int optname,
2459 sockptr_t optval, unsigned int optlen)
2461 struct sock *sk = sock->sk;
2462 struct smc_sock *smc;
2465 if (level == SOL_TCP && optname == TCP_ULP)
2470 /* generic setsockopts reaching us here always apply to the
 * CLC socket
 */
2473 mutex_lock(&smc->clcsock_release_lock);
2474 if (!smc->clcsock) {
2475 mutex_unlock(&smc->clcsock_release_lock);
2478 if (unlikely(!smc->clcsock->ops->setsockopt))
2481 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
2483 if (smc->clcsock->sk->sk_err) {
2484 sk->sk_err = smc->clcsock->sk->sk_err;
2485 sk_error_report(sk);
2487 mutex_unlock(&smc->clcsock_release_lock);
2489 if (optlen < sizeof(int))
2491 if (copy_from_sockptr(&val, optval, sizeof(int)))
2495 if (rc || smc->use_fallback)
2499 case TCP_FASTOPEN_CONNECT:
2500 case TCP_FASTOPEN_KEY:
2501 case TCP_FASTOPEN_NO_COOKIE:
2502 /* options not supported by SMC */
2503 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2504 rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
2510 if (sk->sk_state != SMC_INIT &&
2511 sk->sk_state != SMC_LISTEN &&
2512 sk->sk_state != SMC_CLOSED) {
2514 SMC_STAT_INC(smc, ndly_cnt);
2515 mod_delayed_work(smc->conn.lgr->tx_wq,
2516 &smc->conn.tx_work, 0);
2521 if (sk->sk_state != SMC_INIT &&
2522 sk->sk_state != SMC_LISTEN &&
2523 sk->sk_state != SMC_CLOSED) {
2525 SMC_STAT_INC(smc, cork_cnt);
2526 mod_delayed_work(smc->conn.lgr->tx_wq,
2527 &smc->conn.tx_work, 0);
2531 case TCP_DEFER_ACCEPT:
2532 smc->sockopt_defer_accept = val;
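/* A hedged user-space sketch of the setsockopt() handling above: every option
 * is first mirrored to the internal CLC/TCP socket, and a few TCP options get
 * additional SMC-specific treatment (TCP_NODELAY/TCP_CORK kick the tx worker,
 * TCP_DEFER_ACCEPT is emulated in smc_accept(), the TCP_FASTOPEN* options
 * force a fallback to TCP). Assumes fd is an AF_SMC socket as in the earlier
 * sketch.
 *
 *	#include <netinet/tcp.h>
 *
 *	int one = 1;
 *
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) < 0)
 *		perror("TCP_NODELAY on AF_SMC socket");
 */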
2543 static int smc_getsockopt(struct socket *sock, int level, int optname,
2544 char __user *optval, int __user *optlen)
2546 struct smc_sock *smc;
2549 smc = smc_sk(sock->sk);
2550 mutex_lock(&smc->clcsock_release_lock);
2551 if (!smc->clcsock) {
2552 mutex_unlock(&smc->clcsock_release_lock);
2555 /* socket options apply to the CLC socket */
2556 if (unlikely(!smc->clcsock->ops->getsockopt)) {
2557 mutex_unlock(&smc->clcsock_release_lock);
2560 rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
2562 mutex_unlock(&smc->clcsock_release_lock);
2566 static int smc_ioctl(struct socket *sock, unsigned int cmd,
2569 union smc_host_cursor cons, urg;
2570 struct smc_connection *conn;
2571 struct smc_sock *smc;
2574 smc = smc_sk(sock->sk);
2576 lock_sock(&smc->sk);
2577 if (smc->use_fallback) {
2578 if (!smc->clcsock) {
2579 release_sock(&smc->sk);
2582 answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
2583 release_sock(&smc->sk);
2587 case SIOCINQ: /* same as FIONREAD */
2588 if (smc->sk.sk_state == SMC_LISTEN) {
2589 release_sock(&smc->sk);
2592 if (smc->sk.sk_state == SMC_INIT ||
2593 smc->sk.sk_state == SMC_CLOSED)
2596 answ = atomic_read(&smc->conn.bytes_to_rcv);
2599 /* output queue size (not sent + not acked) */
2600 if (smc->sk.sk_state == SMC_LISTEN) {
2601 release_sock(&smc->sk);
2604 if (smc->sk.sk_state == SMC_INIT ||
2605 smc->sk.sk_state == SMC_CLOSED)
2608 answ = smc->conn.sndbuf_desc->len -
2609 atomic_read(&smc->conn.sndbuf_space);
2612 /* output queue size (not yet sent only) */
2613 if (smc->sk.sk_state == SMC_LISTEN) {
2614 release_sock(&smc->sk);
2617 if (smc->sk.sk_state == SMC_INIT ||
2618 smc->sk.sk_state == SMC_CLOSED)
2621 answ = smc_tx_prepared_sends(&smc->conn);
2624 if (smc->sk.sk_state == SMC_LISTEN) {
2625 release_sock(&smc->sk);
2628 if (smc->sk.sk_state == SMC_INIT ||
2629 smc->sk.sk_state == SMC_CLOSED) {
2632 smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
2633 smc_curs_copy(&urg, &conn->urg_curs, conn);
2634 answ = smc_curs_diff(conn->rmb_desc->len,
2639 release_sock(&smc->sk);
2640 return -ENOIOCTLCMD;
2642 release_sock(&smc->sk);
2644 return put_user(answ, (int __user *)arg);
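/* Hedged user-space sketch for the ioctl() path above: SIOCINQ (FIONREAD)
 * reports the bytes buffered for reading, SIOCOUTQ the send-buffer space still
 * in use, either for the SMC connection or, after fallback, for the TCP
 * clcsock.
 *
 *	#include <sys/ioctl.h>
 *	#include <stdio.h>
 *
 *	int unread = 0;
 *
 *	if (ioctl(fd, FIONREAD, &unread) == 0)
 *		printf("%d bytes ready to read\n", unread);
 */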
2647 static ssize_t smc_sendpage(struct socket *sock, struct page *page,
2648 int offset, size_t size, int flags)
2650 struct sock *sk = sock->sk;
2651 struct smc_sock *smc;
2656 if (sk->sk_state != SMC_ACTIVE) {
2661 if (smc->use_fallback) {
2662 rc = kernel_sendpage(smc->clcsock, page, offset,
2665 SMC_STAT_INC(smc, sendpage_cnt);
2666 rc = sock_no_sendpage(sock, page, offset, size, flags);
2673 /* Map the affected portions of the rmbe into an spd, note the number of bytes
2674 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
2675 * updates till whenever a respective page has been fully processed.
2676 * Note that subsequent recv() calls have to wait till all splice() processing
 * is completed.
 */
2679 static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
2680 struct pipe_inode_info *pipe, size_t len,
2683 struct sock *sk = sock->sk;
2684 struct smc_sock *smc;
2689 if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
2690 /* socket was connected before, no more data to read */
2694 if (sk->sk_state == SMC_INIT ||
2695 sk->sk_state == SMC_LISTEN ||
2696 sk->sk_state == SMC_CLOSED)
2699 if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
2704 if (smc->use_fallback) {
2705 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
2712 if (flags & SPLICE_F_NONBLOCK)
2713 flags = MSG_DONTWAIT;
2716 SMC_STAT_INC(smc, splice_cnt);
2717 rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
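/* Hedged user-space sketch for smc_splice_read(): data can be spliced from an
 * SMC socket into a pipe just like from a TCP socket; SPLICE_F_NONBLOCK is
 * mapped to MSG_DONTWAIT above. The helper name is made up for illustration.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	ssize_t smc_drain_to_pipe(int smc_fd, int pipe_wr_fd, size_t len)
 *	{
 *		return splice(smc_fd, NULL, pipe_wr_fd, NULL, len,
 *			      SPLICE_F_NONBLOCK);
 *	}
 */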
2725 /* must look like tcp */
2726 static const struct proto_ops smc_sock_ops = {
2728 .owner = THIS_MODULE,
2729 .release = smc_release,
2731 .connect = smc_connect,
2732 .socketpair = sock_no_socketpair,
2733 .accept = smc_accept,
2734 .getname = smc_getname,
2737 .listen = smc_listen,
2738 .shutdown = smc_shutdown,
2739 .setsockopt = smc_setsockopt,
2740 .getsockopt = smc_getsockopt,
2741 .sendmsg = smc_sendmsg,
2742 .recvmsg = smc_recvmsg,
2743 .mmap = sock_no_mmap,
2744 .sendpage = smc_sendpage,
2745 .splice_read = smc_splice_read,
2748 static int __smc_create(struct net *net, struct socket *sock, int protocol,
2749 int kern, struct socket *clcsock)
2751 int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
2752 struct smc_sock *smc;
2756 rc = -ESOCKTNOSUPPORT;
2757 if (sock->type != SOCK_STREAM)
2760 rc = -EPROTONOSUPPORT;
2761 if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
2765 sock->ops = &smc_sock_ops;
2766 sk = smc_sock_alloc(net, sock, protocol);
2770 /* create internal TCP socket for CLC handshake and fallback */
2772 smc->use_fallback = false; /* assume rdma capability first */
2773 smc->fallback_rsn = 0;
2777 rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
2780 sk_common_release(sk);
2784 smc->clcsock = clcsock;
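/* inherit the initial buffer sizes from the TCP clcsock, but never go below
 * SMC_BUF_MIN_SIZE
 */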
2787 smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
2788 smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
2794 static int smc_create(struct net *net, struct socket *sock, int protocol,
2797 return __smc_create(net, sock, protocol, kern, NULL);
2800 static const struct net_proto_family smc_sock_family_ops = {
2802 .owner = THIS_MODULE,
2803 .create = smc_create,
2806 static int smc_ulp_init(struct sock *sk)
2808 struct socket *tcp = sk->sk_socket;
2809 struct net *net = sock_net(sk);
2810 struct socket *smcsock;
2813 /* only TCP can be replaced */
2814 if (tcp->type != SOCK_STREAM || sk->sk_protocol != IPPROTO_TCP ||
2815 (sk->sk_family != AF_INET && sk->sk_family != AF_INET6))
2816 return -ESOCKTNOSUPPORT;
2817 /* only handle unconnected sockets backed by a file, with no fasync entries */
2818 if (tcp->state != SS_UNCONNECTED || !tcp->file || tcp->wq.fasync_list)
2821 if (sk->sk_family == AF_INET)
2822 protocol = SMCPROTO_SMC;
2824 protocol = SMCPROTO_SMC6;
2826 smcsock = sock_alloc();
2830 smcsock->type = SOCK_STREAM;
2831 __module_get(THIS_MODULE); /* tried in __tcp_ulp_find_autoload */
2832 ret = __smc_create(net, smcsock, protocol, 1, tcp);
2834 sock_release(smcsock); /* does the module_put(); ops is not NULL here */
2838 /* replace the tcp socket with the smc socket */
2839 smcsock->file = tcp->file;
2840 smcsock->file->private_data = smcsock;
2841 smcsock->file->f_inode = SOCK_INODE(smcsock); /* replace inode when sock_close */
2842 smcsock->file->f_path.dentry->d_inode = SOCK_INODE(smcsock); /* dput() in __fput */
2848 static void smc_ulp_clone(const struct request_sock *req, struct sock *newsk,
2849 const gfp_t priority)
2851 struct inet_connection_sock *icsk = inet_csk(newsk);
2853 /* don't inherit the ulp ops to child sockets created from a listen socket */
2854 icsk->icsk_ulp_ops = NULL;
2857 static struct tcp_ulp_ops smc_ulp_ops __read_mostly = {
2859 .owner = THIS_MODULE,
2860 .init = smc_ulp_init,
2861 .clone = smc_ulp_clone,
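/* Hedged user-space sketch for the "smc" TCP ULP registered above: an
 * application or preload library can convert an existing, still unconnected
 * TCP socket to SMC before connect()/listen(); the string passed to TCP_ULP
 * must match the registered ULP name (see MODULE_ALIAS_TCP_ULP below).
 *
 *	#include <netinet/tcp.h>	// TCP_ULP on recent systems, else <linux/tcp.h>
 *
 *	int tcp_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
 *
 *	if (setsockopt(tcp_fd, IPPROTO_TCP, TCP_ULP, "smc", sizeof("smc")) < 0)
 *		perror("switch to SMC via TCP_ULP");
 */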
2864 unsigned int smc_net_id;
2866 static __net_init int smc_net_init(struct net *net)
2868 return smc_pnet_net_init(net);
2871 static void __net_exit smc_net_exit(struct net *net)
2873 smc_pnet_net_exit(net);
2876 static __net_init int smc_net_stat_init(struct net *net)
2878 return smc_stats_init(net);
2881 static void __net_exit smc_net_stat_exit(struct net *net)
2883 smc_stats_exit(net);
2886 static struct pernet_operations smc_net_ops = {
2887 .init = smc_net_init,
2888 .exit = smc_net_exit,
2890 .size = sizeof(struct smc_net),
2893 static struct pernet_operations smc_net_stat_ops = {
2894 .init = smc_net_stat_init,
2895 .exit = smc_net_stat_exit,
2898 static int __init smc_init(void)
2902 rc = register_pernet_subsys(&smc_net_ops);
2906 rc = register_pernet_subsys(&smc_net_stat_ops);
2915 goto out_pernet_subsys;
2917 rc = smc_pnet_init();
2922 smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
2926 smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
2928 goto out_alloc_hs_wq;
2930 rc = smc_core_init();
2932 pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
2936 rc = smc_llc_init();
2938 pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
2942 rc = smc_cdc_init();
2944 pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
2948 rc = proto_register(&smc_proto, 1);
2950 pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
2954 rc = proto_register(&smc_proto6, 1);
2956 pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
2960 rc = sock_register(&smc_sock_family_ops);
2962 pr_err("%s: sock_register fails with %d\n", __func__, rc);
2965 INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
2966 INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);
2968 rc = smc_ib_register_client();
2970 pr_err("%s: ib_register fails with %d\n", __func__, rc);
2974 rc = tcp_register_ulp(&smc_ulp_ops);
2976 pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
2980 static_branch_enable(&tcp_have_smc);
2984 sock_unregister(PF_SMC);
2986 proto_unregister(&smc_proto6);
2988 proto_unregister(&smc_proto);
2992 destroy_workqueue(smc_close_wq);
2994 destroy_workqueue(smc_hs_wq);
3000 unregister_pernet_subsys(&smc_net_ops);
3005 static void __exit smc_exit(void)
3007 static_branch_disable(&tcp_have_smc);
3008 tcp_unregister_ulp(&smc_ulp_ops);
3009 sock_unregister(PF_SMC);
3011 smc_ib_unregister_client();
3012 destroy_workqueue(smc_close_wq);
3013 destroy_workqueue(smc_hs_wq);
3014 proto_unregister(&smc_proto6);
3015 proto_unregister(&smc_proto);
3019 unregister_pernet_subsys(&smc_net_stat_ops);
3020 unregister_pernet_subsys(&smc_net_ops);
3024 module_init(smc_init);
3025 module_exit(smc_exit);
3027 MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
3028 MODULE_DESCRIPTION("smc socket address family");
3029 MODULE_LICENSE("GPL");
3030 MODULE_ALIAS_NETPROTO(PF_SMC);
3031 MODULE_ALIAS_TCP_ULP("smc");