/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - support for alternate links postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */
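/* Illustrative userspace usage (a sketch, not part of this file): an AF_SMC
 * socket is created and driven like a TCP socket; the protocol constant only
 * selects the address format. The SMCPROTO_* values shown are the ones used
 * by this module and are assumed to be visible to the application:
 *
 *      #include <sys/socket.h>
 *
 *      #define SMCPROTO_SMC    0       // SMC protocol, IPv4 addressing
 *      #define SMCPROTO_SMC6   1       // SMC protocol, IPv6 addressing
 *
 *      int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
 *      // bind()/connect()/listen()/accept() then behave as for TCP; if the
 *      // peer is not SMC-capable, the connection falls back to plain TCP.
 */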
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <asm/ioctls.h>

#include "smc_close.h"
static DEFINE_MUTEX(smc_create_lgr_pending);    /* serialize link group creation */
static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);

static void smc_set_keepalive(struct sock *sk, int val)
        struct smc_sock *smc = smc_sk(sk);

        smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);

static struct smc_hashinfo smc_v4_hashinfo = {
        .lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),

static struct smc_hashinfo smc_v6_hashinfo = {
        .lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),

int smc_hash_sk(struct sock *sk)
        struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
        struct hlist_head *head;

        write_lock_bh(&h->lock);
        sk_add_node(sk, head);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        write_unlock_bh(&h->lock);

EXPORT_SYMBOL_GPL(smc_hash_sk);

void smc_unhash_sk(struct sock *sk)
        struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

        write_lock_bh(&h->lock);
        if (sk_del_node_init(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
        write_unlock_bh(&h->lock);

EXPORT_SYMBOL_GPL(smc_unhash_sk);
struct proto smc_proto = {
        .keepalive = smc_set_keepalive,
        .unhash = smc_unhash_sk,
        .obj_size = sizeof(struct smc_sock),
        .h.smc_hash = &smc_v4_hashinfo,
        .slab_flags = SLAB_TYPESAFE_BY_RCU,

EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
        .owner = THIS_MODULE,
        .keepalive = smc_set_keepalive,
        .unhash = smc_unhash_sk,
        .obj_size = sizeof(struct smc_sock),
        .h.smc_hash = &smc_v6_hashinfo,
        .slab_flags = SLAB_TYPESAFE_BY_RCU,

EXPORT_SYMBOL_GPL(smc_proto6);
static int smc_release(struct socket *sock)
        struct sock *sk = sock->sk;
        struct smc_sock *smc;

        /* cleanup for a dangling non-blocking connect */
        flush_work(&smc->connect_work);
        kfree(smc->connect_info);
        smc->connect_info = NULL;

        if (sk->sk_state == SMC_LISTEN)
                /* smc_close_non_accepted() is called and acquires
                 * sock lock for child sockets again
                 */
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        if (!smc->use_fallback) {
                rc = smc_close_active(smc);
                sock_set_flag(sk, SOCK_DEAD);
                sk->sk_shutdown |= SHUTDOWN_MASK;

                sock_release(smc->clcsock);

        if (smc->use_fallback) {
                if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
                        sock_put(sk); /* passive closing */
                sk->sk_state = SMC_CLOSED;
                sk->sk_state_change(sk);

        if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
                smc_conn_free(&smc->conn);

        sk->sk_prot->unhash(sk);
        sock_put(sk); /* final sock_put */
static void smc_destruct(struct sock *sk)
        if (sk->sk_state != SMC_CLOSED)

        if (!sock_flag(sk, SOCK_DEAD))

        sk_refcnt_debug_dec(sk);

static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
        struct smc_sock *smc;

        prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
        sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);

        sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
        sk->sk_state = SMC_INIT;
        sk->sk_destruct = smc_destruct;
        sk->sk_protocol = protocol;
        INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
        INIT_WORK(&smc->connect_work, smc_connect_work);
        INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
        INIT_LIST_HEAD(&smc->accept_q);
        spin_lock_init(&smc->accept_q_lock);
        spin_lock_init(&smc->conn.send_lock);
        sk->sk_prot->hash(sk);
        sk_refcnt_debug_inc(sk);
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
        struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
        struct sock *sk = sock->sk;
        struct smc_sock *smc;

        /* replicate tests from inet_bind(), to be safe wrt. future changes */
        if (addr_len < sizeof(struct sockaddr_in))

        if (addr->sin_family != AF_INET &&
            addr->sin_family != AF_INET6 &&
            addr->sin_family != AF_UNSPEC)

        /* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
        if (addr->sin_family == AF_UNSPEC &&
            addr->sin_addr.s_addr != htonl(INADDR_ANY))

        /* Check if socket is already active */
        if (sk->sk_state != SMC_INIT)

        smc->clcsock->sk->sk_reuse = sk->sk_reuse;
        rc = kernel_bind(smc->clcsock, uaddr, addr_len);
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
        /* options we don't get control over via setsockopt */
        nsk->sk_type = osk->sk_type;
        nsk->sk_sndbuf = osk->sk_sndbuf;
        nsk->sk_rcvbuf = osk->sk_rcvbuf;
        nsk->sk_sndtimeo = osk->sk_sndtimeo;
        nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
        nsk->sk_mark = osk->sk_mark;
        nsk->sk_priority = osk->sk_priority;
        nsk->sk_rcvlowat = osk->sk_rcvlowat;
        nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
        nsk->sk_err = osk->sk_err;

        nsk->sk_flags &= ~mask;
        nsk->sk_flags |= osk->sk_flags & mask;

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
                             (1UL << SOCK_KEEPOPEN) | \
                             (1UL << SOCK_LINGER) | \
                             (1UL << SOCK_BROADCAST) | \
                             (1UL << SOCK_TIMESTAMP) | \
                             (1UL << SOCK_DBG) | \
                             (1UL << SOCK_RCVTSTAMP) | \
                             (1UL << SOCK_RCVTSTAMPNS) | \
                             (1UL << SOCK_LOCALROUTE) | \
                             (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
                             (1UL << SOCK_RXQ_OVFL) | \
                             (1UL << SOCK_WIFI_STATUS) | \
                             (1UL << SOCK_NOFCS) | \
                             (1UL << SOCK_FILTER_LOCKED))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
        smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
                             (1UL << SOCK_KEEPOPEN) | \
                             (1UL << SOCK_LINGER) | \

/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
        smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
/* register a new rmb, optionally send confirm_rkey msg to register with peer */
static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
        /* register memory region for new rmb */
        if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
                rmb_desc->regerr = 1;

        /* exchange confirm_rkey msg with peer */
        if (smc_llc_do_confirm_rkey(link, rmb_desc)) {
                rmb_desc->regerr = 1;
static int smc_clnt_conf_first_link(struct smc_sock *smc)
        struct net *net = sock_net(smc->clcsock->sk);
        struct smc_link_group *lgr = smc->conn.lgr;
        struct smc_link *link;

        link = &lgr->lnk[SMC_SINGLE_LINK];
        /* receive CONFIRM LINK request from server over RoCE fabric */
        rest = wait_for_completion_interruptible_timeout(
                SMC_LLC_WAIT_FIRST_TIME);
                struct smc_clc_msg_decline dclc;

                rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),

        if (link->llc_confirm_rc)
                return SMC_CLC_DECL_RMBE_EC;

        rc = smc_ib_modify_qp_rts(link);
                return SMC_CLC_DECL_INTERR;

        smc_wr_remember_qp_attr(link);

        if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
                return SMC_CLC_DECL_INTERR;

        /* send CONFIRM LINK response over RoCE fabric */
        rc = smc_llc_send_confirm_link(link,
                                       link->smcibdev->mac[link->ibport - 1],
                                       &link->smcibdev->gid[link->ibport - 1],
                return SMC_CLC_DECL_TCL;

        /* receive ADD LINK request from server over RoCE fabric */
        rest = wait_for_completion_interruptible_timeout(&link->llc_add,
                struct smc_clc_msg_decline dclc;

                rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),

        /* send add link reject message, only one link supported for now */
        rc = smc_llc_send_add_link(link,
                                   link->smcibdev->mac[link->ibport - 1],
                                   &link->smcibdev->gid[link->ibport - 1],
                return SMC_CLC_DECL_TCL;

        smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
static void smc_conn_save_peer_info(struct smc_sock *smc,
                                    struct smc_clc_msg_accept_confirm *clc)
        int bufsize = smc_uncompress_bufsize(clc->rmbe_size);

        smc->conn.peer_rmbe_idx = clc->rmbe_idx;
        smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
        smc->conn.peer_rmbe_size = bufsize;
        atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
        /* byte offset of this connection's element within the peer RMB */
        smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
static void smc_link_save_peer_info(struct smc_link *link,
                                    struct smc_clc_msg_accept_confirm *clc)
        link->peer_qpn = ntoh24(clc->qpn);
        memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
        memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
        link->peer_psn = ntoh24(clc->psn);
        link->peer_mtu = clc->qp_mtu;
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc)
        smc->use_fallback = true;
        smc_copy_sock_settings_to_clc(smc);
        if (smc->sk.sk_state == SMC_INIT)
                smc->sk.sk_state = SMC_ACTIVE;

/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
        if (reason_code < 0) { /* error, fallback is not possible */
                if (smc->sk.sk_state == SMC_INIT)
                        sock_put(&smc->sk); /* passive closing */

        if (reason_code != SMC_CLC_DECL_REPLY) {
                rc = smc_clc_send_decline(smc, reason_code);
                        if (smc->sk.sk_state == SMC_INIT)
                                sock_put(&smc->sk); /* passive closing */

        return smc_connect_fallback(smc);
/* abort connecting */
static int smc_connect_abort(struct smc_sock *smc, int reason_code,
        if (local_contact == SMC_FIRST_CONTACT)
                smc_lgr_forget(smc->conn.lgr);
        mutex_unlock(&smc_create_lgr_pending);
        smc_conn_free(&smc->conn);
/* check if there is an RDMA device available for this connection. */
/* called for connect and listen */
static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev,
        /* PNET table look up: search active ib_device and port
         * within same PNETID that also contains the ethernet device
         * used for the internal TCP socket
         */
        smc_pnet_find_roce_resource(smc->clcsock->sk, ibdev, ibport);
                reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc,
                           struct smc_clc_msg_accept_confirm *aclc,
                           struct smc_ib_device *ibdev, u8 ibport)
        /* do inband token exchange */
        rc = smc_clc_send_proposal(smc, ibdev, ibport);

        /* receive SMC Accept CLC message */
        return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT);
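/* Orientation note (a sketch of the CLC handshake as driven by the code in
 * this file, not a normative protocol description):
 *
 *      client                                  server
 *      ------                                  ------
 *      smc_clc_send_proposal()  --PROPOSAL-->  smc_clc_wait_msg(PROPOSAL)
 *      smc_clc_wait_msg(ACCEPT) <--ACCEPT----  smc_clc_send_accept()
 *      smc_clc_send_confirm()   --CONFIRM-->   smc_clc_wait_msg(CONFIRM)
 *
 * Either side may answer with a DECLINE instead (smc_clc_send_decline()),
 * after which both peers fall back to plain TCP on the internal clcsock.
 */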
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
                            struct smc_clc_msg_accept_confirm *aclc,
                            struct smc_ib_device *ibdev, u8 ibport)
        int local_contact = SMC_FIRST_CONTACT;
        struct smc_link *link;

        mutex_lock(&smc_create_lgr_pending);
        local_contact = smc_conn_create(smc, ibdev, ibport, &aclc->lcl,
        if (local_contact < 0) {
                if (local_contact == -ENOMEM)
                        reason_code = SMC_CLC_DECL_MEM; /* insufficient memory */
                else if (local_contact == -ENOLINK)
                        reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
                        reason_code = SMC_CLC_DECL_INTERR; /* other error */
                return smc_connect_abort(smc, reason_code, 0);

        link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

        smc_conn_save_peer_info(smc, aclc);

        /* create send buffer and rmb */
        if (smc_buf_create(smc))
                return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);

        if (local_contact == SMC_FIRST_CONTACT)
                smc_link_save_peer_info(link, aclc);

        if (smc_rmb_rtoken_handling(&smc->conn, aclc))
                return smc_connect_abort(smc, SMC_CLC_DECL_INTERR,

        if (local_contact == SMC_FIRST_CONTACT) {
                if (smc_ib_ready_link(link))
                        return smc_connect_abort(smc, SMC_CLC_DECL_INTERR,

                if (!smc->conn.rmb_desc->reused &&
                    smc_reg_rmb(link, smc->conn.rmb_desc, true))
                        return smc_connect_abort(smc, SMC_CLC_DECL_INTERR,

        smc_rmb_sync_sg_for_device(&smc->conn);

        reason_code = smc_clc_send_confirm(smc);
                return smc_connect_abort(smc, reason_code, local_contact);

        if (local_contact == SMC_FIRST_CONTACT) {
                /* QP confirmation over RoCE fabric */
                reason_code = smc_clnt_conf_first_link(smc);
                        return smc_connect_abort(smc, reason_code,

        mutex_unlock(&smc_create_lgr_pending);

        smc_copy_sock_settings_to_clc(smc);
        if (smc->sk.sk_state == SMC_INIT)
                smc->sk.sk_state = SMC_ACTIVE;
/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
        struct smc_clc_msg_accept_confirm aclc;
        struct smc_ib_device *ibdev;

        sock_hold(&smc->sk); /* sock put in passive closing */

        if (smc->use_fallback)
                return smc_connect_fallback(smc);

        /* if peer has not signalled SMC-capability, fall back */
        if (!tcp_sk(smc->clcsock->sk)->syn_smc)
                return smc_connect_fallback(smc);

        /* IPSec connections opt out of SMC-R optimizations */
        if (using_ipsec(smc))
                return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);

        /* check if an RDMA device is available; if not, fall back */
        if (smc_check_rdma(smc, &ibdev, &ibport))
                return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR);

        /* perform CLC handshake */
        rc = smc_connect_clc(smc, &aclc, ibdev, ibport);
                return smc_connect_decline_fallback(smc, rc);

        /* connect using rdma */
        rc = smc_connect_rdma(smc, &aclc, ibdev, ibport);
                return smc_connect_decline_fallback(smc, rc);
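/* For non-blocking connect() calls, smc_connect() further below does not run
 * the TCP handshake inline; it stashes the destination address and flags and
 * defers the work to smc_connect_work(). The kzalloc(alen + 2 * sizeof(int))
 * there matches a layout along these lines (illustrative sketch only; the
 * actual declaration lives elsewhere in the SMC headers):
 *
 *      struct smc_connect_info {
 *              int             flags;
 *              int             alen;
 *              struct sockaddr addr[];
 *      };
 *
 * smc_connect_work() then issues kernel_connect() on the internal clcsock
 * and, on success, continues with __smc_connect() under the sock lock.
 */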
static void smc_connect_work(struct work_struct *work)
        struct smc_sock *smc = container_of(work, struct smc_sock,

        rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
                            smc->connect_info->alen, smc->connect_info->flags);
        if (smc->clcsock->sk->sk_err) {
                smc->sk.sk_err = smc->clcsock->sk->sk_err;

                smc->sk.sk_err = -rc;

        rc = __smc_connect(smc);
                smc->sk.sk_err = -rc;

                smc->sk.sk_state_change(&smc->sk);
        kfree(smc->connect_info);
        smc->connect_info = NULL;
        release_sock(&smc->sk);
static int smc_connect(struct socket *sock, struct sockaddr *addr,
        struct sock *sk = sock->sk;
        struct smc_sock *smc;

        /* separate smc parameter checking to be safe */
        if (alen < sizeof(addr->sa_family))
        if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)

        switch (sk->sk_state) {

        smc_copy_sock_settings_to_clc(smc);
        tcp_sk(smc->clcsock->sk)->syn_smc = 1;
        if (flags & O_NONBLOCK) {
                if (smc->connect_info) {

                smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
                if (!smc->connect_info) {

                smc->connect_info->alen = alen;
                smc->connect_info->flags = flags ^ O_NONBLOCK;
                memcpy(&smc->connect_info->addr, addr, alen);
                schedule_work(&smc->connect_work);

                rc = kernel_connect(smc->clcsock, addr, alen, flags);

                rc = __smc_connect(smc);

                rc = 0; /* success cases including fallback */
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
        struct socket *new_clcsock = NULL;
        struct sock *lsk = &lsmc->sk;

        new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
                lsk->sk_err = ENOMEM;

        *new_smc = smc_sk(new_sk);

        rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);

        if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
                        sock_release(new_clcsock);
                new_sk->sk_state = SMC_CLOSED;
                sock_set_flag(new_sk, SOCK_DEAD);
                new_sk->sk_prot->unhash(new_sk);
                sock_put(new_sk); /* final */

        (*new_smc)->clcsock = new_clcsock;
/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
        struct smc_sock *par = smc_sk(parent);

        sock_hold(sk); /* sock_put in smc_accept_unlink() */
        spin_lock(&par->accept_q_lock);
        list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
        spin_unlock(&par->accept_q_lock);
        sk_acceptq_added(parent);

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
        struct smc_sock *par = smc_sk(sk)->listen_smc;

        spin_lock(&par->accept_q_lock);
        list_del_init(&smc_sk(sk)->accept_q);
        spin_unlock(&par->accept_q_lock);
        sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
        sock_put(sk); /* sock_hold in smc_accept_enqueue */
/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
                                struct socket *new_sock)
        struct smc_sock *isk, *n;

        list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
                new_sk = (struct sock *)isk;

                smc_accept_unlink(new_sk);
                if (new_sk->sk_state == SMC_CLOSED) {
                                sock_release(isk->clcsock);
                        new_sk->sk_prot->unhash(new_sk);
                        sock_put(new_sk); /* final */

                        sock_graft(new_sk, new_sock);
/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
        struct smc_sock *smc = smc_sk(sk);

        if (!sk->sk_lingertime)
                /* wait for peer closing */
                sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
        if (!smc->use_fallback) {
                smc_close_active(smc);
                sock_set_flag(sk, SOCK_DEAD);
                sk->sk_shutdown |= SHUTDOWN_MASK;

        if (smc->use_fallback) {
                sock_put(sk); /* passive closing */
                sk->sk_state = SMC_CLOSED;

                if (sk->sk_state == SMC_CLOSED)
                        smc_conn_free(&smc->conn);

        sk->sk_prot->unhash(sk);
        sock_put(sk); /* final sock_put */
static int smc_serv_conf_first_link(struct smc_sock *smc)
        struct net *net = sock_net(smc->clcsock->sk);
        struct smc_link_group *lgr = smc->conn.lgr;
        struct smc_link *link;

        link = &lgr->lnk[SMC_SINGLE_LINK];

        if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
                return SMC_CLC_DECL_INTERR;

        /* send CONFIRM LINK request to client over the RoCE fabric */
        rc = smc_llc_send_confirm_link(link,
                                       link->smcibdev->mac[link->ibport - 1],
                                       &link->smcibdev->gid[link->ibport - 1],
                return SMC_CLC_DECL_TCL;

        /* receive CONFIRM LINK response from client over the RoCE fabric */
        rest = wait_for_completion_interruptible_timeout(
                &link->llc_confirm_resp,
                SMC_LLC_WAIT_FIRST_TIME);
                struct smc_clc_msg_decline dclc;

                rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),

        if (link->llc_confirm_resp_rc)
                return SMC_CLC_DECL_RMBE_EC;

        /* send ADD LINK request to client over the RoCE fabric */
        rc = smc_llc_send_add_link(link,
                                   link->smcibdev->mac[link->ibport - 1],
                                   &link->smcibdev->gid[link->ibport - 1],
                return SMC_CLC_DECL_TCL;

        /* receive ADD LINK response from client over the RoCE fabric */
        rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
                struct smc_clc_msg_decline dclc;

                rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),

        smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
        struct smc_sock *lsmc = new_smc->listen_smc;
        struct sock *newsmcsk = &new_smc->sk;

        lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
        if (lsmc->sk.sk_state == SMC_LISTEN) {
                smc_accept_enqueue(&lsmc->sk, newsmcsk);
        } else { /* no longer listening */
                smc_close_non_accepted(newsmcsk);
        release_sock(&lsmc->sk);

        lsmc->sk.sk_data_ready(&lsmc->sk);
        sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
        struct sock *newsmcsk = &new_smc->sk;

        sk_refcnt_debug_inc(newsmcsk);
        if (newsmcsk->sk_state == SMC_INIT)
                newsmcsk->sk_state = SMC_ACTIVE;

        smc_listen_out(new_smc);

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
        struct sock *newsmcsk = &new_smc->sk;

        if (newsmcsk->sk_state == SMC_INIT)
                sock_put(&new_smc->sk); /* passive closing */
        newsmcsk->sk_state = SMC_CLOSED;
        smc_conn_free(&new_smc->conn);

        smc_listen_out(new_smc);

/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
        /* RDMA setup failed, switch back to TCP */
        if (local_contact == SMC_FIRST_CONTACT)
                smc_lgr_forget(new_smc->conn.lgr);
        if (reason_code < 0) { /* error, no fallback possible */
                smc_listen_out_err(new_smc);

        smc_conn_free(&new_smc->conn);
        new_smc->use_fallback = true;
        if (reason_code && reason_code != SMC_CLC_DECL_REPLY) {
                if (smc_clc_send_decline(new_smc, reason_code) < 0) {
                        smc_listen_out_err(new_smc);

        smc_listen_out_connected(new_smc);
/* listen worker: check prefixes */
static int smc_listen_rdma_check(struct smc_sock *new_smc,
                                 struct smc_clc_msg_proposal *pclc)
        struct smc_clc_msg_proposal_prefix *pclc_prfx;
        struct socket *newclcsock = new_smc->clcsock;

        pclc_prfx = smc_clc_proposal_get_prefix(pclc);
        if (smc_clc_prfx_match(newclcsock, pclc_prfx))
                return SMC_CLC_DECL_CNFERR;

/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
                                struct smc_clc_msg_proposal *pclc,
                                struct smc_ib_device *ibdev, u8 ibport,
        /* allocate connection / link group */
        *local_contact = smc_conn_create(new_smc, ibdev, ibport, &pclc->lcl, 0);
        if (*local_contact < 0) {
                if (*local_contact == -ENOMEM)
                        return SMC_CLC_DECL_MEM; /* insufficient memory */
                return SMC_CLC_DECL_INTERR; /* other error */

        /* create send buffer and rmb */
        if (smc_buf_create(new_smc))
                return SMC_CLC_DECL_MEM;

/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
        struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

        if (local_contact != SMC_FIRST_CONTACT) {
                if (!new_smc->conn.rmb_desc->reused) {
                        if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
                                return SMC_CLC_DECL_INTERR;

        smc_rmb_sync_sg_for_device(&new_smc->conn);
/* listen worker: finish RDMA setup */
static void smc_listen_rdma_finish(struct smc_sock *new_smc,
                                   struct smc_clc_msg_accept_confirm *cclc,
        struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

        if (local_contact == SMC_FIRST_CONTACT)
                smc_link_save_peer_info(link, cclc);

        if (smc_rmb_rtoken_handling(&new_smc->conn, cclc)) {
                reason_code = SMC_CLC_DECL_INTERR;

        if (local_contact == SMC_FIRST_CONTACT) {
                if (smc_ib_ready_link(link)) {
                        reason_code = SMC_CLC_DECL_INTERR;

                /* QP confirmation over RoCE fabric */
                reason_code = smc_serv_conf_first_link(new_smc);

        mutex_unlock(&smc_create_lgr_pending);
        smc_listen_decline(new_smc, reason_code, local_contact);
/* setup for RDMA connection of server */
static void smc_listen_work(struct work_struct *work)
        struct smc_sock *new_smc = container_of(work, struct smc_sock,
        struct socket *newclcsock = new_smc->clcsock;
        struct smc_clc_msg_accept_confirm cclc;
        struct smc_clc_msg_proposal *pclc;
        struct smc_ib_device *ibdev;
        u8 buf[SMC_CLC_MAX_LEN];
        int local_contact = 0;
        int reason_code = 0;

        if (new_smc->use_fallback) {
                smc_listen_out_connected(new_smc);

        /* check if peer is smc capable */
        if (!tcp_sk(newclcsock->sk)->syn_smc) {
                new_smc->use_fallback = true;
                smc_listen_out_connected(new_smc);
        /* do inband token exchange -
         * wait for and receive SMC Proposal CLC message
         */
        pclc = (struct smc_clc_msg_proposal *)&buf;
        reason_code = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
                smc_listen_decline(new_smc, reason_code, 0);
        /* IPSec connections opt out of SMC-R optimizations */
        if (using_ipsec(new_smc)) {
                smc_listen_decline(new_smc, SMC_CLC_DECL_IPSEC, 0);

        mutex_lock(&smc_create_lgr_pending);
        smc_close_init(new_smc);
        smc_rx_init(new_smc);
        smc_tx_init(new_smc);

        /* check if RDMA is available */
        if (smc_check_rdma(new_smc, &ibdev, &ibport) ||
            smc_listen_rdma_check(new_smc, pclc) ||
            smc_listen_rdma_init(new_smc, pclc, ibdev, ibport,
            smc_listen_rdma_reg(new_smc, local_contact)) {
                /* SMC not supported, decline */
                mutex_unlock(&smc_create_lgr_pending);
                smc_listen_decline(new_smc, SMC_CLC_DECL_CNFERR, local_contact);

        /* send SMC Accept CLC message */
        rc = smc_clc_send_accept(new_smc, local_contact);
                mutex_unlock(&smc_create_lgr_pending);
                smc_listen_decline(new_smc, rc, local_contact);

        /* receive SMC Confirm CLC message */
        reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
                mutex_unlock(&smc_create_lgr_pending);
                smc_listen_decline(new_smc, reason_code, local_contact);

        smc_listen_rdma_finish(new_smc, &cclc, local_contact);
        smc_conn_save_peer_info(new_smc, &cclc);
        mutex_unlock(&smc_create_lgr_pending);
        smc_listen_out_connected(new_smc);
static void smc_tcp_listen_work(struct work_struct *work)
        struct smc_sock *lsmc = container_of(work, struct smc_sock,
        struct sock *lsk = &lsmc->sk;
        struct smc_sock *new_smc;

        while (lsk->sk_state == SMC_LISTEN) {
                rc = smc_clcsock_accept(lsmc, &new_smc);

                new_smc->listen_smc = lsmc;
                new_smc->use_fallback = lsmc->use_fallback;
                sock_hold(lsk); /* sock_put in smc_listen_work */
                INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
                smc_copy_sock_settings_to_smc(new_smc);
                sock_hold(&new_smc->sk); /* sock_put in passive closing */
                if (!schedule_work(&new_smc->smc_listen_work))
                        sock_put(&new_smc->sk);

        sock_put(&lsmc->sk); /* sock_hold in smc_listen */
static int smc_listen(struct socket *sock, int backlog)
        struct sock *sk = sock->sk;
        struct smc_sock *smc;

        if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))

        if (sk->sk_state == SMC_LISTEN) {
                sk->sk_max_ack_backlog = backlog;
        /* some socket options are handled in core, so we cannot apply
         * them to the clc socket -- copy smc socket options to clc socket
         */
        smc_copy_sock_settings_to_clc(smc);
        if (!smc->use_fallback)
                tcp_sk(smc->clcsock->sk)->syn_smc = 1;

        rc = kernel_listen(smc->clcsock, backlog);

        sk->sk_max_ack_backlog = backlog;
        sk->sk_ack_backlog = 0;
        sk->sk_state = SMC_LISTEN;
        INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
        sock_hold(sk); /* sock_hold in tcp_listen_worker */
        if (!schedule_work(&smc->tcp_listen_work))
static int smc_accept(struct socket *sock, struct socket *new_sock,
                      int flags, bool kern)
        struct sock *sk = sock->sk, *nsk;
        DECLARE_WAITQUEUE(wait, current);
        struct smc_sock *lsmc;

        sock_hold(sk); /* sock_put below */

        if (lsmc->sk.sk_state != SMC_LISTEN) {

        /* Wait for an incoming connection */
        timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
                set_current_state(TASK_INTERRUPTIBLE);

                timeo = schedule_timeout(timeo);
                /* wakeup by sk_data_ready in smc_listen_work() */
                sched_annotate_sleep();

                if (signal_pending(current)) {
                        rc = sock_intr_errno(timeo);

        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);

        rc = sock_error(nsk);

        if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
                /* wait till data arrives on the socket */
                timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
                if (smc_sk(nsk)->use_fallback) {
                        struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

                        if (skb_queue_empty(&clcsk->sk_receive_queue))
                                sk_wait_data(clcsk, &timeo, NULL);
                        release_sock(clcsk);
                } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
                        smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);

        sock_put(sk); /* sock_hold above */
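/* The sockopt_defer_accept handling above mirrors TCP_DEFER_ACCEPT: the value
 * is stored by smc_setsockopt() further below and, for blocking accepts, makes
 * accept() wait until payload data has actually arrived on the new socket.
 * Illustrative userspace sketch (listen_fd and error handling assumed):
 *
 *      int secs = 5;
 *      setsockopt(listen_fd, SOL_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
 *      int conn_fd = accept(listen_fd, NULL, NULL); // returns once data queued
 */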
static int smc_getname(struct socket *sock, struct sockaddr *addr,
        struct smc_sock *smc;

        if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
            (sock->sk->sk_state != SMC_APPCLOSEWAIT1))

        smc = smc_sk(sock->sk);

        return smc->clcsock->ops->getname(smc->clcsock, addr, peer);

static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        struct sock *sk = sock->sk;
        struct smc_sock *smc;

        if ((sk->sk_state != SMC_ACTIVE) &&
            (sk->sk_state != SMC_APPCLOSEWAIT1) &&
            (sk->sk_state != SMC_INIT))

        if (msg->msg_flags & MSG_FASTOPEN) {
                if (sk->sk_state == SMC_INIT) {
                        smc->use_fallback = true;

        if (smc->use_fallback)
                rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
                rc = smc_tx_sendmsg(smc, msg, len);

static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        struct sock *sk = sock->sk;
        struct smc_sock *smc;

        if ((sk->sk_state == SMC_INIT) ||
            (sk->sk_state == SMC_LISTEN) ||
            (sk->sk_state == SMC_CLOSED))

        if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {

        if (smc->use_fallback) {
                rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
                msg->msg_namelen = 0;
                rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
static __poll_t smc_accept_poll(struct sock *parent)
        struct smc_sock *isk = smc_sk(parent);

        spin_lock(&isk->accept_q_lock);
        if (!list_empty(&isk->accept_q))
                mask = EPOLLIN | EPOLLRDNORM;
        spin_unlock(&isk->accept_q_lock);

static __poll_t smc_poll(struct file *file, struct socket *sock,
        struct sock *sk = sock->sk;
        struct smc_sock *smc;

        smc = smc_sk(sock->sk);
        if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
                /* delegate to CLC child sock */
                mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
                sk->sk_err = smc->clcsock->sk->sk_err;

                if (sk->sk_state != SMC_CLOSED)
                        sock_poll_wait(file, sk_sleep(sk), wait);

                if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
                    (sk->sk_state == SMC_CLOSED))
                if (sk->sk_state == SMC_LISTEN) {
                        /* woken up by sk_data_ready in smc_listen_work() */
                        mask = smc_accept_poll(sk);
                if (atomic_read(&smc->conn.sndbuf_space) ||
                    sk->sk_shutdown & SEND_SHUTDOWN) {
                        mask |= EPOLLOUT | EPOLLWRNORM;

                        sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                if (atomic_read(&smc->conn.bytes_to_rcv))
                        mask |= EPOLLIN | EPOLLRDNORM;
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
                if (sk->sk_state == SMC_APPCLOSEWAIT1)

                if (smc->conn.urg_state == SMC_URG_VALID)
static int smc_shutdown(struct socket *sock, int how)
        struct sock *sk = sock->sk;
        struct smc_sock *smc;

        if ((how < SHUT_RD) || (how > SHUT_RDWR))

        if ((sk->sk_state != SMC_LISTEN) &&
            (sk->sk_state != SMC_ACTIVE) &&
            (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
            (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
            (sk->sk_state != SMC_APPCLOSEWAIT1) &&
            (sk->sk_state != SMC_APPCLOSEWAIT2) &&
            (sk->sk_state != SMC_APPFINCLOSEWAIT))

        if (smc->use_fallback) {
                rc = kernel_sock_shutdown(smc->clcsock, how);
                sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
                if (sk->sk_shutdown == SHUTDOWN_MASK)
                        sk->sk_state = SMC_CLOSED;

        case SHUT_RDWR:         /* shutdown in both directions */
                rc = smc_close_active(smc);

                rc = smc_close_shutdown_write(smc);

                /* nothing more to do because peer is not involved */

        rc1 = kernel_sock_shutdown(smc->clcsock, how);
        /* map sock_shutdown_cmd constants to sk_shutdown value range:
         * SHUT_RD(0) -> RCV_SHUTDOWN(1), SHUT_WR(1) -> SEND_SHUTDOWN(2),
         * SHUT_RDWR(2) -> SHUTDOWN_MASK(3), hence the "+ 1"
         */
        sk->sk_shutdown |= how + 1;

        return rc ? rc : rc1;
static int smc_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
        struct sock *sk = sock->sk;
        struct smc_sock *smc;
        /* generic setsockopts reaching us here always apply to the
         * CLC socket
         */
        rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
        if (smc->clcsock->sk->sk_err) {
                sk->sk_err = smc->clcsock->sk->sk_err;
                sk->sk_error_report(sk);

        if (optlen < sizeof(int))
        if (get_user(val, (int __user *)optval))

        case TCP_FASTOPEN_CONNECT:
        case TCP_FASTOPEN_KEY:
        case TCP_FASTOPEN_NO_COOKIE:
                /* option not supported by SMC */
                if (sk->sk_state == SMC_INIT) {
                        smc->use_fallback = true;

                        if (!smc->use_fallback)

                if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
                        if (val && !smc->use_fallback)
                                mod_delayed_work(system_wq, &smc->conn.tx_work,

                if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
                        if (!val && !smc->use_fallback)
                                mod_delayed_work(system_wq, &smc->conn.tx_work,

        case TCP_DEFER_ACCEPT:
                smc->sockopt_defer_accept = val;
static int smc_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
        struct smc_sock *smc;

        smc = smc_sk(sock->sk);
        /* socket options apply to the CLC socket */
        return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
static int smc_ioctl(struct socket *sock, unsigned int cmd,
        union smc_host_cursor cons, urg;
        struct smc_connection *conn;
        struct smc_sock *smc;

        smc = smc_sk(sock->sk);

        if (smc->use_fallback) {
                return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);

        lock_sock(&smc->sk);
        case SIOCINQ: /* same as FIONREAD */
                if (smc->sk.sk_state == SMC_LISTEN) {
                        release_sock(&smc->sk);
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = atomic_read(&smc->conn.bytes_to_rcv);

                /* output queue size (not send + not acked) */
                if (smc->sk.sk_state == SMC_LISTEN) {
                        release_sock(&smc->sk);
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = smc->conn.sndbuf_desc->len -
                               atomic_read(&smc->conn.sndbuf_space);

                /* output queue size (not send only) */
                if (smc->sk.sk_state == SMC_LISTEN) {
                        release_sock(&smc->sk);
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = smc_tx_prepared_sends(&smc->conn);

                if (smc->sk.sk_state == SMC_LISTEN) {
                        release_sock(&smc->sk);
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED) {

                        smc_curs_write(&cons,
                                       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
                        smc_curs_write(&urg,
                                       smc_curs_read(&conn->urg_curs, conn),
                        answ = smc_curs_diff(conn->rmb_desc->len,

                release_sock(&smc->sk);
                return -ENOIOCTLCMD;

        release_sock(&smc->sk);

        return put_user(answ, (int __user *)arg);
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
                            int offset, size_t size, int flags)
        struct sock *sk = sock->sk;
        struct smc_sock *smc;

        if (sk->sk_state != SMC_ACTIVE) {

        if (smc->use_fallback)
                rc = kernel_sendpage(smc->clcsock, page, offset,
                rc = sock_no_sendpage(sock, page, offset, size, flags);
/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * is done.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
                               struct pipe_inode_info *pipe, size_t len,
        struct sock *sk = sock->sk;
        struct smc_sock *smc;

        if (sk->sk_state == SMC_INIT ||
            sk->sk_state == SMC_LISTEN ||
            sk->sk_state == SMC_CLOSED)

        if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {

        if (smc->use_fallback) {
                rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,

                if (flags & SPLICE_F_NONBLOCK)
                        flags = MSG_DONTWAIT;

                rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
        .owner = THIS_MODULE,
        .release = smc_release,
        .connect = smc_connect,
        .socketpair = sock_no_socketpair,
        .accept = smc_accept,
        .getname = smc_getname,
        .listen = smc_listen,
        .shutdown = smc_shutdown,
        .setsockopt = smc_setsockopt,
        .getsockopt = smc_getsockopt,
        .sendmsg = smc_sendmsg,
        .recvmsg = smc_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = smc_sendpage,
        .splice_read = smc_splice_read,
static int smc_create(struct net *net, struct socket *sock, int protocol,
        int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
        struct smc_sock *smc;

        rc = -ESOCKTNOSUPPORT;
        if (sock->type != SOCK_STREAM)

        rc = -EPROTONOSUPPORT;
        if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)

        sock->ops = &smc_sock_ops;
        sk = smc_sock_alloc(net, sock, protocol);

        /* create internal TCP socket for CLC handshake and fallback */
        smc->use_fallback = false; /* assume rdma capability first */
        rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
                sk_common_release(sk);

        smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
        smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
static const struct net_proto_family smc_sock_family_ops = {
        .owner = THIS_MODULE,
        .create = smc_create,
static int __init smc_init(void)
        rc = smc_pnet_init();

        rc = smc_llc_init();
                pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);

        rc = smc_cdc_init();
                pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);

        rc = proto_register(&smc_proto, 1);
                pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);

        rc = proto_register(&smc_proto6, 1);
                pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);

        rc = sock_register(&smc_sock_family_ops);
                pr_err("%s: sock_register fails with %d\n", __func__, rc);

        INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
        INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

        rc = smc_ib_register_client();
                pr_err("%s: ib_register fails with %d\n", __func__, rc);

        static_branch_enable(&tcp_have_smc);

        sock_unregister(PF_SMC);

        proto_unregister(&smc_proto6);

        proto_unregister(&smc_proto);
static void __exit smc_exit(void)
        static_branch_disable(&tcp_have_smc);
        smc_ib_unregister_client();
        sock_unregister(PF_SMC);
        proto_unregister(&smc_proto6);
        proto_unregister(&smc_proto);
module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);
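/* MODULE_ALIAS_NETPROTO(PF_SMC) lets the socket core autoload this module on
 * the first socket(AF_SMC, ...) call, so no explicit "modprobe smc" is needed
 * when module autoloading is enabled. Minimal check from a shell (sketch;
 * smc_client stands in for any AF_SMC-using program):
 *
 *      # lsmod | grep -w smc          # not loaded yet
 *      # ./smc_client 192.0.2.1 ...   # creates an AF_SMC socket
 *      # lsmod | grep -w smc          # module now present
 */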