// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Basic Transport Functions exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#define SMC_LGR_NUM_INCR	256
#define SMC_LGR_FREE_DELAY_SERV	(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT	(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
#define SMC_LGR_FREE_DELAY_FAST	(8 * HZ)

static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
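/* lgr_cnt and lgrs_deleted let smc_smcr_terminate_all() (and thus module
 * unload) block until the last SMC-R link group is gone: smc_lgr_free()
 * decrements the counter and wakes the waiters once it reaches zero.
 */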
static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}
static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing && !lgr->freefast) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
				 SMC_LGR_FREE_DELAY_CLNT :
				 SMC_LGR_FREE_DELAY_SERV);
	}
}
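/* mod_delayed_work() restarts the grace period on every call, so an idle
 * link group stays alive as long as connections keep arriving; once the
 * freeing or freefast flag is set, the free worker is not rearmed here.
 */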
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
{
	if (!lgr->freeing && !lgr->freefast) {
		lgr->freefast = 1;
		mod_delayed_work(system_wq, &lgr->free_work,
				 SMC_LGR_FREE_DELAY_FAST);
	}
}
/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}
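/* The rbtree is keyed by alert_token_local; smc_lgr_find_conn() walks it
 * to map the alert token carried in an incoming CDC message back to its
 * connection.
 */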
/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static int smc_lgr_register_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);

	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);

	/* assign the new connection to a link */
	if (!conn->lgr->is_smcd) {
		struct smc_link *lnk;
		int i;

		/* tbd - link balancing */
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			lnk = &conn->lgr->lnk[i];
			if (lnk->state == SMC_LNK_ACTIVATING ||
			    lnk->state == SMC_LNK_ACTIVE)
				conn->lnk = lnk;
		}
		if (!conn->lnk)
			return SMC_CLC_DECL_NOACTLINK;
	}
	conn->lgr->conns_num++;
	return 0;
}
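/* nexttoken is a plain wrapping counter: atomic_inc_return() eventually
 * wraps to 0, which the while loop above skips (0 is reserved), and any
 * token still in use in this link group is rejected by the tree lookup.
 */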
/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}
/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
	conn->lgr = NULL;
}

void smc_lgr_cleanup_early(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;

	smc_conn_free(conn);
	smc_lgr_forget(lgr);
	smc_lgr_schedule_free_work_fast(lgr);
}
/* Send delete link, either as client to request the initiation
 * of the DELETE LINK sequence from server; or as server to
 * initiate the delete processing. See smc_llc_rx_delete_link().
 */
static int smcr_link_send_delete(struct smc_link *lnk, bool orderly)
{
	if (lnk->state == SMC_LNK_ACTIVE &&
	    !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, orderly)) {
		smc_llc_link_deleting(lnk);
		return 0;
	}
	return -ENOTCONN;
}
static void smc_lgr_free(struct smc_link_group *lgr);

static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	bool conns;
	int i;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */

	if (!lgr->is_smcd && !lgr->terminating) {
		bool do_wait = false;

		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *lnk = &lgr->lnk[i];
			/* try to send del link msg, on err free immediately */
			if (lnk->state == SMC_LNK_ACTIVE &&
			    !smcr_link_send_delete(lnk, true)) {
				/* reschedule in case we never receive a resp */
				smc_lgr_schedule_free_work(lgr);
				do_wait = true;
			}
		}
		if (do_wait) {
			spin_unlock_bh(lgr_lock);
			return; /* wait for resp, see smc_llc_rx_delete_link */
		}
	}
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	if (!lgr->is_smcd) {
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *lnk = &lgr->lnk[i];

			if (lnk->state != SMC_LNK_INACTIVE)
				smc_llc_link_inactive(lnk);
		}
	}
	smc_lgr_free(lgr);
}
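/* Life cycle of free_work: it is (re)armed with a long delay whenever the
 * last connection of a link group goes away, runs only if the group is
 * still empty, and for SMC-R first tries an orderly DELETE LINK exchange
 * before the group is actually freed.
 */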
static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	__smc_lgr_terminate(lgr, true);
}

/* return next unique link id for the lgr */
static u8 smcr_next_link_id(struct smc_link_group *lgr)
{
	u8 link_id;
	int i;

	while (1) {
		link_id = ++lgr->next_link_id;
		if (!link_id)	/* skip zero as link_id */
			link_id = ++lgr->next_link_id;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state != SMC_LNK_INACTIVE &&
			    lgr->lnk[i].link_id == link_id)
				break;
		}
		if (i == SMC_LINKS_PER_LGR_MAX)
			break;
	}
	return link_id;
}
static int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
			  u8 link_idx, struct smc_init_info *ini)
{
	u8 rndvec[3];
	int rc;

	get_device(&ini->ib_dev->ibdev->dev);
	atomic_inc(&ini->ib_dev->lnk_cnt);
	lnk->state = SMC_LNK_ACTIVATING;
	lnk->link_id = smcr_next_link_id(lgr);
	lnk->lgr = lgr;
	lnk->link_idx = link_idx;
	lnk->smcibdev = ini->ib_dev;
	lnk->ibport = ini->ib_port;
	lnk->path_mtu = ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
	if (!ini->ib_dev->initialized) {
		rc = (int)smc_ib_setup_per_ibdev(ini->ib_dev);
		if (rc)
			goto out;
	}
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
		(rndvec[2] << 16);
	rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
				  ini->vlan_id, lnk->gid, &lnk->sgid_index);
	if (rc)
		goto out;
	rc = smc_llc_link_init(lnk);
	if (rc)
		goto out;
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto clear_llc_lnk;
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk);
out:
	put_device(&ini->ib_dev->ibdev->dev);
	memset(lnk, 0, sizeof(struct smc_link));
	if (!atomic_dec_return(&ini->ib_dev->lnk_cnt))
		wake_up(&ini->ib_dev->lnks_deleted);
	return rc;
}
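/* The link bring-up order above (LLC state, WR buffers, protection domain,
 * queue pair, WR link) is mirrored exactly by the error unwind labels, so
 * a failure at any step releases only what was already set up.
 */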
/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 link_idx;
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freefast = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	rwlock_init(&lgr->sndbufs_lock);
	rwlock_init(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	lgr->next_link_id = 0;
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		get_device(&ini->ism_dev->dev);
		lgr->peer_gid = ini->ism_gid;
		lgr->smcd = ini->ism_dev;
		lgr_list = &ini->ism_dev->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
		       SMC_SYSTEMID_LEN);

		link_idx = SMC_SINGLE_LINK;
		lnk = &lgr->lnk[link_idx];
		rc = smcr_link_init(lgr, lnk, link_idx, ini);
		if (rc)
			goto free_lgr;
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		atomic_inc(&lgr_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}
static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
			   struct smc_link *lnk)
{
	struct smc_link_group *lgr = lnk->lgr;

	if (rmb_desc->is_conf_rkey && !list_empty(&lgr->list)) {
		/* unregister rmb with peer */
		smc_llc_do_delete_rkey(lnk, rmb_desc);
		rmb_desc->is_conf_rkey = false;
	}
	if (rmb_desc->is_reg_err) {
		/* buf registration failed, reuse not possible */
		write_lock_bh(&lgr->rmbs_lock);
		list_del(&rmb_desc->list);
		write_unlock_bh(&lgr->rmbs_lock);

		smc_buf_free(lgr, true, rmb_desc);
	} else {
		rmb_desc->used = 0;
	}
}

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc)
		conn->sndbuf_desc->used = 0;
	if (conn->rmb_desc && lgr->is_smcd)
		conn->rmb_desc->used = 0;
	else if (conn->rmb_desc)
		smcr_buf_unuse(conn->rmb_desc, conn->lnk);
}
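/* Send buffers are purely local and can be recycled by just clearing
 * 'used'; an SMC-R RMB may still be known to the peer via its rkey, so
 * smcr_buf_unuse() deletes the rkey at the peer before reuse or free.
 */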
/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	if (!list_empty(&lgr->list)) {
		smc_lgr_unregister_conn(conn);
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
}

static void smcr_link_clear(struct smc_link *lnk)
{
	if (lnk->peer_qpn == 0)
		return;
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk);
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
	put_device(&lnk->smcibdev->ibdev->dev);
	if (!atomic_dec_return(&lnk->smcibdev->lnk_cnt))
		wake_up(&lnk->smcibdev->lnks_deleted);
}
static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	struct smc_link *lnk;
	int i;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		lnk = &lgr->lnk[i];
		if (!buf_desc->is_map_ib[lnk->link_idx])
			continue;
		if (is_rmb) {
			if (buf_desc->mr_rx[lnk->link_idx])
				smc_ib_put_memory_region(
						buf_desc->mr_rx[lnk->link_idx]);
			smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_FROM_DEVICE);
		} else {
			smc_ib_buf_unmap_sg(lnk, buf_desc, DMA_TO_DEVICE);
		}
		sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	}

	if (buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	int i;

	smc_lgr_free_bufs(lgr);
	if (lgr->is_smcd) {
		if (!lgr->terminating) {
			smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
			put_device(&lgr->smcd->dev);
		}
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
	} else {
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state == SMC_LNK_INACTIVE)
				continue;
			smcr_link_clear(&lgr->lnk[i]);
		}
		if (!atomic_dec_return(&lgr_cnt))
			wake_up(&lgrs_deleted);
	}
	kfree(lgr);
}
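/* smc_lgr_free() is the single teardown sink: it is reached either from
 * smc_lgr_free_work() after the grace period or from __smc_lgr_terminate()
 * on immediate shutdown, and it drops the per-device and global link group
 * counters last so the terminate-all waiters can make progress.
 */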
void smc_lgr_forget(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
}
static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		struct smc_buf_desc *buf_desc;

		list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
			buf_desc->len += sizeof(struct smcd_cdc_msg);
			smc_ism_unregister_dmb(lgr->smcd, buf_desc);
		}
	}
}

static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}
/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
	conn->killed = 1;
	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}
static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	int i;

	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
		smcd_unregister_all_dmbs(lgr);
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(&lgr->smcd->dev);
	} else {
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *lnk = &lgr->lnk[i];

			if (lnk->state != SMC_LNK_INACTIVE)
				smc_llc_link_inactive(lnk);
		}
	}
}
/* terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;
	int i;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	/* cancel free_work sync, will terminate when lgr->freeing is set */
	cancel_delayed_work_sync(&lgr->free_work);
	lgr->terminating = 1;
	if (!lgr->is_smcd)
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
			smc_llc_link_inactive(&lgr->lnk[i]);

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn, soft);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_lgr_cleanup(lgr);
	if (soft)
		smc_lgr_schedule_free_work_fast(lgr);
	else
		smc_lgr_free(lgr);
}
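/* Soft termination lets the actual freeing happen via the fast delayed
 * worker, while hard termination (device gone, reboot) frees the group
 * right here; either way every remaining connection was aborted above
 * under lock_sock.
 */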
/* unlink link group and schedule termination */
void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	schedule_work(&lgr->terminate_work);
}

/* Called when IB port is terminated */
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);
	int i;

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
		if (lgr->is_smcd)
			continue;
		/* tbd - terminate only when no more links are active */
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state == SMC_LNK_INACTIVE ||
			    lgr->lnk[i].state == SMC_LNK_DELETING)
				continue;
			if (lgr->lnk[i].smcibdev == smcibdev &&
			    lgr->lnk[i].ibport == ibport) {
				list_move(&lgr->list, &lgr_free_list);
				lgr->freeing = 1;
			}
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}
}
/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			if (peer_gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
			list_move(&lgr->list, &lgr_free_list);
		}
	}
	spin_unlock_bh(&dev->lgr_lock);

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		schedule_work(&lgr->terminate_work);
	}
}

/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smcd->lgr_lock);
	list_splice_init(&smcd->lgr_list, &lgr_free_list);
	list_for_each_entry(lgr, &lgr_free_list, list)
		lgr->freeing = 1;
	spin_unlock_bh(&smcd->lgr_lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (atomic_read(&smcd->lgr_cnt))
		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}
/* Called when an SMCR device is removed or the smc module is unloaded.
 * If smcibdev is given, all SMCR link groups using this device are terminated.
 * If smcibdev is NULL, all SMCR link groups are terminated.
 */
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);
	int i;

	spin_lock_bh(&smc_lgr_list.lock);
	if (!smcibdev) {
		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
		list_for_each_entry(lgr, &lgr_free_list, list)
			lgr->freeing = 1;
	} else {
		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
			for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
				if (lgr->lnk[i].smcibdev == smcibdev) {
					list_move(&lgr->list, &lgr_free_list);
					lgr->freeing = 1;
				}
			}
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (smcibdev) {
		if (atomic_read(&smcibdev->lnk_cnt))
			wait_event(smcibdev->lnks_deleted,
				   !atomic_read(&smcibdev->lnk_cnt));
	} else {
		if (atomic_read(&lgr_cnt))
			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
	}
}
/* Determine vlan of internal TCP socket and store it in ini->vlan_id. */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct net_device *ndev;
	int i, nest_lvl, rc = 0;

	ini->vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		ini->vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

	rtnl_lock();
	nest_lvl = ndev->lower_level;
	for (i = 0; i < nest_lvl; i++) {
		struct list_head *lower = &ndev->adj_list.lower;

		if (list_empty(lower))
			break;
		lower = lower->next;
		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
		if (is_vlan_dev(ndev)) {
			ini->vlan_id = vlan_dev_vlan_id(ndev);
			break;
		}
	}
	rtnl_unlock();

out_rel:
	dst_release(dst);
out:
	return rc;
}
static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
			   enum smc_lgr_role role, u32 clcqpn)
{
	int i;

	if (memcmp(lgr->peer_systemid, lcl->id_for_peer, SMC_SYSTEMID_LEN) ||
	    lgr->role != role)
		return false;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (lgr->lnk[i].state != SMC_LNK_ACTIVE)
			continue;
		if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
		    !memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
		    !memcmp(lgr->lnk[i].peer_mac, lcl->mac, sizeof(lcl->mac)))
			return true;
	}
	return false;
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev, u64 peer_gid)
{
	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}
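/* Reuse criteria in short: an SMC-R group matches on peer system id, role
 * and an active link with matching peer gid/mac (clients additionally pin
 * the peer qp number); an SMC-D group matches on device and peer gid. The
 * vlan and the per-group connection limit are checked by the caller below.
 */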
/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct list_head *lgr_list;
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
	spinlock_t *lgr_lock;
	int rc = 0;

	lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
	lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
	ini->cln_first_contact = SMC_FIRST_CONTACT;
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	if (role == SMC_CLNT && ini->srv_first_contact)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(lgr_lock);
	list_for_each_entry(lgr, lgr_list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
		    !lgr->sync_err &&
		    lgr->vlan_id == ini->vlan_id &&
		    (role == SMC_CLNT ||
		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
			/* link group found */
			ini->cln_first_contact = SMC_REUSE_CONTACT;
			conn->lgr = lgr;
			rc = smc_lgr_register_conn(conn); /* add conn to lgr */
			write_unlock_bh(&lgr->conns_lock);
			if (!rc && delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(lgr_lock);
	if (rc)
		return rc;

	if (role == SMC_CLNT && !ini->srv_first_contact &&
	    ini->cln_first_contact == SMC_FIRST_CONTACT) {
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
		return SMC_CLC_DECL_SYNCERR;
	}

create:
	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		rc = smc_lgr_create(smc, ini);
		if (rc)
			goto out;
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
		rc = smc_lgr_register_conn(conn); /* add smc conn to lgr */
		write_unlock_bh(&lgr->conns_lock);
		if (rc)
			goto out;
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
	if (ini->is_smcd) {
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	}
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc;
}
/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		size = SMC_BUF_MIN_SIZE;

	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}
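/* Worked example: a request for 65536 bytes compresses to
 * (65536 - 1) >> 14 = 3, ilog2(3) + 1 = 2, and uncompresses back to
 * 1 << (2 + 14) = 65536; 65537 bytes compress to 3, i.e. a 128KB buffer,
 * so the application never gets less than it asked for (capped at
 * SMC_RMBE_SIZES - 1).
 */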
/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     rwlock_t *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	read_lock_bh(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			read_unlock_bh(lock);
			return buf_slot;
		}
	}
	read_unlock_bh(lock);
	return NULL;
}

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}
/* map an rmb buf to a link */
static int smcr_buf_map_link(struct smc_buf_desc *buf_desc, bool is_rmb,
			     struct smc_link *lnk)
{
	int rc;

	if (buf_desc->is_map_ib[lnk->link_idx])
		return 0;

	rc = sg_alloc_table(&buf_desc->sgt[lnk->link_idx], 1, GFP_KERNEL);
	if (rc)
		return rc;
	sg_set_buf(buf_desc->sgt[lnk->link_idx].sgl,
		   buf_desc->cpu_addr, buf_desc->len);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1) {
		rc = -EAGAIN;
		goto free_table;
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc, lnk->link_idx);
		if (rc)
			goto buf_unmap;
		smc_ib_sync_sg_for_device(lnk, buf_desc, DMA_FROM_DEVICE);
	}
	buf_desc->is_map_ib[lnk->link_idx] = true;
	return 0;

buf_unmap:
	smc_ib_buf_unmap_sg(lnk, buf_desc,
			    is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
free_table:
	sg_free_table(&buf_desc->sgt[lnk->link_idx]);
	return rc;
}
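/* The rkey and virtual address advertised to the peer describe one
 * contiguous region, which is why a mapping that does not collapse to a
 * single DMA address (rc != 1 above) is treated as a failure.
 */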
static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);
	buf_desc->len = bufsize;
	return buf_desc;
}
/* map buf_desc on all usable links,
 * unused buffers stay mapped as long as the link is up
 */
static int smcr_buf_map_usable_links(struct smc_link_group *lgr,
				     struct smc_buf_desc *buf_desc, bool is_rmb)
{
	int i, rc = 0;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		struct smc_link *lnk = &lgr->lnk[i];

		if (lnk->state != SMC_LNK_ACTIVE &&
		    lnk->state != SMC_LNK_ACTIVATING)
			continue;
		if (smcr_buf_map_link(buf_desc, is_rmb, lnk)) {
			smcr_buf_unuse(buf_desc, lnk);
			rc = -ENOMEM;
			break;
		}
	}
	return rc;
}
#define SMCD_DMBE_SIZES		7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
		return ERR_PTR(-EAGAIN);

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}
static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	int sk_buf_size;
	rwlock_t *lock;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {
		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		write_lock_bh(lock);
		list_add(&buf_desc->list, buf_list);
		write_unlock_bh(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return -ENOMEM;

	if (!is_smcd) {
		if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) {
			smc_buf_free(lgr, is_rmb, buf_desc);
			return -ENOMEM;
		}
	}

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}
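/* Sizing walk-down: start from the compressed bucket for half the socket
 * buffer size and retry with smaller buckets while allocation fails with
 * -EAGAIN; sk_rcvbuf/sk_sndbuf are then set to twice the buffer actually
 * obtained, matching the "half for payload" convention used above.
 */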
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (conn->lgr->lnk[i].state != SMC_LNK_ACTIVE &&
		    conn->lgr->lnk[i].state != SMC_LNK_ACTIVATING)
			continue;
		smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
				       DMA_FROM_DEVICE);
	}
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	int i;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (conn->lgr->lnk[i].state != SMC_LNK_ACTIVE &&
		    conn->lgr->lnk[i].state != SMC_LNK_ACTIVATING)
			continue;
		smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
					  DMA_FROM_DEVICE);
	}
}
/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. it uses
 * an extra RMB for every connection in a link group
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc)
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
	return rc;
}
static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link *lnk, __be64 nw_vaddr, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    lgr->rtokens[i][lnk->link_idx].dma_addr == dma_addr &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][lnk->link_idx].rkey = rkey;
	lgr->rtokens[i][lnk->link_idx].dma_addr = dma_addr;
	return i;
}

/* delete an rtoken from all links */
int smc_rtoken_delete(struct smc_link *lnk, __be32 nw_rkey)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	u32 rkey = ntohl(nw_rkey);
	int i, j;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][lnk->link_idx].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			for (j = 0; j < SMC_LINKS_PER_LGR_MAX; j++) {
				lgr->rtokens[i][j].rkey = 0;
				lgr->rtokens[i][j].dma_addr = 0;
			}
			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_link *lnk,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(lnk, clc->rmb_dma_addr,
					  clc->rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}
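/* rtokens[idx][link_idx] keeps the peer rkey/address pair per link, so the
 * same rtoken row (selected by conn->rtoken_idx) stays valid across links;
 * smc_rtoken_delete() therefore clears the row on all links at once.
 */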
static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	spin_lock(&smc_ib_devices.lock);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	spin_unlock(&smc_ib_devices.lock);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	spin_unlock(&smcd_dev_list.lock);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
	struct smcd_dev *smcd;

	smc_core_going_away();

	smc_smcr_terminate_all(NULL);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		smc_smcd_terminate_all(smcd);
	spin_unlock(&smcd_dev_list.lock);
}

static int smc_core_reboot_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	smc_lgrs_shutdown();
	smc_ib_unregister_client();
	return 0;
}

static struct notifier_block smc_reboot_notifier = {
	.notifier_call = smc_core_reboot_event,
};

int __init smc_core_init(void)
{
	return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	unregister_reboot_notifier(&smc_reboot_notifier);
	smc_lgrs_shutdown();
}