// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Basic Transport Functions exploiting Infiniband API
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"

#define SMC_LGR_NUM_INCR		256
#define SMC_LGR_FREE_DELAY_SERV		(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT		(SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
#define SMC_LGR_FREE_DELAY_FAST		(8 * HZ)
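
/* The delays above are in jiffies: 600 s for a server link group, 610 s for
 * a client link group (kept slightly longer so that client and server do not
 * free out of sync), and 8 s for the fast path used during early cleanup and
 * termination.
 */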

static struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
	.num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
						  spinlock_t **lgr_lock)
{
	if (lgr->is_smcd) {
		*lgr_lock = &lgr->smcd->lgr_lock;
		return &lgr->smcd->lgr_list;
	}

	*lgr_lock = &smc_lgr_list.lock;
	return &smc_lgr_list.list;
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (!lgr->freeing && !lgr->freefast) {
		mod_delayed_work(system_wq, &lgr->free_work,
				 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
						SMC_LGR_FREE_DELAY_CLNT :
						SMC_LGR_FREE_DELAY_SERV);
	}
}

void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
{
	if (!lgr->freeing && !lgr->freefast) {
		lgr->freefast = 1;
		mod_delayed_work(system_wq, &lgr->free_work,
				 SMC_LGR_FREE_DELAY_FAST);
	}
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}
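
/* The tree is keyed by alert_token_local; smc_lgr_find_conn() walks it with
 * the same comparison for lookups, so insert and lookup must agree on the
 * ordering.
 */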

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static void smc_lgr_register_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);

	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
}
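
/* Note: nexttoken is a global counter and may wrap; the loop above simply
 * retries until it finds a value that is neither 0 (reserved) nor already
 * in use within this link group.
 */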

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
	conn->lgr = NULL;
}

void smc_lgr_cleanup_early(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;

	smc_conn_free(conn);
	smc_lgr_forget(lgr);
	smc_lgr_schedule_free_work_fast(lgr);
}

/* Send delete link, either as client to request the initiation
 * of the DELETE LINK sequence from server; or as server to
 * initiate the delete processing. See smc_llc_rx_delete_link().
 */
static int smc_link_send_delete(struct smc_link *lnk, bool orderly)
{
	if (lnk->state == SMC_LNK_ACTIVE &&
	    !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, orderly)) {
		smc_llc_link_deleting(lnk);
		return 0;
	}
	return -ENOTCONN;
}

static void smc_lgr_free(struct smc_link_group *lgr);

static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	spinlock_t *lgr_lock;
	struct smc_link *lnk;
	bool conns;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->freeing) {
		spin_unlock_bh(lgr_lock);
		return;
	}
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(lgr_lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */

	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	if (!lgr->is_smcd && !lgr->terminating) {
		/* try to send del link msg, on error free lgr immediately */
		if (lnk->state == SMC_LNK_ACTIVE &&
		    !smc_link_send_delete(lnk, true)) {
			/* reschedule in case we never receive a response */
			smc_lgr_schedule_free_work(lgr);
			spin_unlock_bh(lgr_lock);
			return;
		}
	}
	lgr->freeing = 1; /* this instance does the freeing, no new schedule */
	spin_unlock_bh(lgr_lock);
	cancel_delayed_work(&lgr->free_work);

	if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
		smc_llc_link_inactive(lnk);
	if (lgr->is_smcd && !lgr->terminating)
		smc_ism_signal_shutdown(lgr);
	smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  terminate_work);

	smc_lgr_terminate(lgr, true);
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_link_group *lgr;
	struct list_head *lgr_list;
	struct smc_link *lnk;
	spinlock_t *lgr_lock;
	u8 rndvec[3];
	int rc = 0;
	int i;

	if (ini->is_smcd && ini->vlan_id) {
		if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
			rc = SMC_CLC_DECL_ISMVLANERR;
			goto out;
		}
	}

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = SMC_CLC_DECL_MEM;
		goto ism_put_vlan;
	}
	lgr->is_smcd = ini->is_smcd;
	lgr->sync_err = 0;
	lgr->terminating = 0;
	lgr->freefast = 0;
	lgr->freeing = 0;
	lgr->vlan_id = ini->vlan_id;
	rwlock_init(&lgr->sndbufs_lock);
	rwlock_init(&lgr->rmbs_lock);
	rwlock_init(&lgr->conns_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	smc_lgr_list.num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
	lgr->conns_all = RB_ROOT;
	if (ini->is_smcd) {
		/* SMC-D specific settings */
		get_device(&ini->ism_dev->dev);
		lgr->peer_gid = ini->ism_gid;
		lgr->smcd = ini->ism_dev;
		lgr_list = &ini->ism_dev->lgr_list;
		lgr_lock = &lgr->smcd->lgr_lock;
		lgr->peer_shutdown = 0;
		atomic_inc(&ini->ism_dev->lgr_cnt);
	} else {
		/* SMC-R specific settings */
		get_device(&ini->ib_dev->ibdev->dev);
		lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
		memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
		       SMC_SYSTEMID_LEN);

		lnk = &lgr->lnk[SMC_SINGLE_LINK];
		/* initialize link */
		lnk->state = SMC_LNK_ACTIVATING;
		lnk->link_id = SMC_SINGLE_LINK;
		lnk->smcibdev = ini->ib_dev;
		lnk->ibport = ini->ib_port;
		lgr_list = &smc_lgr_list.list;
		lgr_lock = &smc_lgr_list.lock;
		lnk->path_mtu =
			ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
		if (!ini->ib_dev->initialized)
			smc_ib_setup_per_ibdev(ini->ib_dev);
		get_random_bytes(rndvec, sizeof(rndvec));
		lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
			(rndvec[2] << 16);
		rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
					  ini->vlan_id, lnk->gid,
					  &lnk->sgid_index);
		if (rc)
			goto free_lgr;
		rc = smc_llc_link_init(lnk);
		if (rc)
			goto free_lgr;
		rc = smc_wr_alloc_link_mem(lnk);
		if (rc)
			goto clear_llc_lnk;
		rc = smc_ib_create_protection_domain(lnk);
		if (rc)
			goto free_link_mem;
		rc = smc_ib_create_queue_pair(lnk);
		if (rc)
			goto dealloc_pd;
		rc = smc_wr_create_link(lnk);
		if (rc)
			goto destroy_qp;
		atomic_inc(&lgr_cnt);
		atomic_inc(&ini->ib_dev->lnk_cnt);
	}
	smc->conn.lgr = lgr;
	spin_lock_bh(lgr_lock);
	list_add(&lgr->list, lgr_list);
	spin_unlock_bh(lgr_lock);
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
clear_llc_lnk:
	smc_llc_link_clear(lnk);
free_lgr:
	kfree(lgr);
ism_put_vlan:
	if (ini->is_smcd && ini->vlan_id)
		smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
out:
	if (rc < 0) {
		if (rc == -ENOMEM)
			rc = SMC_CLC_DECL_MEM;
		else
			rc = SMC_CLC_DECL_INTERR;
	}
	return rc;
}
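
/* The error labels above unwind the SMC-R link setup strictly in reverse
 * order of construction (queue pair, protection domain, link memory, LLC
 * link, lgr allocation), so each goto target releases only what has already
 * been set up at that point.
 */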

static void smc_buf_unuse(struct smc_connection *conn,
			  struct smc_link_group *lgr)
{
	if (conn->sndbuf_desc)
		conn->sndbuf_desc->used = 0;
	if (conn->rmb_desc) {
		if (!conn->rmb_desc->regerr) {
			if (!lgr->is_smcd && !list_empty(&lgr->list)) {
				/* unregister rmb with peer */
				smc_llc_do_delete_rkey(
						&lgr->lnk[SMC_SINGLE_LINK],
						conn->rmb_desc);
			}
			conn->rmb_desc->used = 0;
		} else {
			/* buf registration failed, reuse not possible */
			write_lock_bh(&lgr->rmbs_lock);
			list_del(&conn->rmb_desc->list);
			write_unlock_bh(&lgr->rmbs_lock);

			smc_buf_free(lgr, true, conn->rmb_desc);
		}
	}
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	if (lgr->is_smcd) {
		if (!list_empty(&lgr->list))
			smc_ism_unset_conn(conn);
		tasklet_kill(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	if (!list_empty(&lgr->list)) {
		smc_lgr_unregister_conn(conn);
		smc_buf_unuse(conn, lgr); /* allow buffer reuse */
	}

	if (!lgr->conns_num)
		smc_lgr_schedule_free_work(lgr);
}

static void smc_link_clear(struct smc_link *lnk)
{
	lnk->peer_qpn = 0;
	smc_llc_link_clear(lnk);
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
	if (!atomic_dec_return(&lnk->smcibdev->lnk_cnt))
		wake_up(&lnk->smcibdev->lnks_deleted);
}

static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
			  struct smc_buf_desc *buf_desc)
{
	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

	if (is_rmb) {
		if (buf_desc->mr_rx[SMC_SINGLE_LINK])
			smc_ib_put_memory_region(
					buf_desc->mr_rx[SMC_SINGLE_LINK]);
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_FROM_DEVICE);
	} else {
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_TO_DEVICE);
	}
	sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
	if (buf_desc->pages)
		__free_pages(buf_desc->pages, buf_desc->order);
	kfree(buf_desc);
}

static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
			  struct smc_buf_desc *buf_desc)
{
	if (is_dmb) {
		/* restore original buf len */
		buf_desc->len += sizeof(struct smcd_cdc_msg);
		smc_ism_unregister_dmb(lgr->smcd, buf_desc);
	} else {
		kfree(buf_desc->cpu_addr);
	}
	kfree(buf_desc);
}

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
			 struct smc_buf_desc *buf_desc)
{
	if (lgr->is_smcd)
		smcd_buf_free(lgr, is_rmb, buf_desc);
	else
		smcr_buf_free(lgr, is_rmb, buf_desc);
}

static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(lgr, is_rmb, buf_desc);
		}
	}
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
	smc_lgr_free_bufs(lgr);
	if (lgr->is_smcd) {
		if (!lgr->terminating) {
			smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
			put_device(&lgr->smcd->dev);
		}
		if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
			wake_up(&lgr->smcd->lgrs_deleted);
	} else {
		smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
		put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
		if (!atomic_dec_return(&lgr_cnt))
			wake_up(&lgrs_deleted);
	}
	kfree(lgr);
}

void smc_lgr_forget(struct smc_link_group *lgr)
{
	struct list_head *lgr_list;
	spinlock_t *lgr_lock;

	lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	/* do not use this link group for new connections */
	if (!list_empty(lgr_list))
		list_del_init(lgr_list);
	spin_unlock_bh(lgr_lock);
}

static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
{
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		struct smc_buf_desc *buf_desc;

		list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
			buf_desc->len += sizeof(struct smcd_cdc_msg);
			smc_ism_unregister_dmb(lgr->smcd, buf_desc);
		}
	}
}

static void smc_sk_wake_ups(struct smc_sock *smc)
{
	smc->sk.sk_write_space(&smc->sk);
	smc->sk.sk_data_ready(&smc->sk);
	smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		smc_close_abort(conn);
	conn->killed = 1;
	smc->sk.sk_err = ECONNABORTED;
	smc_sk_wake_ups(smc);
	if (conn->lgr->is_smcd) {
		smc_ism_unset_conn(conn);
		if (soft)
			tasklet_kill(&conn->rx_tsklet);
		else
			tasklet_unlock_wait(&conn->rx_tsklet);
	} else {
		smc_cdc_tx_dismiss_slots(conn);
	}
	smc_lgr_unregister_conn(conn);
	smc_close_active_abort(smc);
}

static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
	if (lgr->is_smcd) {
		smc_ism_signal_shutdown(lgr);
		smcd_unregister_all_dmbs(lgr);
		smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
		put_device(&lgr->smcd->dev);
	} else {
		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

		wake_up(&lnk->wr_reg_wait);
		if (lnk->state != SMC_LNK_INACTIVE) {
			smc_link_send_delete(lnk, false);
			smc_llc_link_inactive(lnk);
		}
	}
}

/* terminate link group */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	if (lgr->terminating)
		return;	/* lgr already terminating */
	if (!soft)
		cancel_delayed_work_sync(&lgr->free_work);
	lgr->terminating = 1;
	if (!lgr->is_smcd)
		smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);

	/* kill remaining link group connections */
	read_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		read_unlock_bh(&lgr->conns_lock);
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put below */
		lock_sock(&smc->sk);
		smc_conn_kill(conn, soft);
		release_sock(&smc->sk);
		sock_put(&smc->sk); /* sock_hold above */
		read_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	read_unlock_bh(&lgr->conns_lock);
	smc_lgr_cleanup(lgr);
	if (soft)
		smc_lgr_schedule_free_work_fast(lgr);
	else
		smc_lgr_free(lgr);
}
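
/* conns_lock is dropped around smc_conn_kill() above because killing a
 * connection takes the sock lock and unregisters the connection under the
 * same conns_lock; rb_first() is therefore re-evaluated on every iteration.
 */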

/* unlink and terminate link group
 * @soft: true if link group shutdown can take its time
 *	  false if immediate link group shutdown is required
 */
void smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
	spinlock_t *lgr_lock;

	smc_lgr_list_head(lgr, &lgr_lock);
	spin_lock_bh(lgr_lock);
	if (lgr->terminating) {
		spin_unlock_bh(lgr_lock);
		return;	/* lgr already terminating */
	}
	if (!soft)
		lgr->freeing = 1;
	list_del_init(&lgr->list);
	spin_unlock_bh(lgr_lock);
	__smc_lgr_terminate(lgr, soft);
}

/* Called when IB port is terminated */
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
		if (!lgr->is_smcd &&
		    lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport) {
			list_move(&lgr->list, &lgr_free_list);
			lgr->freeing = 1;
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}
}

/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
	struct smc_link_group *lgr, *l;
	LIST_HEAD(lgr_free_list);

	/* run common cleanup function and build free list */
	spin_lock_bh(&dev->lgr_lock);
	list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
		if ((!peer_gid || lgr->peer_gid == peer_gid) &&
		    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
			if (peer_gid) /* peer triggered termination */
				lgr->peer_shutdown = 1;
			list_move(&lgr->list, &lgr_free_list);
		}
	}
	spin_unlock_bh(&dev->lgr_lock);

	/* cancel the regular free workers and actually free lgrs */
	list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		schedule_work(&lgr->terminate_work);
	}
}

/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smcd->lgr_lock);
	list_splice_init(&smcd->lgr_list, &lgr_free_list);
	list_for_each_entry(lgr, &lgr_free_list, list)
		lgr->freeing = 1;
	spin_unlock_bh(&smcd->lgr_lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (atomic_read(&smcd->lgr_cnt))
		wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}

/* Called when an SMCR device is removed or the smc module is unloaded.
 * If smcibdev is given, all SMCR link groups using this device are terminated.
 * If smcibdev is NULL, all SMCR link groups are terminated.
 */
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr, *lg;
	LIST_HEAD(lgr_free_list);

	spin_lock_bh(&smc_lgr_list.lock);
	if (!smcibdev) {
		list_splice_init(&smc_lgr_list.list, &lgr_free_list);
		list_for_each_entry(lgr, &lgr_free_list, list)
			lgr->freeing = 1;
	} else {
		list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
			if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev) {
				list_move(&lgr->list, &lgr_free_list);
				lgr->freeing = 1;
			}
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
		list_del_init(&lgr->list);
		__smc_lgr_terminate(lgr, false);
	}

	if (smcibdev) {
		if (atomic_read(&smcibdev->lnk_cnt))
			wait_event(smcibdev->lnks_deleted,
				   !atomic_read(&smcibdev->lnk_cnt));
	} else {
		if (atomic_read(&lgr_cnt))
			wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
	}
}
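
/* Both wait_event() calls above drain the remaining instances:
 * smc_link_clear() and smc_lgr_free() decrement lnk_cnt and lgr_cnt
 * respectively and wake the waiters once the last link or link group is
 * gone.
 */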

/* Determine vlan of internal TCP socket.
 * The determined vlan id is stored in ini->vlan_id.
 */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	struct net_device *ndev;
	int i, nest_lvl, rc = 0;

	ini->vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	ndev = dst->dev;
	if (is_vlan_dev(ndev)) {
		ini->vlan_id = vlan_dev_vlan_id(ndev);
		goto out_rel;
	}

	rtnl_lock();
	nest_lvl = ndev->lower_level;
	for (i = 0; i < nest_lvl; i++) {
		struct list_head *lower = &ndev->adj_list.lower;

		if (list_empty(lower))
			break;
		lower = lower->next;
		ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
		if (is_vlan_dev(ndev)) {
			ini->vlan_id = vlan_dev_vlan_id(ndev);
			break;
		}
	}
	rtnl_unlock();

out_rel:
	dst_release(dst);
out:
	return rc;
}

static bool smcr_lgr_match(struct smc_link_group *lgr,
			   struct smc_clc_msg_local *lcl,
			   enum smc_lgr_role role, u32 clcqpn)
{
	return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
		       SMC_SYSTEMID_LEN) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
			SMC_GID_SIZE) &&
		!memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
			sizeof(lcl->mac)) &&
		lgr->role == role &&
		(lgr->role == SMC_SERV ||
		 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
}

static bool smcd_lgr_match(struct smc_link_group *lgr,
			   struct smcd_dev *smcismdev, u64 peer_gid)
{
	return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}
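
/* A link group is only reused for a peer that matches on the full identity:
 * system id, GID, MAC and role/QP number for SMC-R, or ISM device and peer
 * GID for SMC-D (see smc_conn_create() below).
 */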

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
	struct smc_connection *conn = &smc->conn;
	struct list_head *lgr_list;
	struct smc_link_group *lgr;
	enum smc_lgr_role role;
	spinlock_t *lgr_lock;
	int rc = 0;

	lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
	lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
	ini->cln_first_contact = SMC_FIRST_CONTACT;
	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	if (role == SMC_CLNT && ini->srv_first_contact)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(lgr_lock);
	list_for_each_entry(lgr, lgr_list, list) {
		write_lock_bh(&lgr->conns_lock);
		if ((ini->is_smcd ?
		     smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
		     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
		    !lgr->sync_err &&
		    lgr->vlan_id == ini->vlan_id &&
		    (role == SMC_CLNT ||
		     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
			/* link group found */
			ini->cln_first_contact = SMC_REUSE_CONTACT;
			conn->lgr = lgr;
			smc_lgr_register_conn(conn); /* add smc conn to lgr */
			if (delayed_work_pending(&lgr->free_work))
				cancel_delayed_work(&lgr->free_work);
			write_unlock_bh(&lgr->conns_lock);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(lgr_lock);

	if (role == SMC_CLNT && !ini->srv_first_contact &&
	    ini->cln_first_contact == SMC_FIRST_CONTACT) {
		/* Server reuses a link group, but Client wants to start
		 * a new one
		 * send out_of_sync decline, reason synchr. error
		 */
		return SMC_CLC_DECL_SYNCERR;
	}

create:
	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		rc = smc_lgr_create(smc, ini);
		if (rc)
			goto out;
		lgr = conn->lgr;
		write_lock_bh(&lgr->conns_lock);
		smc_lgr_register_conn(conn); /* add smc conn to lgr */
		write_unlock_bh(&lgr->conns_lock);
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
	conn->urg_state = SMC_URG_READ;
	if (ini->is_smcd) {
		conn->rx_off = sizeof(struct smcd_cdc_msg);
		smcd_cdc_rx_init(conn); /* init tasklet for this conn */
	}
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc;
}

/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		size = SMC_BUF_MIN_SIZE;

	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
	u32 size;

	size = 0x00000001 << (((int)compressed) + 14);
	return (int)size;
}
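
/* Worked example: a request for 40000 bytes gives (40000 - 1) >> 14 = 2,
 * ilog2(2) + 1 = 2, and uncompressing 2 yields 1 << (2 + 14) = 64 KB, so the
 * application never ends up with less buffer space than it asked for.
 */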

/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
					     rwlock_t *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	read_lock_bh(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			read_unlock_bh(lock);
			return buf_slot;
		}
	}
	read_unlock_bh(lock);
	return NULL;
}
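
/* cmpxchg() flips 'used' from 0 to 1 atomically, so a slot can be claimed
 * while holding only the read lock, without racing against other connections
 * scanning the same list.
 */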

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}
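
/* Example: for a 64 KB RMB the 10% rule yields 6553 bytes; the result is
 * additionally capped at SOCK_MIN_SNDBUF / 2, whichever of the two is
 * smaller.
 */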

static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
						bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	struct smc_link *lnk;
	int rc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->order = get_order(bufsize);
	buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
				      __GFP_NOMEMALLOC | __GFP_COMP |
				      __GFP_NORETRY | __GFP_ZERO,
				      buf_desc->order);
	if (!buf_desc->pages) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);

	/* build the sg table from the pages */
	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
			    GFP_KERNEL);
	if (rc) {
		smc_buf_free(lgr, is_rmb, buf_desc);
		return ERR_PTR(rc);
	}
	sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
		   buf_desc->cpu_addr, bufsize);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1) {
		smc_buf_free(lgr, is_rmb, buf_desc);
		return ERR_PTR(-EAGAIN);
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc);
		if (rc) {
			smc_buf_free(lgr, is_rmb, buf_desc);
			return ERR_PTR(rc);
		}
	}

	buf_desc->len = bufsize;
	return buf_desc;
}

#define SMCD_DMBE_SIZES		7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */

static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
						bool is_dmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	int rc;

	if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
		return ERR_PTR(-EAGAIN);

	/* try to alloc a new DMB */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);
	if (is_dmb) {
		rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
		if (rc) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
		/* CDC header stored in buf. So, pretend it was smaller */
		buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
	} else {
		buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
					     __GFP_NOWARN | __GFP_NORETRY |
					     __GFP_NOMEMALLOC);
		if (!buf_desc->cpu_addr) {
			kfree(buf_desc);
			return ERR_PTR(-EAGAIN);
		}
		buf_desc->len = bufsize;
	}
	return buf_desc;
}

static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	int sk_buf_size;
	rwlock_t *lock;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {

		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
		if (buf_desc) {
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		if (is_smcd)
			buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
		else
			buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		write_lock_bh(lock);
		list_add(&buf_desc->list, buf_list);
		write_unlock_bh(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return -ENOMEM;

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit =
			smc_rmb_wnd_update_limit(buf_desc->len);
		if (is_smcd)
			smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
	} else {
		conn->sndbuf_desc = buf_desc;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}

void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->rmb_desc, DMA_FROM_DEVICE);
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!conn->lgr || conn->lgr->is_smcd)
		return;
	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->rmb_desc, DMA_FROM_DEVICE);
}
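
/* All four sync helpers above are no-ops for SMC-D: only SMC-R buffers are
 * DMA-mapped to an IB device, so there is nothing to sync for ISM.
 */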

/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, is_smcd, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, is_smcd, true);
	if (rc)
		smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
	return rc;
}

static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}
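
/* for_each_clear_bit() alone would race with concurrent callers; the extra
 * test_and_set_bit() makes the reservation atomic, and -ENOSPC is returned
 * once all SMC_RMBS_PER_LGR_MAX rtoken slots are in use.
 */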

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
		    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
	lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
	return i;
}

/* delete an rtoken */
int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
{
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
			lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;

			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
					  clc->rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}

static void smc_core_going_away(void)
{
	struct smc_ib_device *smcibdev;
	struct smcd_dev *smcd;

	spin_lock(&smc_ib_devices.lock);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		int i;

		for (i = 0; i < SMC_MAX_PORTS; i++)
			set_bit(i, smcibdev->ports_going_away);
	}
	spin_unlock(&smc_ib_devices.lock);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		smcd->going_away = 1;
	}
	spin_unlock(&smcd_dev_list.lock);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
	struct smcd_dev *smcd;

	smc_core_going_away();

	smc_smcr_terminate_all(NULL);

	spin_lock(&smcd_dev_list.lock);
	list_for_each_entry(smcd, &smcd_dev_list.list, list)
		smc_smcd_terminate_all(smcd);
	spin_unlock(&smcd_dev_list.lock);
}

static int smc_core_reboot_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	smc_lgrs_shutdown();
	smc_ib_unregister_client();
	return 0;
}

static struct notifier_block smc_reboot_notifier = {
	.notifier_call = smc_core_reboot_event,
};

int __init smc_core_init(void)
{
	return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
	unregister_reboot_notifier(&smc_reboot_notifier);
	smc_lgrs_shutdown();
}