// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Basic Transport Functions exploiting Infiniband API
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"

#define SMC_LGR_NUM_INCR                256
#define SMC_LGR_FREE_DELAY_SERV         (600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT         (SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
#define SMC_LGR_FREE_DELAY_FAST         (8 * HZ)

static struct smc_lgr_list smc_lgr_list = {     /* established link groups */
        .lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
        .list = LIST_HEAD_INIT(smc_lgr_list.list),
        .num = 0,
};

static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);

static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
                         struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);

/* return head of link group list and its lock for a given link group */
static inline struct list_head *smc_lgr_list_head(struct smc_link_group *lgr,
                                                  spinlock_t **lgr_lock)
{
        if (lgr->is_smcd) {
                *lgr_lock = &lgr->smcd->lgr_lock;
                return &lgr->smcd->lgr_list;
        }

        *lgr_lock = &smc_lgr_list.lock;
        return &smc_lgr_list.list;
}

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
        /* client link group creation always follows the server link group
         * creation. For client use a somewhat higher removal delay time,
         * otherwise there is a risk of out-of-sync link groups.
         */
        if (!lgr->freeing && !lgr->freefast) {
                mod_delayed_work(system_wq, &lgr->free_work,
                                 (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
                                                SMC_LGR_FREE_DELAY_CLNT :
                                                SMC_LGR_FREE_DELAY_SERV);
        }
}

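/* schedule freeing of the link group using the short 'fast' delay */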
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
{
        if (!lgr->freeing && !lgr->freefast) {
                lgr->freefast = 1;
                mod_delayed_work(system_wq, &lgr->free_work,
                                 SMC_LGR_FREE_DELAY_FAST);
        }
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn        connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
        struct rb_node **link, *parent = NULL;
        u32 token = conn->alert_token_local;

        link = &conn->lgr->conns_all.rb_node;
        while (*link) {
                struct smc_connection *cur = rb_entry(*link,
                                        struct smc_connection, alert_node);

                parent = *link;
                if (cur->alert_token_local > token)
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }
        /* Put the new node there */
        rb_link_node(&conn->alert_node, parent, link);
        rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static void smc_lgr_register_conn(struct smc_connection *conn)
{
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        static atomic_t nexttoken = ATOMIC_INIT(0);

        /* find a new alert_token_local value not yet used by some connection
         * in this link group
         */
        sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
        while (!conn->alert_token_local) {
                conn->alert_token_local = atomic_inc_return(&nexttoken);
                if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
                        conn->alert_token_local = 0;
        }
        smc_lgr_add_alert_token(conn);
        conn->lgr->conns_num++;
}

/* Unregister connection and reset the alert token of the given connection */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        struct smc_link_group *lgr = conn->lgr;

        rb_erase(&conn->alert_node, &lgr->conns_all);
        lgr->conns_num--;
        conn->alert_token_local = 0;
        sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection from lgr */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        if (!lgr)
                return;
        write_lock_bh(&lgr->conns_lock);
        if (conn->alert_token_local)
                __smc_lgr_unregister_conn(conn);
        write_unlock_bh(&lgr->conns_lock);
        conn->lgr = NULL;
}

void smc_lgr_cleanup_early(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        if (!lgr)
                return;

        smc_conn_free(conn);
        smc_lgr_forget(lgr);
        smc_lgr_schedule_free_work_fast(lgr);
}

/* Send delete link, either as client to request the initiation
 * of the DELETE LINK sequence from server; or as server to
 * initiate the delete processing. See smc_llc_rx_delete_link().
 */
static int smc_link_send_delete(struct smc_link *lnk, bool orderly)
{
        if (lnk->state == SMC_LNK_ACTIVE &&
            !smc_llc_send_delete_link(lnk, SMC_LLC_REQ, orderly)) {
                smc_llc_link_deleting(lnk);
                return 0;
        }
        return -ENOTCONN;
}

static void smc_lgr_free(struct smc_link_group *lgr);

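/* delayed work to free a link group that is no longer used; for SMC-R it
 * first tries to send a DELETE LINK message to the peer
 */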
static void smc_lgr_free_work(struct work_struct *work)
{
        struct smc_link_group *lgr = container_of(to_delayed_work(work),
                                                  struct smc_link_group,
                                                  free_work);
        spinlock_t *lgr_lock;
        struct smc_link *lnk;
        bool conns;

        smc_lgr_list_head(lgr, &lgr_lock);
        spin_lock_bh(lgr_lock);
        if (lgr->freeing) {
                spin_unlock_bh(lgr_lock);
                return;
        }
        read_lock_bh(&lgr->conns_lock);
        conns = RB_EMPTY_ROOT(&lgr->conns_all);
        read_unlock_bh(&lgr->conns_lock);
        if (!conns) { /* number of lgr connections is no longer zero */
                spin_unlock_bh(lgr_lock);
                return;
        }
        list_del_init(&lgr->list); /* remove from smc_lgr_list */

        lnk = &lgr->lnk[SMC_SINGLE_LINK];
        if (!lgr->is_smcd && !lgr->terminating) {
                /* try to send del link msg, on error free lgr immediately */
                if (lnk->state == SMC_LNK_ACTIVE &&
                    !smc_link_send_delete(lnk, true)) {
                        /* reschedule in case we never receive a response */
                        smc_lgr_schedule_free_work(lgr);
                        spin_unlock_bh(lgr_lock);
                        return;
                }
        }
        lgr->freeing = 1; /* this instance does the freeing, no new schedule */
        spin_unlock_bh(lgr_lock);
        cancel_delayed_work(&lgr->free_work);

        if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
                smc_llc_link_inactive(lnk);
        if (lgr->is_smcd && !lgr->terminating)
                smc_ism_signal_shutdown(lgr);
        smc_lgr_free(lgr);
}

static void smc_lgr_terminate_work(struct work_struct *work)
{
        struct smc_link_group *lgr = container_of(work, struct smc_link_group,
                                                  terminate_work);

        __smc_lgr_terminate(lgr, true);
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
{
        struct smc_link_group *lgr;
        struct list_head *lgr_list;
        struct smc_link *lnk;
        spinlock_t *lgr_lock;
        u8 rndvec[3];
        int rc = 0;
        int i;

        if (ini->is_smcd && ini->vlan_id) {
                if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
                        rc = SMC_CLC_DECL_ISMVLANERR;
                        goto out;
                }
        }

        lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
        if (!lgr) {
                rc = SMC_CLC_DECL_MEM;
                goto ism_put_vlan;
        }
        lgr->is_smcd = ini->is_smcd;
        lgr->sync_err = 0;
        lgr->terminating = 0;
        lgr->freefast = 0;
        lgr->freeing = 0;
        lgr->vlan_id = ini->vlan_id;
        rwlock_init(&lgr->sndbufs_lock);
        rwlock_init(&lgr->rmbs_lock);
        rwlock_init(&lgr->conns_lock);
        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                INIT_LIST_HEAD(&lgr->sndbufs[i]);
                INIT_LIST_HEAD(&lgr->rmbs[i]);
        }
        smc_lgr_list.num += SMC_LGR_NUM_INCR;
        memcpy(&lgr->id, (u8 *)&smc_lgr_list.num, SMC_LGR_ID_SIZE);
        INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
        INIT_WORK(&lgr->terminate_work, smc_lgr_terminate_work);
        lgr->conns_all = RB_ROOT;
        if (ini->is_smcd) {
                /* SMC-D specific settings */
                get_device(&ini->ism_dev->dev);
                lgr->peer_gid = ini->ism_gid;
                lgr->smcd = ini->ism_dev;
                lgr_list = &ini->ism_dev->lgr_list;
                lgr_lock = &lgr->smcd->lgr_lock;
                lgr->peer_shutdown = 0;
                atomic_inc(&ini->ism_dev->lgr_cnt);
        } else {
                /* SMC-R specific settings */
                get_device(&ini->ib_dev->ibdev->dev);
                lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
                memcpy(lgr->peer_systemid, ini->ib_lcl->id_for_peer,
                       SMC_SYSTEMID_LEN);

                lnk = &lgr->lnk[SMC_SINGLE_LINK];
                /* initialize link */
                lnk->state = SMC_LNK_ACTIVATING;
                lnk->link_id = SMC_SINGLE_LINK;
                lnk->smcibdev = ini->ib_dev;
                lnk->ibport = ini->ib_port;
                lgr_list = &smc_lgr_list.list;
                lgr_lock = &smc_lgr_list.lock;
                lnk->path_mtu =
                        ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
                if (!ini->ib_dev->initialized)
                        smc_ib_setup_per_ibdev(ini->ib_dev);
                get_random_bytes(rndvec, sizeof(rndvec));
                lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) +
                        (rndvec[2] << 16);
                rc = smc_ib_determine_gid(lnk->smcibdev, lnk->ibport,
                                          ini->vlan_id, lnk->gid,
                                          &lnk->sgid_index);
                if (rc)
                        goto free_lgr;
                rc = smc_llc_link_init(lnk);
                if (rc)
                        goto free_lgr;
                rc = smc_wr_alloc_link_mem(lnk);
                if (rc)
                        goto clear_llc_lnk;
                rc = smc_ib_create_protection_domain(lnk);
                if (rc)
                        goto free_link_mem;
                rc = smc_ib_create_queue_pair(lnk);
                if (rc)
                        goto dealloc_pd;
                rc = smc_wr_create_link(lnk);
                if (rc)
                        goto destroy_qp;
                atomic_inc(&lgr_cnt);
                atomic_inc(&ini->ib_dev->lnk_cnt);
        }
        smc->conn.lgr = lgr;
        spin_lock_bh(lgr_lock);
        list_add(&lgr->list, lgr_list);
        spin_unlock_bh(lgr_lock);
        return 0;

destroy_qp:
        smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
        smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
        smc_wr_free_link_mem(lnk);
clear_llc_lnk:
        smc_llc_link_clear(lnk);
free_lgr:
        kfree(lgr);
ism_put_vlan:
        if (ini->is_smcd && ini->vlan_id)
                smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
out:
        if (rc < 0) {
                if (rc == -ENOMEM)
                        rc = SMC_CLC_DECL_MEM;
                else
                        rc = SMC_CLC_DECL_INTERR;
        }
        return rc;
}

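/* return the buffers of a finished connection to the link group for reuse;
 * an rmb whose registration with the peer failed cannot be reused and is
 * freed instead
 */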
static void smc_buf_unuse(struct smc_connection *conn,
                          struct smc_link_group *lgr)
{
        if (conn->sndbuf_desc)
                conn->sndbuf_desc->used = 0;
        if (conn->rmb_desc) {
                if (!conn->rmb_desc->regerr) {
                        if (!lgr->is_smcd && !list_empty(&lgr->list)) {
                                /* unregister rmb with peer */
                                smc_llc_do_delete_rkey(
                                                &lgr->lnk[SMC_SINGLE_LINK],
                                                conn->rmb_desc);
                        }
                        conn->rmb_desc->used = 0;
                } else {
                        /* buf registration failed, reuse not possible */
                        write_lock_bh(&lgr->rmbs_lock);
                        list_del(&conn->rmb_desc->list);
                        write_unlock_bh(&lgr->rmbs_lock);

                        smc_buf_free(lgr, true, conn->rmb_desc);
                }
        }
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        if (!lgr)
                return;
        if (lgr->is_smcd) {
                if (!list_empty(&lgr->list))
                        smc_ism_unset_conn(conn);
                tasklet_kill(&conn->rx_tsklet);
        } else {
                smc_cdc_tx_dismiss_slots(conn);
        }
        if (!list_empty(&lgr->list)) {
                smc_lgr_unregister_conn(conn);
                smc_buf_unuse(conn, lgr); /* allow buffer reuse */
        }

        if (!lgr->conns_num)
                smc_lgr_schedule_free_work(lgr);
}

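/* tear down an SMC-R link: reset the QP, free its work request and link
 * memory, and wake up a waiting device shutdown once the last link is gone
 */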
static void smc_link_clear(struct smc_link *lnk)
{
        lnk->peer_qpn = 0;
        smc_llc_link_clear(lnk);
        smc_ib_modify_qp_reset(lnk);
        smc_wr_free_link(lnk);
        smc_ib_destroy_queue_pair(lnk);
        smc_ib_dealloc_protection_domain(lnk);
        smc_wr_free_link_mem(lnk);
        if (!atomic_dec_return(&lnk->smcibdev->lnk_cnt))
                wake_up(&lnk->smcibdev->lnks_deleted);
}

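/* free an SMC-R buffer: put the memory region of an rmb, unmap the buffer
 * from the device, and release pages and descriptor
 */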
static void smcr_buf_free(struct smc_link_group *lgr, bool is_rmb,
                          struct smc_buf_desc *buf_desc)
{
        struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

        if (is_rmb) {
                if (buf_desc->mr_rx[SMC_SINGLE_LINK])
                        smc_ib_put_memory_region(
                                        buf_desc->mr_rx[SMC_SINGLE_LINK]);
                smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
                                    DMA_FROM_DEVICE);
        } else {
                smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
                                    DMA_TO_DEVICE);
        }
        sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
        if (buf_desc->pages)
                __free_pages(buf_desc->pages, buf_desc->order);
        kfree(buf_desc);
}

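/* free an SMC-D buffer: unregister a DMB from the ISM device; plain
 * sndbufs are just freed
 */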
static void smcd_buf_free(struct smc_link_group *lgr, bool is_dmb,
                          struct smc_buf_desc *buf_desc)
{
        if (is_dmb) {
                /* restore original buf len */
                buf_desc->len += sizeof(struct smcd_cdc_msg);
                smc_ism_unregister_dmb(lgr->smcd, buf_desc);
        } else {
                kfree(buf_desc->cpu_addr);
        }
        kfree(buf_desc);
}

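/* free a buffer, dispatching to the SMC-D or SMC-R variant */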
static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
                         struct smc_buf_desc *buf_desc)
{
        if (lgr->is_smcd)
                smcd_buf_free(lgr, is_rmb, buf_desc);
        else
                smcr_buf_free(lgr, is_rmb, buf_desc);
}

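/* free all rmbs or all sndbufs of a link group */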
static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
        struct smc_buf_desc *buf_desc, *bf_desc;
        struct list_head *buf_list;
        int i;

        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                if (is_rmb)
                        buf_list = &lgr->rmbs[i];
                else
                        buf_list = &lgr->sndbufs[i];
                list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
                                         list) {
                        list_del(&buf_desc->list);
                        smc_buf_free(lgr, is_rmb, buf_desc);
                }
        }
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
        /* free send buffers */
        __smc_lgr_free_bufs(lgr, false);
        /* free rmbs */
        __smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
static void smc_lgr_free(struct smc_link_group *lgr)
{
        smc_lgr_free_bufs(lgr);
        if (lgr->is_smcd) {
                if (!lgr->terminating) {
                        smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
                        put_device(&lgr->smcd->dev);
                }
                if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
                        wake_up(&lgr->smcd->lgrs_deleted);
        } else {
                smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
                put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
                if (!atomic_dec_return(&lgr_cnt))
                        wake_up(&lgrs_deleted);
        }
        kfree(lgr);
}

void smc_lgr_forget(struct smc_link_group *lgr)
{
        spinlock_t *lgr_lock;

        smc_lgr_list_head(lgr, &lgr_lock);
        spin_lock_bh(lgr_lock);
        /* do not use this link group for new connections */
        if (!list_empty(&lgr->list))
                list_del_init(&lgr->list);
        spin_unlock_bh(lgr_lock);
}

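/* unregister all DMBs of a terminating SMC-D link group */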
static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
{
        int i;

        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                struct smc_buf_desc *buf_desc;

                list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
                        buf_desc->len += sizeof(struct smcd_cdc_msg);
                        smc_ism_unregister_dmb(lgr->smcd, buf_desc);
                }
        }
}

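/* wake up all waiters of an SMC socket */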
static void smc_sk_wake_ups(struct smc_sock *smc)
{
        smc->sk.sk_write_space(&smc->sk);
        smc->sk.sk_data_ready(&smc->sk);
        smc->sk.sk_state_change(&smc->sk);
}

/* kill a connection */
static void smc_conn_kill(struct smc_connection *conn, bool soft)
{
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

        if (conn->lgr->is_smcd && conn->lgr->peer_shutdown)
                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
        else
                smc_close_abort(conn);
        conn->killed = 1;
        smc->sk.sk_err = ECONNABORTED;
        smc_sk_wake_ups(smc);
        if (conn->lgr->is_smcd) {
                smc_ism_unset_conn(conn);
                if (soft)
                        tasklet_kill(&conn->rx_tsklet);
                else
                        tasklet_unlock_wait(&conn->rx_tsklet);
        } else {
                smc_cdc_tx_dismiss_slots(conn);
        }
        smc_lgr_unregister_conn(conn);
        smc_close_active_abort(smc);
}

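/* protocol-specific part of link group termination */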
static void smc_lgr_cleanup(struct smc_link_group *lgr)
{
        if (lgr->is_smcd) {
                smc_ism_signal_shutdown(lgr);
                smcd_unregister_all_dmbs(lgr);
                smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
                put_device(&lgr->smcd->dev);
        } else {
                struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

                if (lnk->state != SMC_LNK_INACTIVE)
                        smc_llc_link_inactive(lnk);
        }
}

/* terminate link group
 * @soft: true if link group shutdown can take its time
 *        false if immediate link group shutdown is required
 */
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft)
{
        struct smc_connection *conn;
        struct smc_sock *smc;
        struct rb_node *node;

        if (lgr->terminating)
                return; /* lgr already terminating */
        if (!soft)
                cancel_delayed_work_sync(&lgr->free_work);
        lgr->terminating = 1;
        if (!lgr->is_smcd)
                smc_llc_link_inactive(&lgr->lnk[SMC_SINGLE_LINK]);

        /* kill remaining link group connections */
        read_lock_bh(&lgr->conns_lock);
        node = rb_first(&lgr->conns_all);
        while (node) {
                read_unlock_bh(&lgr->conns_lock);
                conn = rb_entry(node, struct smc_connection, alert_node);
                smc = container_of(conn, struct smc_sock, conn);
                sock_hold(&smc->sk); /* sock_put below */
                lock_sock(&smc->sk);
                smc_conn_kill(conn, soft);
                release_sock(&smc->sk);
                sock_put(&smc->sk); /* sock_hold above */
                read_lock_bh(&lgr->conns_lock);
                node = rb_first(&lgr->conns_all);
        }
        read_unlock_bh(&lgr->conns_lock);
        smc_lgr_cleanup(lgr);
        if (soft)
                smc_lgr_schedule_free_work_fast(lgr);
        else
                smc_lgr_free(lgr);
}

/* unlink link group and schedule termination */
void smc_lgr_terminate_sched(struct smc_link_group *lgr)
{
        spinlock_t *lgr_lock;

        smc_lgr_list_head(lgr, &lgr_lock);
        spin_lock_bh(lgr_lock);
        if (list_empty(&lgr->list) || lgr->terminating || lgr->freeing) {
                spin_unlock_bh(lgr_lock);
                return; /* lgr already terminating */
        }
        list_del_init(&lgr->list);
        spin_unlock_bh(lgr_lock);
        schedule_work(&lgr->terminate_work);
}

/* Called when IB port is terminated */
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
        struct smc_link_group *lgr, *l;
        LIST_HEAD(lgr_free_list);

        spin_lock_bh(&smc_lgr_list.lock);
        list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
                if (!lgr->is_smcd &&
                    lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
                    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport) {
                        list_move(&lgr->list, &lgr_free_list);
                        lgr->freeing = 1;
                }
        }
        spin_unlock_bh(&smc_lgr_list.lock);

        list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
                list_del_init(&lgr->list);
                __smc_lgr_terminate(lgr, false);
        }
}

/* Called when peer lgr shutdown (regularly or abnormally) is received */
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
{
        struct smc_link_group *lgr, *l;
        LIST_HEAD(lgr_free_list);

        /* run common cleanup function and build free list */
        spin_lock_bh(&dev->lgr_lock);
        list_for_each_entry_safe(lgr, l, &dev->lgr_list, list) {
                if ((!peer_gid || lgr->peer_gid == peer_gid) &&
                    (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
                        if (peer_gid) /* peer triggered termination */
                                lgr->peer_shutdown = 1;
                        list_move(&lgr->list, &lgr_free_list);
                }
        }
        spin_unlock_bh(&dev->lgr_lock);

        /* cancel the regular free workers and actually free lgrs */
        list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
                list_del_init(&lgr->list);
                schedule_work(&lgr->terminate_work);
        }
}

/* Called when an SMCD device is removed or the smc module is unloaded */
void smc_smcd_terminate_all(struct smcd_dev *smcd)
{
        struct smc_link_group *lgr, *lg;
        LIST_HEAD(lgr_free_list);

        spin_lock_bh(&smcd->lgr_lock);
        list_splice_init(&smcd->lgr_list, &lgr_free_list);
        list_for_each_entry(lgr, &lgr_free_list, list)
                lgr->freeing = 1;
        spin_unlock_bh(&smcd->lgr_lock);

        list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
                list_del_init(&lgr->list);
                __smc_lgr_terminate(lgr, false);
        }

        if (atomic_read(&smcd->lgr_cnt))
                wait_event(smcd->lgrs_deleted, !atomic_read(&smcd->lgr_cnt));
}

/* Called when an SMCR device is removed or the smc module is unloaded.
 * If smcibdev is given, all SMCR link groups using this device are terminated.
 * If smcibdev is NULL, all SMCR link groups are terminated.
 */
void smc_smcr_terminate_all(struct smc_ib_device *smcibdev)
{
        struct smc_link_group *lgr, *lg;
        LIST_HEAD(lgr_free_list);

        spin_lock_bh(&smc_lgr_list.lock);
        if (!smcibdev) {
                list_splice_init(&smc_lgr_list.list, &lgr_free_list);
                list_for_each_entry(lgr, &lgr_free_list, list)
                        lgr->freeing = 1;
        } else {
                list_for_each_entry_safe(lgr, lg, &smc_lgr_list.list, list) {
                        if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev) {
                                list_move(&lgr->list, &lgr_free_list);
                                lgr->freeing = 1;
                        }
                }
        }
        spin_unlock_bh(&smc_lgr_list.lock);

        list_for_each_entry_safe(lgr, lg, &lgr_free_list, list) {
                list_del_init(&lgr->list);
                __smc_lgr_terminate(lgr, false);
        }

        if (smcibdev) {
                if (atomic_read(&smcibdev->lnk_cnt))
                        wait_event(smcibdev->lnks_deleted,
                                   !atomic_read(&smcibdev->lnk_cnt));
        } else {
                if (atomic_read(&lgr_cnt))
                        wait_event(lgrs_deleted, !atomic_read(&lgr_cnt));
        }
}

/* Determine vlan of internal TCP socket.
 * @ini: init info; the determined vlan id is stored into ini->vlan_id
 */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
        struct dst_entry *dst = sk_dst_get(clcsock->sk);
        struct net_device *ndev;
        int i, nest_lvl, rc = 0;

        ini->vlan_id = 0;
        if (!dst) {
                rc = -ENOTCONN;
                goto out;
        }
        if (!dst->dev) {
                rc = -ENODEV;
                goto out_rel;
        }

        ndev = dst->dev;
        if (is_vlan_dev(ndev)) {
                ini->vlan_id = vlan_dev_vlan_id(ndev);
                goto out_rel;
        }

        rtnl_lock();
        nest_lvl = ndev->lower_level;
        for (i = 0; i < nest_lvl; i++) {
                struct list_head *lower = &ndev->adj_list.lower;

                if (list_empty(lower))
                        break;
                lower = lower->next;
                ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
                if (is_vlan_dev(ndev)) {
                        ini->vlan_id = vlan_dev_vlan_id(ndev);
                        break;
                }
        }
        rtnl_unlock();

out_rel:
        dst_release(dst);
out:
        return rc;
}

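/* check if an existing SMC-R link group matches the proposed connection:
 * same peer system id, gid, mac and role; on the client side additionally
 * the same peer QP number
 */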
static bool smcr_lgr_match(struct smc_link_group *lgr,
                           struct smc_clc_msg_local *lcl,
                           enum smc_lgr_role role, u32 clcqpn)
{
        return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
                       SMC_SYSTEMID_LEN) &&
                !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
                        SMC_GID_SIZE) &&
                !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
                        sizeof(lcl->mac)) &&
                lgr->role == role &&
                (lgr->role == SMC_SERV ||
                 lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
}

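/* check if an existing SMC-D link group matches peer gid and ISM device */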
static bool smcd_lgr_match(struct smc_link_group *lgr,
                           struct smcd_dev *smcismdev, u64 peer_gid)
{
        return lgr->peer_gid == peer_gid && lgr->smcd == smcismdev;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
        struct smc_connection *conn = &smc->conn;
        struct list_head *lgr_list;
        struct smc_link_group *lgr;
        enum smc_lgr_role role;
        spinlock_t *lgr_lock;
        int rc = 0;

        lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
        lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
        ini->cln_first_contact = SMC_FIRST_CONTACT;
        role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
        if (role == SMC_CLNT && ini->srv_first_contact)
                /* create new link group as well */
                goto create;

        /* determine if an existing link group can be reused */
        spin_lock_bh(lgr_lock);
        list_for_each_entry(lgr, lgr_list, list) {
                write_lock_bh(&lgr->conns_lock);
                if ((ini->is_smcd ?
                     smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
                     smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
                    !lgr->sync_err &&
                    lgr->vlan_id == ini->vlan_id &&
                    (role == SMC_CLNT ||
                     lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
                        /* link group found */
                        ini->cln_first_contact = SMC_REUSE_CONTACT;
                        conn->lgr = lgr;
                        smc_lgr_register_conn(conn); /* add smc conn to lgr */
                        if (delayed_work_pending(&lgr->free_work))
                                cancel_delayed_work(&lgr->free_work);
                        write_unlock_bh(&lgr->conns_lock);
                        break;
                }
                write_unlock_bh(&lgr->conns_lock);
        }
        spin_unlock_bh(lgr_lock);

        if (role == SMC_CLNT && !ini->srv_first_contact &&
            ini->cln_first_contact == SMC_FIRST_CONTACT) {
                /* Server reuses a link group, but Client wants to start
                 * a new one
                 * send out_of_sync decline, reason synchr. error
                 */
                return SMC_CLC_DECL_SYNCERR;
        }

create:
        if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
                rc = smc_lgr_create(smc, ini);
                if (rc)
                        goto out;
                lgr = conn->lgr;
                write_lock_bh(&lgr->conns_lock);
                smc_lgr_register_conn(conn); /* add smc conn to lgr */
                write_unlock_bh(&lgr->conns_lock);
        }
        conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
        conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
        conn->urg_state = SMC_URG_READ;
        if (ini->is_smcd) {
                conn->rx_off = sizeof(struct smcd_cdc_msg);
                smcd_cdc_rx_init(conn); /* init tasklet for this conn */
        }
#ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&conn->acurs_lock);
#endif

out:
        return rc;
}

/* convert the RMB size into the compressed notation - minimum 16K.
 * In contrast to plain ilog2, this rounds towards the next power of 2,
 * so the socket application gets at least its desired sndbuf / rcvbuf size.
 */
static u8 smc_compress_bufsize(int size)
{
        u8 compressed;

        if (size <= SMC_BUF_MIN_SIZE)
                return 0;

        size = (size - 1) >> 14;
        compressed = ilog2(size) + 1;
        if (compressed >= SMC_RMBE_SIZES)
                compressed = SMC_RMBE_SIZES - 1;
        return compressed;
}

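/* Worked example (assuming SMC_BUF_MIN_SIZE is the 16KB minimum): a
 * requested size of 64KB gives size = (65536 - 1) >> 14 = 3 and
 * ilog2(3) + 1 = 2, and smc_uncompress_bufsize(2) maps back to
 * 1 << (2 + 14) = 64KB. Any size <= SMC_BUF_MIN_SIZE compresses to 0,
 * i.e. the 16KB minimum.
 */
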
/* convert the RMB size from compressed notation into integer */
int smc_uncompress_bufsize(u8 compressed)
{
        u32 size;

        size = 0x00000001 << (((int)compressed) + 14);
        return (int)size;
}

/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
                                             rwlock_t *lock,
                                             struct list_head *buf_list)
{
        struct smc_buf_desc *buf_slot;

        read_lock_bh(lock);
        list_for_each_entry(buf_slot, buf_list, list) {
                if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
                        read_unlock_bh(lock);
                        return buf_slot;
                }
        }
        read_unlock_bh(lock);
        return NULL;
}

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
        return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

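/* allocate an SMC-R buffer: get pages, build and map the scatterlist, and
 * register a memory region for an rmb
 */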
static struct smc_buf_desc *smcr_new_buf_create(struct smc_link_group *lgr,
                                                bool is_rmb, int bufsize)
{
        struct smc_buf_desc *buf_desc;
        struct smc_link *lnk;
        int rc;

        /* try to alloc a new buffer */
        buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
        if (!buf_desc)
                return ERR_PTR(-ENOMEM);

        buf_desc->order = get_order(bufsize);
        buf_desc->pages = alloc_pages(GFP_KERNEL | __GFP_NOWARN |
                                      __GFP_NOMEMALLOC | __GFP_COMP |
                                      __GFP_NORETRY | __GFP_ZERO,
                                      buf_desc->order);
        if (!buf_desc->pages) {
                kfree(buf_desc);
                return ERR_PTR(-EAGAIN);
        }
        buf_desc->cpu_addr = (void *)page_address(buf_desc->pages);

        /* build the sg table from the pages */
        lnk = &lgr->lnk[SMC_SINGLE_LINK];
        rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
                            GFP_KERNEL);
        if (rc) {
                smc_buf_free(lgr, is_rmb, buf_desc);
                return ERR_PTR(rc);
        }
        sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
                   buf_desc->cpu_addr, bufsize);

        /* map sg table to DMA address */
        rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
                               is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
        /* SMC protocol depends on mapping to one DMA address only */
        if (rc != 1)  {
                smc_buf_free(lgr, is_rmb, buf_desc);
                return ERR_PTR(-EAGAIN);
        }

        /* create a new memory region for the RMB */
        if (is_rmb) {
                rc = smc_ib_get_memory_region(lnk->roce_pd,
                                              IB_ACCESS_REMOTE_WRITE |
                                              IB_ACCESS_LOCAL_WRITE,
                                              buf_desc);
                if (rc) {
                        smc_buf_free(lgr, is_rmb, buf_desc);
                        return ERR_PTR(rc);
                }
        }

        buf_desc->len = bufsize;
        return buf_desc;
}

#define SMCD_DMBE_SIZES         7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */

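/* allocate an SMC-D buffer: register a DMB with the ISM device for an rmb,
 * otherwise allocate plain kernel memory for a sndbuf
 */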
static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
                                                bool is_dmb, int bufsize)
{
        struct smc_buf_desc *buf_desc;
        int rc;

        if (smc_compress_bufsize(bufsize) > SMCD_DMBE_SIZES)
                return ERR_PTR(-EAGAIN);

        /* try to alloc a new DMB */
        buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
        if (!buf_desc)
                return ERR_PTR(-ENOMEM);
        if (is_dmb) {
                rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
                if (rc) {
                        kfree(buf_desc);
                        return ERR_PTR(-EAGAIN);
                }
                buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
                /* CDC header stored in buf. So, pretend it was smaller */
                buf_desc->len = bufsize - sizeof(struct smcd_cdc_msg);
        } else {
                buf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL |
                                             __GFP_NOWARN | __GFP_NORETRY |
                                             __GFP_NOMEMALLOC);
                if (!buf_desc->cpu_addr) {
                        kfree(buf_desc);
                        return ERR_PTR(-EAGAIN);
                }
                buf_desc->len = bufsize;
        }
        return buf_desc;
}

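/* allocate an rmb or sndbuf for a connection, preferring a reusable slot of
 * the link group; fall back to smaller sizes if allocation fails
 */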
static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
{
        struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
        struct smc_connection *conn = &smc->conn;
        struct smc_link_group *lgr = conn->lgr;
        struct list_head *buf_list;
        int bufsize, bufsize_short;
        int sk_buf_size;
        rwlock_t *lock;

        if (is_rmb)
                /* use socket recv buffer size (w/o overhead) as start value */
                sk_buf_size = smc->sk.sk_rcvbuf / 2;
        else
                /* use socket send buffer size (w/o overhead) as start value */
                sk_buf_size = smc->sk.sk_sndbuf / 2;

        for (bufsize_short = smc_compress_bufsize(sk_buf_size);
             bufsize_short >= 0; bufsize_short--) {

                if (is_rmb) {
                        lock = &lgr->rmbs_lock;
                        buf_list = &lgr->rmbs[bufsize_short];
                } else {
                        lock = &lgr->sndbufs_lock;
                        buf_list = &lgr->sndbufs[bufsize_short];
                }
                bufsize = smc_uncompress_bufsize(bufsize_short);
                if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
                        continue;

                /* check for reusable slot in the link group */
                buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
                if (buf_desc) {
                        memset(buf_desc->cpu_addr, 0, bufsize);
                        break; /* found reusable slot */
                }

                if (is_smcd)
                        buf_desc = smcd_new_buf_create(lgr, is_rmb, bufsize);
                else
                        buf_desc = smcr_new_buf_create(lgr, is_rmb, bufsize);

                if (PTR_ERR(buf_desc) == -ENOMEM)
                        break;
                if (IS_ERR(buf_desc))
                        continue;

                buf_desc->used = 1;
                write_lock_bh(lock);
                list_add(&buf_desc->list, buf_list);
                write_unlock_bh(lock);
                break; /* found */
        }

        if (IS_ERR(buf_desc))
                return -ENOMEM;

        if (is_rmb) {
                conn->rmb_desc = buf_desc;
                conn->rmbe_size_short = bufsize_short;
                smc->sk.sk_rcvbuf = bufsize * 2;
                atomic_set(&conn->bytes_to_rcv, 0);
                conn->rmbe_update_limit =
                        smc_rmb_wnd_update_limit(buf_desc->len);
                if (is_smcd)
                        smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
        } else {
                conn->sndbuf_desc = buf_desc;
                smc->sk.sk_sndbuf = bufsize * 2;
                atomic_set(&conn->sndbuf_space, bufsize);
        }
        return 0;
}

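/* DMA sync helpers: make buffer contents visible to the CPU or the device
 * around data transfers; not needed for SMC-D, hence the early return
 */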
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        if (!conn->lgr || conn->lgr->is_smcd)
                return;
        smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                               conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        if (!conn->lgr || conn->lgr->is_smcd)
                return;
        smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                                  conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        if (!conn->lgr || conn->lgr->is_smcd)
                return;
        smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                               conn->rmb_desc, DMA_FROM_DEVICE);
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        if (!conn->lgr || conn->lgr->is_smcd)
                return;
        smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                                  conn->rmb_desc, DMA_FROM_DEVICE);
}

/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc, bool is_smcd)
{
        int rc;

        /* create send buffer */
        rc = __smc_buf_create(smc, is_smcd, false);
        if (rc)
                return rc;
        /* create rmb */
        rc = __smc_buf_create(smc, is_smcd, true);
        if (rc)
                smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
        return rc;
}

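/* reserve a free rtoken slot in the link group */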
static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
        int i;

        for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
                if (!test_and_set_bit(i, lgr->rtokens_used_mask))
                        return i;
        }
        return -ENOSPC;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
{
        u64 dma_addr = be64_to_cpu(nw_vaddr);
        u32 rkey = ntohl(nw_rkey);
        int i;

        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
                if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
                    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
                    test_bit(i, lgr->rtokens_used_mask)) {
                        /* already in list */
                        return i;
                }
        }
        i = smc_rmb_reserve_rtoken_idx(lgr);
        if (i < 0)
                return i;
        lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
        lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
        return i;
}

/* delete an rtoken */
int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
{
        u32 rkey = ntohl(nw_rkey);
        int i;

        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
                if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
                    test_bit(i, lgr->rtokens_used_mask)) {
                        lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
                        lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;

                        clear_bit(i, lgr->rtokens_used_mask);
                        return 0;
                }
        }
        return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
                            struct smc_clc_msg_accept_confirm *clc)
{
        conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
                                          clc->rmb_rkey);
        if (conn->rtoken_idx < 0)
                return conn->rtoken_idx;
        return 0;
}

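/* mark all ports and devices as going away, to stop new link group creation */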
static void smc_core_going_away(void)
{
        struct smc_ib_device *smcibdev;
        struct smcd_dev *smcd;

        spin_lock(&smc_ib_devices.lock);
        list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
                int i;

                for (i = 0; i < SMC_MAX_PORTS; i++)
                        set_bit(i, smcibdev->ports_going_away);
        }
        spin_unlock(&smc_ib_devices.lock);

        spin_lock(&smcd_dev_list.lock);
        list_for_each_entry(smcd, &smcd_dev_list.list, list) {
                smcd->going_away = 1;
        }
        spin_unlock(&smcd_dev_list.lock);
}

/* Clean up all SMC link groups */
static void smc_lgrs_shutdown(void)
{
        struct smcd_dev *smcd;

        smc_core_going_away();

        smc_smcr_terminate_all(NULL);

        spin_lock(&smcd_dev_list.lock);
        list_for_each_entry(smcd, &smcd_dev_list.list, list)
                smc_smcd_terminate_all(smcd);
        spin_unlock(&smcd_dev_list.lock);
}

static int smc_core_reboot_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
{
        smc_lgrs_shutdown();
        smc_ib_unregister_client();
        return 0;
}

static struct notifier_block smc_reboot_notifier = {
        .notifier_call = smc_core_reboot_event,
};

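/* Called (presumably from smc_init) when the module is loaded */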
int __init smc_core_init(void)
{
        return register_reboot_notifier(&smc_reboot_notifier);
}

/* Called (from smc_exit) when module is removed */
void smc_core_exit(void)
{
        unregister_reboot_notifier(&smc_reboot_notifier);
        smc_lgrs_shutdown();
}