// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Establish SMC-R as an InfiniBand client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */
struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

u8 local_systemid[SMC_SYSTEMID_LEN];		/* unique system identifier */
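
/* The QP modify helpers below step a link's reliable-connected QP through
 * the IB verbs state machine (RESET -> INIT -> RTR -> RTS) with the
 * attributes SMC-R needs; smc_ib_ready_link() drives the sequence.
 */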
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}
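
/* INIT -> RTR: program the address vector with the peer GID and MAC stored
 * on the link and use the smaller of the local and peer MTU as path MTU.
 */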
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}
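
/* RTR -> RTS: arm the send side with the local ack timeout, retry counters
 * and the initial send packet sequence number.
 */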
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}
int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}
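
/* Bring a freshly created QP into operation: INIT and RTR, post the initial
 * receive work requests, and for the server role continue to RTS; the
 * client role is switched to RTS later in link setup via the exported
 * smc_ib_modify_qp_rts().
 */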
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}
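
/* cache the MAC address belonging to GID index 0 of an IB port; the SMC
 * handshake and the local system identifier both need the RoCE MAC
 */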
static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
	rdma_put_gid_attr(attr);
	return rc;
}
/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
}
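
/* resulting local_systemid layout: bytes 0-1 hold the random number written
 * at module init, the following ETH_ALEN bytes hold the port MAC copied
 * above
 */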
bool smc_ib_is_valid_local_systemid(void)
{
	return !is_zero_ether_addr(&local_systemid[2]);
}

static void smc_ib_init_local_systemid(void)
{
	get_random_bytes(&local_systemid[0], 2);
}

bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
/* determine the gid for an ib-device port and vlan id */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index)
{
	const struct ib_gid_attr *attr;
	const struct net_device *ndev;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		ndev = rdma_read_gid_attr_ndev_rcu(attr);
		if (!IS_ERR(ndev) &&
		    ((!vlan_id && !is_vlan_dev(attr->ndev)) ||
		     (vlan_id && is_vlan_dev(attr->ndev) &&
		      vlan_dev_vlan_id(attr->ndev) == vlan_id)) &&
		    attr->gid_type == IB_GID_TYPE_ROCE) {
			rcu_read_unlock();
			if (gid)
				memcpy(gid, &attr->gid, SMC_GID_SIZE);
			if (sgid_index)
				*sgid_index = attr->index;
			rdma_put_gid_attr(attr);
			return 0;
		}
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;
}
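
/* refresh the cached port attributes and MAC address of an IB port; seeds
 * the local system identifier once the first active port is seen
 */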
static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!smc_ib_is_valid_local_systemid() &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
			set_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_err(smcibdev, port_idx + 1);
		} else {
			clear_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_add(smcibdev, port_idx + 1);
		}
	}
}
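
/* IB event handling is split in two stages: the handlers below may run in
 * IRQ context and only record the affected port in port_event_mask before
 * scheduling port_event_work, which does the sleeping work above.
 */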
/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	bool schedule = false;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
		/* terminate all ports on device */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			if (!test_and_set_bit(port_idx,
					      smcibdev->ports_going_away))
				schedule = true;
		}
		if (schedule)
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_GID_CHANGE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}
void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}
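
/* QP event callback, registered per link via ib_qp_init_attr.event_handler;
 * fatal QP errors are treated like an error on the underlying port
 */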
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}
void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}

/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}
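
/* Memory regions map the DMA-mapped receive buffers so that the peer can
 * place data into them with RDMA writes; one MR is kept per buffer and
 * link in mr_rx[link_idx].
 */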
void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}

static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[link_idx],
			      buf_slot->sgt[link_idx].sgl,
			      buf_slot->sgt[link_idx].orig_nents,
			      &offset, PAGE_SIZE);
	return sg_num;
}
/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot, u8 link_idx)
{
	if (buf_slot->mr_rx[link_idx])
		return 0; /* already done */

	buf_slot->mr_rx[link_idx] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[link_idx])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[link_idx]);
		buf_slot->mr_rx[link_idx] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot, link_idx) != 1)
		return -EINVAL;

	return 0;
}
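
/* DMA sync helpers: on platforms without cache-coherent DMA the buffers
 * must be synced for the CPU before reading received data and synced for
 * the device after filling in data to be sent.
 */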
/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}
/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_link *lnk,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}
/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_link *lnk,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
				     buf_slot->sgt[lnk->link_idx].sgl,
				     buf_slot->sgt[lnk->link_idx].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}
void smc_ib_buf_unmap_sg(struct smc_link *lnk,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(lnk->smcibdev->ibdev,
			buf_slot->sgt[lnk->link_idx].sgl,
			buf_slot->sgt[lnk->link_idx].orig_nents,
			data_direction);
	buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
}
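
/* Set up one send and one receive completion queue per IB device and
 * register the device with the work-request layer.  The CQE count is
 * capped so that the queue still fits into a single allocation of order
 * MAX_ORDER - 1 for the assumed CQE size (64 bytes, or 128 bytes on
 * systems with 128-byte cache lines), matching the mlx5 constraint noted
 * below.
 */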
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr =	{
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}
static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smcibdev->initialized = 0;
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
	smc_wr_remove_dev(smcibdev);
}
static struct ib_client smc_ib_client;

/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
	atomic_set(&smcibdev->lnk_cnt, 0);
	init_waitqueue_head(&smcibdev->lnks_deleted);
	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	pr_warn_ratelimited("smc: adding ib device %s with port count %d\n",
			    smcibdev->ibdev->name, port_cnt);
	for (i = 0;
	     i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
	     i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		if (smc_pnetid_by_dev_port(ibdev->dev.parent, i,
					   smcibdev->pnetid[i]))
			smc_pnetid_by_table_ib(smcibdev, i + 1);
		pr_warn_ratelimited("smc: ib device %s port %d has pnetid "
				    "%.16s%s\n",
				    smcibdev->ibdev->name, i + 1,
				    smcibdev->pnetid[i],
				    smcibdev->pnetid_by_user[i] ?
					" (user defined)" : "");
	}
	schedule_work(&smcibdev->port_event_work);
}
/* callback function for ib_unregister_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	if (!smcibdev || smcibdev->ibdev != ibdev)
		return;
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	pr_warn_ratelimited("smc: removing ib device %s\n",
			    smcibdev->ibdev->name);
	smc_smcr_terminate_all(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	cancel_work_sync(&smcibdev->port_event_work);
	kfree(smcibdev);
}
static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove = smc_ib_remove_dev,
};
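
/* one-time registration with the IB core; also generates the random part
 * of the local system identifier
 */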
int __init smc_ib_register_client(void)
{
	smc_ib_init_local_systemid();
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}