/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Establish SMC-R as an Infiniband Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */

struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

#define SMC_LOCAL_SYSTEMID_RESET	"%%%%%%%"

u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET;	/* unique system
								 * identifier
								 */

static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}

static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}

int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}

int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}

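/* Note: the modify helpers above walk a link's RC queue pair through the
 * usual verbs state machine (RESET -> INIT -> RTR -> RTS).
 * smc_ib_ready_link() below drives INIT and RTR for both sides; only the
 * server side is moved to RTS here, which is why smc_ib_modify_qp_rts() is
 * non-static and can be invoked later from other parts of the SMC code for
 * the client side.
 */
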
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr =
		container_of(lnk, struct smc_link_group, lnk[0]);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}

/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
	}
}

/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_DEVICE_FATAL:
		/* tbd in follow-on patch:
		 * abnormal close of corresponding connections
		 */
		break;
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}

static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
	case IB_EVENT_GID_CHANGE:
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
		/* tbd in follow-on patch:
		 * abnormal close of corresponding connections
		 */
		break;
	default:
		break;
	}
}

void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}

/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}

void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}

static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			      &offset, PAGE_SIZE);
	return sg_num;
}

/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot)
{
	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
		return 0; /* already done */

	buf_slot->mr_rx[SMC_SINGLE_LINK] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot) != 1)
		return -EINVAL;

	return 0;
}

/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}

/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}

/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
				     buf_slot->sgt[SMC_SINGLE_LINK].sgl,
				     buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}

void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(smcibdev->ibdev,
			buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			data_direction);
	buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
}

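/* Usage note (not enforced here): smc_ib_buf_map_sg()/smc_ib_buf_unmap_sg()
 * bracket the lifetime of a buffer's DMA mapping, while the sync_sg helpers
 * above are meant to be called around CPU accesses to a buffer that stays
 * mapped for the device, following the usual DMA API conventions.
 */
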
static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct net_device *ndev;
	int rc;

	rc = ib_query_gid(smcibdev->ibdev, ibport, 0,
			  &smcibdev->gid[ibport - 1], NULL);
	/* the SMC protocol requires specification of the roce MAC address;
	 * if net_device cannot be determined, it can be derived from gid 0
	 */
	ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport);
	if (ndev) {
		memcpy(smcibdev->mac[ibport - 1], ndev->dev_addr, ETH_ALEN);
		dev_put(ndev);
	} else if (!rc) {
		memcpy(&smcibdev->mac[ibport - 1][0],
		       &smcibdev->gid[ibport - 1].raw[8], 3);
		memcpy(&smcibdev->mac[ibport - 1][3],
		       &smcibdev->gid[ibport - 1].raw[13], 3);
		smcibdev->mac[ibport - 1][0] &= ~0x02;
	}
	return rc;
}

/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
	get_random_bytes(&local_systemid[0], 2);
}

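/* Resulting layout of local_systemid (SMC_SYSTEMID_LEN bytes):
 * bytes 0..1 hold the random part, bytes 2..7 the chosen port's MAC address.
 */
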
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}

int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	rc = smc_ib_fill_gid_and_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
		     sizeof(local_systemid)) &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}

long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr =	{
		.cqe = SMC_WR_MAX_CQE, .comp_vector = 0 };
	long rc;

	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}

static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smc_wr_remove_dev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
}

static struct ib_client smc_ib_client;

/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);

	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
}

/* callback function for ib_register_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	smc_pnet_remove_by_ibdev(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	kfree(smcibdev);
}

static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};

int __init smc_ib_register_client(void)
{
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}

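/* Usage sketch (illustrative, not part of this file): the SMC module's init
 * path is expected to register the IB client before any link groups are
 * created, and to unregister it again on module exit, roughly:
 *
 *	rc = smc_ib_register_client();
 *	if (rc)
 *		return rc;
 *	...
 *	smc_ib_unregister_client();
 */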