// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
 */
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>

#include "core_priv.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP]			= "no QP",
	[IB_CM_REJ_NO_EEC]			= "no EEC",
	[IB_CM_REJ_NO_RESOURCES]		= "no resources",
	[IB_CM_REJ_TIMEOUT]			= "timeout",
	[IB_CM_REJ_UNSUPPORTED]			= "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID]		= "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE]	= "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID]		= "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE]	= "invalid transport type",
	[IB_CM_REJ_STALE_CONN]			= "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST]		= "RDC not exist",
	[IB_CM_REJ_INVALID_GID]			= "invalid GID",
	[IB_CM_REJ_INVALID_LID]			= "invalid LID",
	[IB_CM_REJ_INVALID_SL]			= "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS]	= "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT]		= "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE]		= "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID]		= "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID]		= "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL]		= "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]	= "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT]	= "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE]	= "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT]		= "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT]		= "port redirect",
	[IB_CM_REJ_INVALID_MTU]			= "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES]	= "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED]		= "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY]		= "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]	= "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION]	= "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL]		= "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL]	= "invalid alt flow label",
	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
		"vendor option is not supported",
};
const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];
	else
		return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);
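/*
 * Illustrative sketch (editorial, not part of the original file): a consumer
 * that receives a REJ event could log the decoded reason, e.g.
 *
 *	pr_debug("REQ rejected: %s\n",
 *		 ibcm_reject_msg(IB_CM_REJ_STALE_CONN));
 *
 * Out-of-range or unnamed reason codes fall back to "unrecognized reason".
 */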
static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len);
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len);
static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};
static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct xarray local_id_table;
	u32 local_id_next;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	/* Sync on cm change port state */
	spinlock_t state_lock;
} cm;
/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};
static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};
struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};
struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}
static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);
static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[];
};
struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct rdma_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};
struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[];
};
struct cm_timewait_info {
	struct cm_work work;
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};
struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	refcount_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;
	struct rcu_head rcu;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port mad is registered and av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;

	struct rdma_ucm_ece ece;
};
static void cm_work_handler(struct work_struct *work);
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
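/*
 * Note (editorial): cm_deref_id() pairs with the refcount_inc() /
 * refcount_inc_not_zero() calls taken by cm_acquire_id() and cm_alloc_msg().
 * Dropping the last reference only completes &cm_id_priv->comp; the memory
 * itself is released later by cm_destroy_id(), which waits on that
 * completion before kfree_rcu().
 */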
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* don't let the port be released till the agent is down */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the mad yet */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       av->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah, 0);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	refcount_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}
static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
							   struct ib_mad_recv_wc *mad_recv_wc)
{
	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				  GFP_ATOMIC,
				  IB_MGMT_BASE_VERSION);
}
static int cm_create_response_msg_ah(struct cm_port *port,
				     struct ib_mad_recv_wc *mad_recv_wc,
				     struct ib_mad_send_buf *msg)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);
	msg->ah = ah;
	return 0;
}
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}
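/*
 * Note (editorial): cm_free_msg() undoes cm_alloc_msg(): it destroys the
 * address handle attached to the send buffer, drops the cm_id_priv
 * reference stashed in msg->context[0], and returns the MAD buffer.
 */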
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	int ret;

	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
	if (IS_ERR(m))
		return PTR_ERR(m);

	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
	if (ret) {
		cm_free_msg(m);
		return ret;
	}

	*msg = m;
	return 0;
}
static void *cm_copy_private_data(const void *private_data,
				  u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}
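/*
 * Illustrative usage (sketch, editorial): callers duplicate user data first
 * and then hand ownership to the cm_id, e.g.
 *
 *	data = cm_copy_private_data(private_data, private_data_len);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	cm_set_private_data(cm_id_priv, data, private_data_len);
 *
 * Any previously attached buffer is freed before the new one is stored.
 */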
static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
			      struct ib_grh *grh, struct cm_av *av)
{
	struct rdma_ah_attr new_ah_attr;
	int ret;

	av->port = port;
	av->pkey_index = wc->pkey_index;

	/*
	 * av->ah_attr might be initialized based on past wc during incoming
	 * connect request or while sending out connect request. So initialize
	 * a new ah_attr on stack. If initialization fails, the old ah_attr is
	 * used for sending any responses. If initialization is successful,
	 * then the new ah_attr is used by overwriting the old one.
	 */
	ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				      port->port_num, wc,
				      grh, &new_ah_attr);
	if (ret)
		return ret;

	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}
static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}
static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
				   struct cm_av *av, struct cm_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		WARN_ON(true);
	spin_unlock_irqrestore(&cm.lock, flags);
}
static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;

	if (attr) {
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			if (cm_dev->ib_device == attr->device) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
	} else {
		/* SGID attribute can be NULL in following
		 * conditions.
		 * (a) Alternative path
		 * (b) IB link layer without GRH
		 * (c) LAP send messages
		 */
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			attr = rdma_find_gid(cm_dev->ib_device,
					     &path->sgid,
					     sa_conv_pathrec_to_gid_type(path),
					     NULL);
			if (!IS_ERR(attr)) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
		if (port)
			rdma_put_gid_attr(attr);
	}
	return port;
}
static int cm_init_av_by_path(struct sa_path_rec *path,
			      const struct ib_gid_attr *sgid_attr,
			      struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct rdma_ah_attr new_ah_attr;
	struct cm_device *cm_dev;
	struct cm_port *port;
	int ret;

	port = get_cm_port_from_path(path, sgid_attr);
	if (!port)
		return -EINVAL;
	cm_dev = port->cm_dev;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;

	/*
	 * av->ah_attr might be initialized based on wc or during
	 * request processing time which might have reference to sgid_attr.
	 * So initialize a new ah_attr on stack.
	 * If initialization fails, the old ah_attr is used for sending any
	 * responses. If initialization is successful, then the new ah_attr
	 * is used by overwriting the old one, so that the right ah_attr
	 * can be used to return an error response.
	 */
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&new_ah_attr, sgid_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;
	add_cm_id_to_port_list(cm_id_priv, av, port);
	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}
static u32 cm_local_id(__be32 local_id)
{
	return (__force u32) (local_id ^ cm.random_id_operand);
}
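/*
 * Example (editorial): local IDs handed to the wire are the xarray index
 * XOR'ed with a boot-time random operand. If random_id_operand is
 * 0x12345678 and the allocated index is 5, the wire-visible local_id is
 * 0x1234567d; cm_local_id() XORs again to recover index 5 for xa_load().
 */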
static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	rcu_read_lock();
	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
	    !refcount_inc_not_zero(&cm_id_priv->refcount))
		cm_id_priv = NULL;
	rcu_read_unlock();

	return cm_id_priv;
}
/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}
/*
 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
 * if the new ID was inserted, NULL if it could not be inserted due to a
 * collision, or the existing cm_id_priv ready for shared usage.
 */
static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
					      ib_cm_handler shared_handler)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
			/*
			 * Sharing an ib_cm_id with different handlers is not
			 * supported
			 */
			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
			    cur_cm_id_priv->id.context ||
			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
				spin_unlock_irqrestore(&cm.lock, flags);
				return NULL;
			}
			refcount_inc(&cur_cm_id_priv->refcount);
			cur_cm_id_priv->listen_sharecount++;
			spin_unlock_irqrestore(&cm.lock, flags);
			return cur_cm_id_priv;
		}

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	cm_id_priv->listen_sharecount++;
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return cm_id_priv;
}
static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device)) {
			refcount_inc(&cm_id_priv->refcount);
			return cm_id_priv;
		}
		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct cm_timewait_info *
cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}
static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
					       __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *res = NULL;

	spin_lock_irq(&cm.lock);
	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else {
			res = cm_acquire_id(timewait_info->work.local_id,
					    timewait_info->work.remote_id);
			break;
		}
	}
	spin_unlock_irq(&cm.lock);
	return res;
}
static struct cm_timewait_info *
cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
static struct cm_id_private *
cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;

			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}
static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
					      ib_cm_handler cm_handler,
					      void *context)
{
	struct cm_id_private *cm_id_priv;
	u32 id;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;

	RB_CLEAR_NODE(&cm_id_priv->service_node);
	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	refcount_set(&cm_id_priv->refcount, 1);

	ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
				  &cm.local_id_next, GFP_KERNEL);
	if (ret < 0)
		goto error;
	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;

	return cm_id_priv;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}
/*
 * Make the ID visible to the MAD handlers and other threads that use the
 * xarray.
 */
static void cm_finalize_id(struct cm_id_private *cm_id_priv)
{
	xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
		     cm_id_priv, GFP_KERNEL);
}
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	cm_finalize_id(cm_id_priv);
	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);
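/*
 * Illustrative usage (sketch, editorial; error handling abbreviated,
 * my_handler and my_ctx are hypothetical consumer names):
 *
 *	cm_id = ib_create_cm_id(device, my_handler, my_ctx);
 *	if (IS_ERR(cm_id))
 *		return PTR_ERR(cm_id);
 *	...
 *	ib_destroy_cm_id(cm_id);
 */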
static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}
static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}
static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
				 struct cm_work *work)
{
	bool immediate;

	/*
	 * To deliver the event to the user callback we have to drop the
	 * spinlock, however, we need to ensure that the user callback is single
	 * threaded and receives events in the temporal order. If there are
	 * already events being processed then thread new events onto a list,
	 * the thread currently processing will pick them up.
	 */
	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!immediate) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		/*
		 * This routine always consumes incoming reference. Once queued
		 * to the work_list then a reference is held by the thread
		 * currently running cm_process_work() and this reference is not
		 * needed.
		 */
		cm_deref_id(cm_id_priv);
	}
	spin_unlock_irq(&cm_id_priv->lock);

	if (immediate)
		cm_process_work(cm_id_priv, work);
}
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}
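/*
 * Worked example (editorial): IBA encodes times as 4.096us * 2^t. For
 * t = 12 the exact value is ~16.8ms, while 1 << (12 - 8) = 16ms, so the
 * approximation stays in the right ballpark; for t <= 8 the result clamps
 * to 1ms.
 */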
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}
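/*
 * Worked example (editorial): with ca_ack_delay = 15 and
 * packet_life_time = 14, ack_timeout starts at 15; since 15 >= 15 and
 * 15 >= 14, it is rounded up to 16. The result is capped at 31, the
 * largest encodable 5-bit timeout.
 */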
static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;

	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}
static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	unsigned long wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_remove_remote(cm_id_priv);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The timewait_info is converted into a work and gets freed during
	 * cm_free_work() in cm_timewait_handler().
	 */
	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
	cm_id_priv->timewait_info = NULL;
}
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_remove_remote(cm_id_priv);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irq(&cm_id_priv->lock);
retest:
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_lock(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
			spin_unlock(&cm.lock);
			spin_unlock_irq(&cm_id_priv->lock);
			cm_deref_id(cm_id_priv);
			return;
		}
		cm_id->state = IB_CM_IDLE;
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		RB_CLEAR_NODE(&cm_id_priv->service_node);
		spin_unlock(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		cm_send_sidr_rep_locked(cm_id_priv,
					&(struct ib_cm_sidr_rep_param){
						.status = IB_SIDR_REJECT });
		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
				   &cm_id_priv->id.device->node_guid,
				   sizeof(cm_id_priv->id.device->node_guid),
				   NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
		} else {
			cm_send_rej_locked(cm_id_priv,
					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
					   NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		goto retest;
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			cm_id->state = IB_CM_IDLE;
			break;
		}
		cm_send_dreq_locked(cm_id_priv, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		goto retest;
	case IB_CM_DREQ_RCVD:
		cm_send_drep_locked(cm_id_priv, NULL, 0);
		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
		goto retest;
	case IB_CM_TIMEWAIT:
		/*
		 * The cm_acquire_id in cm_timewait_handler will stop working
		 * once we do xa_erase below, so just move to idle here for
		 * consistency.
		 */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_IDLE:
		break;
	}
	WARN_ON(cm_id->state != IB_CM_IDLE);

	spin_lock(&cm.lock);
	/* Required for cleanup paths related cm_req_handler() */
	if (cm_id_priv->timewait_info) {
		cm_remove_remote(cm_id_priv);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	WARN_ON(cm_id_priv->listen_sharecount);
	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock(&cm.lock);
	spin_unlock_irq(&cm_id_priv->lock);

	xa_erase_irq(&cm.local_id_table, cm_local_id(cm_id->local_id));
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);

	rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
	rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
	kfree(cm_id_priv->private_data);
	kfree_rcu(cm_id_priv, rcu);
}
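/*
 * Note (editorial): teardown order matters above. The xa_erase_irq() makes
 * the ID invisible to cm_acquire_id(), the wait_for_completion() drains
 * every outstanding reference, and only then are the AH attributes,
 * private data, and the structure itself (via RCU) released.
 */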
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
			  __be64 service_mask)
{
	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id_priv->id.service_mask = ~cpu_to_be64(0);
	} else {
		cm_id_priv->id.service_id = service_id;
		cm_id_priv->id.service_mask = service_mask;
	}
	return 0;
}
/**
 * ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs. If set to 0, the service ID is matched
 *   exactly. This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_listen(cm_id_priv, service_id, service_mask);
	if (ret)
		goto out;

	if (!cm_insert_listen(cm_id_priv, NULL)) {
		ret = -EBUSY;
		goto out;
	}

	cm_id_priv->id.state = IB_CM_LISTEN;
	ret = 0;

out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
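/*
 * Illustrative usage (sketch, editorial; my_service_id is a hypothetical
 * value): listening on an exact service ID with a previously created cm_id:
 *
 *	ret = ib_cm_listen(cm_id, cpu_to_be64(my_service_id), 0);
 *
 * A zero service_mask requests an exact match, per the comment above.
 */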
/*
 * Create a new listening ib_cm_id and listen on the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id. All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests. The service ID should be specified
 *   in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *listen_id_priv;
	struct cm_id_private *cm_id_priv;
	int err;

	/* Create an ID in advance, since the creation may sleep */
	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	err = cm_init_listen(cm_id_priv, service_id, 0);
	if (err)
		return ERR_PTR(err);

	spin_lock_irq(&cm_id_priv->lock);
	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
	if (listen_id_priv != cm_id_priv) {
		spin_unlock_irq(&cm_id_priv->lock);
		ib_destroy_cm_id(&cm_id_priv->id);
		if (!listen_id_priv)
			return ERR_PTR(-EINVAL);
		return &listen_id_priv->id;
	}
	cm_id_priv->id.state = IB_CM_LISTEN;
	spin_unlock_irq(&cm_id_priv->lock);

	/*
	 * A listen ID does not need to be in the xarray since it does not
	 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
	 * and does not enter timewait.
	 */

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64)cm_id_priv->id.local_id;
	return cpu_to_be64(hi_tid | low_tid);
}
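/*
 * Example (editorial): the 64-bit TID carries the MAD agent's hi_tid in
 * the upper 32 bits and the connection's local_id in the lower 32 bits,
 * so hi_tid = 0x2 and local_id = 0x10 yield 0x0000000200000010 before the
 * cpu_to_be64 conversion.
 */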
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method = IB_MGMT_METHOD_SEND;
	hdr->attr_id = attr_id;
	hdr->tid = tid;
}
static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
				  __be64 tid, u32 attr_mod)
{
	cm_format_mad_hdr(hdr, attr_id, tid);
	hdr->attr_mod = cpu_to_be32(attr_mod);
}
static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct sa_path_rec *pri_path = param->primary_path;
	struct sa_path_rec *alt_path = param->alternate_path;
	bool pri_ext = false;

	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
					      pri_path->opa.slid);

	cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			      cm_form_tid(cm_id_priv), param->ece.attr_mod);

	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
		be64_to_cpu(cm_id_priv->id.device->node_guid));
	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
		param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
		param->local_cm_response_timeout);
	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
		be16_to_cpu(param->primary_path->pkey));
	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
		param->primary_path->mtu);
	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
			param->responder_resources);
		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
			param->rnr_retry_count);
		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
	}

	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
		pri_path->sgid;
	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
		pri_path->dgid;
	if (pri_ext) {
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
	}
	if (pri_path->hop_limit <= 1) {
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
					      htons(ntohl(sa_path_get_slid(
						      pri_path)))));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
					      htons(ntohl(sa_path_get_dlid(
						      pri_path)))));
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
	}
	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
		be32_to_cpu(pri_path->flow_label));
	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
		(pri_path->hop_limit <= 1));
	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		bool alt_ext = false;

		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
						      alt_path->opa.slid);

		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
			alt_path->sgid;
		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
			alt_path->dgid;
		if (alt_ext) {
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
		}
		if (alt_path->hop_limit <= 1) {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
						  htons(ntohl(sa_path_get_slid(
							  alt_path)))));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
						  htons(ntohl(sa_path_get_dlid(
							  alt_path)))));
		} else {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
		}
		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
			be32_to_cpu(alt_path->flow_label));
		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
			alt_path->traffic_class);
		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
			alt_path->hop_limit);
		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
			(alt_path->hop_limit <= 1));
		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}
	IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
			    param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}
int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path,
				 param->ppath_sgid_attr, &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto out;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path, NULL,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret)
			goto out;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto out;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
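/*
 * Illustrative usage (sketch, editorial; fields abbreviated, my_path,
 * my_qp and my_service_id are hypothetical consumer objects):
 *
 *	struct ib_cm_req_param param = {
 *		.primary_path	= &my_path,
 *		.service_id	= cpu_to_be64(my_service_id),
 *		.qp_num		= my_qp->qp_num,
 *		.qp_type	= IB_QPT_RC,
 *		.max_cm_retries	= 15,
 *	};
 *	ret = ib_send_cm_req(cm_id, &param);
 *
 * On success the ID moves from IB_CM_IDLE to IB_CM_REQ_SENT.
 */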
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information. Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
	IBA_SET(CM_REJ_REASON, rej_msg, reason);

	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
	return ((cpu_to_be16(
			IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
		(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					       req_msg))));
}
static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
				 struct sa_path_rec *path, union ib_gid *gid)
{
	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
		path->rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->rec_type = SA_PATH_REC_TYPE_IB;
}
static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
					struct sa_path_rec *primary_path,
					struct sa_path_rec *alt_path)
{
	u32 lid;

	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(primary_path,
				 IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
					 req_msg));
		sa_path_set_slid(primary_path,
				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(primary_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(primary_path, lid);
	}

	if (!cm_req_has_alt_path(req_msg))
		return;

	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
					 req_msg));
		sa_path_set_slid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(alt_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(alt_path, lid);
	}
}
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct sa_path_rec *primary_path,
				     struct sa_path_rec *alt_path)
{
	primary_path->dgid =
		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
	primary_path->sgid =
		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
	primary_path->flow_label =
		cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
	primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
	primary_path->traffic_class =
		IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
	primary_path->reversible = 1;
	primary_path->pkey =
		cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
	primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id =
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
	if (sa_path_is_roce(primary_path))
		primary_path->roce.route_resolved = false;

	if (cm_req_has_alt_path(req_msg)) {
		alt_path->dgid = *IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
		alt_path->sgid = *IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
		alt_path->flow_label = cpu_to_be32(
			IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
		alt_path->hop_limit =
			IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
		alt_path->traffic_class =
			IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
		alt_path->reversible = 1;
		alt_path->pkey =
			cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
		alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu =
			IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id =
			cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));

		if (sa_path_is_roce(alt_path))
			alt_path->roce.route_resolved = false;
	}
	cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
}
static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}
/**
 * Convert OPA SGID to IB SGID
 * ULPs (such as IPoIB) do not understand OPA GIDs and will
 * reject them as the local_gid will not match the sgid. Therefore,
 * change the pathrec's SGID to an IB SGID.
 *
 * @work: Work completion
 * @path: Path record
 */
static void cm_opa_to_ib_sgid(struct cm_work *work,
			      struct sa_path_rec *path)
{
	struct ib_device *dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;

	if (rdma_cap_opa_ah(dev, port_num) &&
	    (ib_is_opa_gid(&path->sgid))) {
		union ib_gid sgid;

		if (rdma_query_gid(dev, port_num, 0, &sgid)) {
			dev_warn(&dev->dev,
				 "Error updating sgid in CM request\n");
			return;
		}

		path->sgid = sgid;
	}
}
static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	cm_opa_to_ib_sgid(work, param->primary_path);
	if (cm_req_has_alt_path(req_msg)) {
		param->alternate_path = &work->path[1];
		cm_opa_to_ib_sgid(work, param->alternate_path);
	} else {
		param->alternate_path = NULL;
	}
	param->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
	param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
	param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
	param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
	param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
	param->local_cm_response_timeout =
		IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
	param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
	param->remote_cm_response_timeout =
		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
	param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
	param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
	param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
	param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
}
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		if (!work)
			return;

		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}
static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
	IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
			    private_data_len);
}
static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason, void *ari,
			  u8 ari_length, const void *private_data,
			  u8 private_data_len, enum ib_cm_state state)
{
	lockdep_assert_held(&cm_id_priv->lock);

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));

	switch (state) {
	case IB_CM_REQ_RCVD:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
			CM_MSG_RESPONSE_OTHER);
		break;
	}

	IBA_SET(CM_REJ_REASON, rej_msg, reason);
	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
			    private_data_len);
}
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		return;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
			      IB_CM_TIMEWAIT);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_remove_remote(cm_id_priv);
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);

		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		if (cur_cm_id_priv) {
			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(
		cm_id_priv->id.device,
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
	if (!listen_cm_id_priv) {
		cm_remove_remote(cm_id_priv);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}
	spin_unlock_irq(&cm.lock);
	return listen_cm_id_priv;
}
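/*
 * The two cm_insert_remote_*() probes above double as duplicate and
 * stale-connection detection: a hit in the remote-ID tree means this REQ
 * was already seen (a resend), while a hit in the remote-QPN tree means
 * the peer's QP is still bound to an older connection that must be torn
 * down first (REJ "stale conn" plus a DREQ to the old owner).
 */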
/*
 * Work-around for inter-subnet connections. If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
	if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE) {
			IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(ib_lid_be16(wc->slid)));
			IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
		}

		if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE)
			IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
				wc->dlid_path_bits);
	}

	if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE) {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(ib_lid_be16(wc->slid)));
			IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
		}

		if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
					req_msg)) == IB_LID_PERMISSIVE)
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				wc->dlid_path_bits);
	}
}
static int cm_req_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	const struct ib_global_route *grh;
	const struct ib_gid_attr *gid_attr;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	cm_id_priv =
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);

	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
	cm_id_priv->id.service_id =
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
	cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
	cm_id_priv->remote_qpn =
		cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
	cm_id_priv->initiator_depth =
		IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
	cm_id_priv->responder_resources =
		IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
	cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
	cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
	cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
	cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
	cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);

	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto destroy;
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto destroy;
	}
	cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
	cm_id_priv->timewait_info->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
	cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;

	/*
	 * Note that the ID pointer is not in the xarray at this point,
	 * so this set is only visible to the local thread.
	 */
	cm_id_priv->id.state = IB_CM_REQ_RCVD;

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id));
		cm_id_priv->id.state = IB_CM_IDLE;
		ret = -EINVAL;
		goto destroy;
	}

	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);

	memset(&work->path[0], 0, sizeof(work->path[0]));
	if (cm_req_has_alt_path(req_msg))
		memset(&work->path[1], 0, sizeof(work->path[1]));
	grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
	gid_attr = grh->sgid_attr;

	if (gid_attr &&
	    rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num)) {
		work->path[0].rec_type =
			sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
	} else {
		cm_path_set_rec_type(
			work->port->cm_dev->ib_device, work->port->port_num,
			&work->path[0],
			IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
					req_msg));
	}
	if (cm_req_has_alt_path(req_msg))
		work->path[1].rec_type = work->path[0].rec_type;
	cm_format_paths_from_req(req_msg, &work->path[0],
				 &work->path[1]);
	if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
		sa_path_set_dmac(&work->path[0],
				 cm_id_priv->av.ah_attr.roce.dmac);
	work->path[0].hop_limit = grh->hop_limit;
	ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
				 cm_id_priv);
	if (ret) {
		int err;

		err = rdma_query_gid(work->port->cm_dev->ib_device,
				     work->port->port_num, 0,
				     &work->path[0].sgid);
		if (err)
			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
				       NULL, 0, NULL, 0);
		else
			ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid),
				       NULL, 0);
		goto rejected;
	}
	if (cm_req_has_alt_path(req_msg)) {
		ret = cm_init_av_by_path(&work->path[1], NULL,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret) {
			ib_send_cm_rej(&cm_id_priv->id,
				       IB_CM_REJ_INVALID_ALT_GID,
				       &work->path[0].sgid,
				       sizeof(work->path[0].sgid), NULL, 0);
			goto rejected;
		}
	}

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);

	/* Now MAD handlers can see the new ID */
	spin_lock_irq(&cm_id_priv->lock);
	cm_finalize_id(cm_id_priv);

	/* Refcount belongs to the event, pairs with cm_process_work() */
	refcount_inc(&cm_id_priv->refcount);
	cm_queue_work_unlock(cm_id_priv, work);
	/*
	 * Since this ID was just created and was not made visible to other MAD
	 * handlers until the cm_finalize_id() above we know that the
	 * cm_process_work() will deliver the event and the listen_cm_id
	 * embedded in the event can be derefed here.
	 */
	cm_deref_id(listen_cm_id_priv);
	return 0;

rejected:
	cm_deref_id(listen_cm_id_priv);
destroy:
	ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}
static void cm_format_rep(struct cm_rep_msg *rep_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_rep_param *param)
{
	cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
			      param->ece.attr_mod);
	IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
	IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
		param->responder_resources);
	IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
		cm_id_priv->av.port->cm_dev->ack_delay);
	IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
	IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
	IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
		be64_to_cpu(cm_id_priv->id.device->node_guid));

	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
		IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
			param->initiator_depth);
		IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
			param->flow_control);
		IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
		IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
	} else {
		IBA_SET(CM_REP_SRQ, rep_msg, 1);
		IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
	}

	IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
	IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
	IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
			    param->private_data_len);
}
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
	WARN_ONCE(param->qp_num & 0xFF000000,
		  "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
		  param->qp_num);
	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
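/*
 * Usage sketch for ib_send_cm_rep() (illustrative only; "my_qp" and
 * "my_psn" are hypothetical ULP state and error handling is elided).
 * The passive side calls this from its cm_handler on IB_CM_REQ_RECEIVED:
 *
 *	struct ib_cm_rep_param rep = {
 *		.qp_num			= my_qp->qp_num,
 *		.starting_psn		= my_psn,
 *		.responder_resources	= 1,
 *		.initiator_depth	= 1,
 *		.rnr_retry_count	= 7,
 *		.flow_control		= 1,
 *	};
 *	ret = ib_send_cm_rep(cm_id, &rep);
 */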
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
			  struct cm_id_private *cm_id_priv,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
			    private_data_len);
}

int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
			 be32_to_cpu(cm_id->local_id), cm_id->state);
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
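/*
 * A minimal sketch of the active side completing the handshake from its
 * cm_handler (illustrative; the QP transition helper is the caller's own
 * and would typically be built on ib_cm_init_qp_attr()):
 *
 *	case IB_CM_REP_RECEIVED:
 *		my_modify_qp_rtr_rts(cm_id, my_qp);	// hypothetical helper
 *		ib_send_cm_rtu(cm_id, NULL, 0);
 *		break;
 */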
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
	struct cm_rep_msg *rep_msg;
	struct ib_cm_rep_event_param *param;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rep_rcvd;
	param->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
	param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
	param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
	param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
	param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
	param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
	param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
	param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
	param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
	param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
	param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
	param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
	param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
}
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
	if (!cm_id_priv)
		return;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REP_COUNTER]);
	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		goto unlock;
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	goto deref;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
deref:	cm_deref_id(cm_id_priv);
}
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	int ret;
	struct cm_id_private *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
	if (!cm_id_priv) {
		cm_dup_rep_handler(work);
		pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
		return -EINVAL;
	}

	cm_format_rep_event(work, cm_id_priv->qp_type);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		ret = -EINVAL;
		pr_debug(
			"%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
			__func__, cm_id_priv->id.state,
			IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
		spin_unlock_irq(&cm_id_priv->lock);
		goto error;
	}

	cm_id_priv->timewait_info->work.remote_id =
		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
	cm_id_priv->timewait_info->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);

	spin_lock(&cm.lock);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		ret = -EINVAL;
		pr_debug("%s: Failed to insert remote id %d\n", __func__,
			 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
		goto error;
	}
	/* Check for a stale connection. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_remove_remote(cm_id_priv);
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);

		spin_unlock(&cm.lock);
		spin_unlock_irq(&cm_id_priv->lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		pr_debug(
			"%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
			__func__, IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
			IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));

		if (cur_cm_id_priv) {
			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}

		goto error;
	}
	spin_unlock(&cm.lock);

	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
	cm_id_priv->initiator_depth =
		IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
	cm_id_priv->responder_resources =
		IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
	cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
	cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
	cm_id_priv->target_ack_delay =
		IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
	cm_id_priv->av.timeout =
		cm_ack_timeout(cm_id_priv->target_ack_delay,
			       cm_id_priv->av.timeout - 1);
	cm_id_priv->alt_av.timeout =
		cm_ack_timeout(cm_id_priv->target_ack_delay,
			       cm_id_priv->alt_av.timeout - 1);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

error:
	cm_deref_id(cm_id_priv);
	return ret;
}
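/*
 * Note on the av.timeout update in cm_rep_handler() above: the timeout
 * values use IBTA's logarithmic 4.096us * 2^n encoding, so the peer's
 * target ACK delay cannot simply be added in. The stored av.timeout is
 * the path's packet life time plus one, so "timeout - 1" recovers the
 * one-way packet life time before cm_ack_timeout() combines it with the
 * newly learned remote ACK delay.
 */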
static int cm_establish_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;

	/* See comment in cm_establish about lookup. */
	cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
		cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_RTU_COUNTER]);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));
	IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
		be32_to_cpu(cm_id_priv->remote_qpn));

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
			    private_data_len);
}
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len)
{
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		return -EINVAL;
	}

	if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
	    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		return ret;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		cm_free_msg(msg);
		return ret;
	}

	cm_id_priv->id.state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
	return 0;
}

int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
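/*
 * Teardown sketch (illustrative): once established, either side may send
 * a DREQ; the peer acknowledges with a DREP from its cm_handler and both
 * ends then pass through timewait:
 *
 *	ib_send_cm_dreq(cm_id, NULL, 0);	// initiating side
 *	...
 *	case IB_CM_DREQ_RECEIVED:		// responding side
 *		ib_send_cm_drep(cm_id, NULL, 0);
 *		break;
 */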
static void cm_format_drep(struct cm_drep_msg *drep_msg,
			   struct cm_id_private *cm_id_priv,
			   const void *private_data,
			   u8 private_data_len)
{
	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
			    private_data_len);
}

static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len)
{
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		pr_debug(
			"%s: local_id %d, cm_id->state(%d) != IB_CM_DREQ_RCVD\n",
			__func__, be32_to_cpu(cm_id_priv->id.local_id),
			cm_id_priv->id.state);
		kfree(private_data);
		return -EINVAL;
	}

	cm_set_private_data(cm_id_priv, private_data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		return ret;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	return 0;
}

int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	void *data;
	int ret;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
static int cm_issue_drep(struct cm_port *port,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_dreq_msg *dreq_msg;
	struct cm_drep_msg *drep_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
	drep_msg = (struct cm_drep_msg *) msg->mad;

	cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
	IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
		IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
	IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
		IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
		cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
	if (!cm_id_priv) {
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		cm_issue_drep(work->port, work->mad_recv_wc);
		pr_debug(
			"%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
			__func__, IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
			IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
		return -EINVAL;
	}

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->local_qpn !=
	    cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
			ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
		if (IS_ERR(msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
		    ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_DREQ_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_DREQ_COUNTER]);
		goto unlock;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	cm_id_priv->tid = dreq_msg->hdr.tid;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
		cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len)
{
	enum ib_cm_state state = cm_id_priv->id.state;
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_reset_to_idle(cm_id_priv);
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			return ret;
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_enter_timewait(cm_id_priv);
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			return ret;
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
			      ari, ari_length, private_data, private_data_len,
			      state);
		break;
	default:
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		return -EINVAL;
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}

	return 0;
}

int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
		   void *ari, u8 ari_length, const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
				 private_data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
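/*
 * Rejection sketch (illustrative; my_have_resources() is a hypothetical
 * ULP check): a listener that cannot honor a REQ bounces it with a
 * consumer-defined reason and optional private data:
 *
 *	case IB_CM_REQ_RECEIVED:
 *		if (!my_have_resources()) {
 *			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
 *				       NULL, 0, NULL, 0);
 *			break;
 *		}
 */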
static void cm_format_rej_event(struct cm_work *work)
{
	struct cm_rej_msg *rej_msg;
	struct ib_cm_rej_event_param *param;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.rej_rcvd;
	param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
	param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
	param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
}
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_id_private *cm_id_priv;
	__be32 remote_id;

	remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));

	if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
		cm_id_priv = cm_find_remote_id(
			*((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
			remote_id);
	} else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
		   CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			0);
	else
		cm_id_priv = cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
			remote_id);

	return cm_id_priv;
}
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		fallthrough;
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		fallthrough;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_enter_timewait(cm_id_priv);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
					      cm_id_priv->msg);
			cm_enter_timewait(cm_id_priv);
			break;
		}
		fallthrough;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}

	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	enum ib_cm_state cm_state;
	enum ib_cm_lap_state lap_state;
	enum cm_msg_response msg_response;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		cm_state = IB_CM_MRA_REQ_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REQ;
		break;
	case IB_CM_REP_RCVD:
		cm_state = IB_CM_MRA_REP_SENT;
		lap_state = cm_id->lap_state;
		msg_response = CM_MSG_RESPONSE_REP;
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id->lap_state == IB_CM_LAP_RCVD) {
			cm_state = cm_id->state;
			lap_state = IB_CM_MRA_LAP_SENT;
			msg_response = CM_MSG_RESPONSE_OTHER;
			break;
		}
		fallthrough;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		goto error1;
	}

	if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      msg_response, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(msg, NULL);
		if (ret)
			goto error2;
	}

	cm_id->state = cm_state;
	cm_id->lap_state = lap_state;
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
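/*
 * MRA sketch (illustrative): a service that needs longer than the peer's
 * CM response timeout stretches it before replying. A service_timeout of
 * 20 is roughly 4s in the 4.096us << n encoding; OR-ing in
 * IB_CM_MRA_FLAG_DELAY stores the timeout but defers the actual MRA until
 * a duplicate request arrives (see cm_dup_req_handler() above):
 *
 *	case IB_CM_REQ_RECEIVED:
 *		ib_send_cm_mra(cm_id, 20, NULL, 0);
 *		queue_work(my_wq, &my_slow_accept_work);	// hypothetical
 *		break;
 */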
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
	switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
	case CM_MSG_RESPONSE_REQ:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			0);
	case CM_MSG_RESPONSE_REP:
	case CM_MSG_RESPONSE_OTHER:
		return cm_acquire_id(
			cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
			cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
	default:
		return NULL;
	}
}
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	int timeout;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
	work->cm_event.param.mra_rcvd.service_timeout =
		IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
	timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.timeout);

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
			    CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  cm_id_priv->msg, timeout)) {
			if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
				atomic_long_inc(&work->port->
						counter_group[CM_RECV_DUPLICATES].
						counter[CM_MRA_COUNTER]);
			goto out;
		}
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_MRA_REP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_MRA_COUNTER]);
		fallthrough;
	default:
		pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		goto out;
	}

	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
					struct sa_path_rec *path)
{
	u32 lid;

	if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
					       lap_msg));
		sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
					       lap_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
		sa_path_set_dlid(path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
		sa_path_set_slid(path, lid);
	}
}
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
				    struct sa_path_rec *path,
				    struct cm_lap_msg *lap_msg)
{
	path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
	path->sgid =
		*IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
	path->flow_label =
		cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
	path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
	path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
	path->reversible = 1;
	path->pkey = cm_id_priv->pkey;
	path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
	path->mtu_selector = IB_SA_EQ;
	path->mtu = cm_id_priv->path_mtu;
	path->rate_selector = IB_SA_EQ;
	path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
	path->packet_life_time_selector = IB_SA_EQ;
	path->packet_life_time =
		IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
	path->packet_life_time -= (path->packet_life_time > 0);
	cm_format_path_lid_from_lap(lap_msg, path);
}
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
		cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
	if (!cm_id_priv)
		return -EINVAL;

	param = &work->cm_event.param.lap_rcvd;
	memset(&work->path[0], 0, sizeof(work->path[0]));
	cm_path_set_rec_type(work->port->cm_dev->ib_device,
			     work->port->port_num, &work->path[0],
			     IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
					     lap_msg));
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_UNINIT:
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
		if (IS_ERR(msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irq(&cm_id_priv->lock);

		if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
		    ib_post_send_mad(msg, NULL))
			cm_free_msg(msg);
		goto deref;
	case IB_CM_LAP_RCVD:
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_LAP_COUNTER]);
		goto unlock;
	default:
		goto unlock;
	}

	ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
				 work->mad_recv_wc->recv_buf.grh,
				 &cm_id_priv->av);
	if (ret)
		goto unlock;

	ret = cm_init_av_by_path(param->alternate_path, NULL,
				 &cm_id_priv->alt_av, cm_id_priv);
	if (ret)
		goto unlock;

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;

	/* Currently Alternate path messages are not supported for
	 * RoCE link layer.
	 */
	if (rdma_protocol_roce(work->port->cm_dev->ib_device,
			       work->port->port_num))
		return -EINVAL;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
		cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status =
		IBA_GET(CM_APR_AR_STATUS, apr_msg);
	work->cm_event.param.apr_rcvd.apr_info =
		IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
	work->cm_event.param.apr_rcvd.info_len =
		IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	cm_id_priv->msg = NULL;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;

	timewait_info = container_of(work, struct cm_timewait_info, work);
	spin_lock_irq(&cm.lock);
	list_del(&timewait_info->list);
	spin_unlock_irq(&cm.lock);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	cm_queue_work_unlock(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_req_param *param)
{
	cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
			  cm_form_tid(cm_id_priv));
	IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
		be16_to_cpu(param->path->pkey));
	IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
		be64_to_cpu(param->service_id));

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
			    param->private_data, param->private_data_len);
}
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, param->sgid_attr,
				 &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(msg, NULL);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
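/*
 * SIDR sketch (illustrative; the path record is assumed to be already
 * resolved by the caller): service ID resolution is a single
 * request/reply exchange used to discover a UD QPN and Q_Key:
 *
 *	struct ib_cm_sidr_req_param param = {
 *		.path		= &my_path_rec,		// hypothetical
 *		.service_id	= my_service_id,
 *		.timeout_ms	= 1000,
 *		.max_cm_retries	= 3,
 *	};
 *	ret = ib_send_cm_sidr_req(cm_id, &param);
 */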
static void cm_format_sidr_req_event(struct cm_work *work,
				     const struct cm_id_private *rx_cm_id,
				     struct ib_cm_id *listen_id)
{
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_cm_sidr_req_event_param *param;

	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_req_rcvd;
	param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
	param->listen_id = listen_id;
	param->service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = work->port->port_num;
	param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
}
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	int ret;

	cm_id_priv =
		cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
	if (IS_ERR(cm_id_priv))
		return PTR_ERR(cm_id_priv);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;

	cm_id_priv->id.remote_id =
		cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
	cm_id_priv->id.service_id =
		cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
	cm_id_priv->id.service_mask = ~cpu_to_be64(0);
	cm_id_priv->tid = sidr_req_msg->hdr.tid;

	wc = work->mad_recv_wc->wc;
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				      work->mad_recv_wc->recv_buf.grh,
				      &cm_id_priv->av);
	if (ret)
		goto out;

	spin_lock_irq(&cm.lock);
	listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
				counter[CM_SIDR_REQ_COUNTER]);
		goto out; /* Duplicate message. */
	}
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
					   cm_id_priv->id.service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irq(&cm.lock);
		ib_send_cm_sidr_rep(&cm_id_priv->id,
				    &(struct ib_cm_sidr_rep_param){
					    .status = IB_SIDR_UNSUPPORTED });
		goto out; /* No match. */
	}
	spin_unlock_irq(&cm.lock);

	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;

	/*
	 * A SIDR ID does not need to be in the xarray since it does not receive
	 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
	 * not enter timewait.
	 */

	cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);
	/*
	 * A pointer to the listen_cm_id is held in the event, so this deref
	 * must be after the event is delivered above.
	 */
	cm_deref_id(listen_cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
			       struct cm_id_private *cm_id_priv,
			       struct ib_cm_sidr_rep_param *param)
{
	cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
			      cm_id_priv->tid, param->ece.attr_mod);
	IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
	IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
	IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
		be64_to_cpu(cm_id_priv->id.service_id));
	IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
	IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
		param->ece.vendor_id & 0xFF);
	IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
		(param->ece.vendor_id >> 8) & 0xFF);

	if (param->info && param->info_length)
		IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
			    param->info, param->info_length);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
			    param->private_data, param->private_data_len);
}
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param)
{
	struct ib_mad_send_buf *msg;
	int ret;

	lockdep_assert_held(&cm_id_priv->lock);

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
		return -EINVAL;

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		return ret;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		cm_free_msg(msg);
		return ret;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	}
	return 0;
}
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = cm_send_sidr_rep_locked(cm_id_priv, param);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
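/*
 * The listener answers from its cm_handler (illustrative; the UD QP
 * backing the service is hypothetical ULP state):
 *
 *	case IB_CM_SIDR_REQ_RECEIVED:
 *		ib_send_cm_sidr_rep(cm_id, &(struct ib_cm_sidr_rep_param){
 *					.status = IB_SIDR_SUCCESS,
 *					.qp_num = my_ud_qp->qp_num,
 *					.qkey	= my_qkey,
 *				});
 *		break;
 */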
static void cm_format_sidr_rep_event(struct cm_work *work,
				     const struct cm_id_private *cm_id_priv)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct ib_cm_sidr_rep_event_param *param;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.sidr_rep_rcvd;
	param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
	param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
	param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
	param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
				      sidr_rep_msg);
	param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
				  sidr_rep_msg);
	param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
}
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(
		cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irq(&cm_id_priv->lock);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
	spin_unlock_irq(&cm_id_priv->lock);

	cm_format_sidr_rep_event(work, cm_id_priv);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irq(&cm_id_priv->lock);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
			     state, ib_wc_status_msg(wc_status));
	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irq(&cm_id_priv->lock);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irq(&cm_id_priv->lock);
	cm_free_msg(msg);
}
static void cm_send_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
	struct cm_port *port;
	u16 attr_index;

	port = mad_agent->context;
	attr_index = be16_to_cpu(((struct ib_mad_hdr *)
				  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;

	/*
	 * If the send was in response to a received message (context[0] is not
	 * set to a cm_id), and is not a REJ, then it is a send that was
	 * manually retried.
	 */
	if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
		msg->retries = 1;

	atomic_long_add(1 + msg->retries,
			&port->counter_group[CM_XMIT].counter[attr_index]);
	if (msg->retries)
		atomic_long_add(msg->retries,
				&port->counter_group[CM_XMIT_RETRIES].
				counter[attr_index]);

	switch (mad_send_wc->status) {
	case IB_WC_SUCCESS:
	case IB_WC_WR_FLUSH_ERR:
		cm_free_msg(msg);
		break;
	default:
		if (msg->context[0] && msg->context[1])
			cm_process_send_error(msg, mad_send_wc->status);
		else
			cm_free_msg(msg);
		break;
	}
}
static void cm_work_handler(struct work_struct *_work)
{
	struct cm_work *work = container_of(_work, struct cm_work, work.work);
	int ret;

	switch (work->cm_event.event) {
	case IB_CM_REQ_RECEIVED:
		ret = cm_req_handler(work);
		break;
	case IB_CM_MRA_RECEIVED:
		ret = cm_mra_handler(work);
		break;
	case IB_CM_REJ_RECEIVED:
		ret = cm_rej_handler(work);
		break;
	case IB_CM_REP_RECEIVED:
		ret = cm_rep_handler(work);
		break;
	case IB_CM_RTU_RECEIVED:
		ret = cm_rtu_handler(work);
		break;
	case IB_CM_USER_ESTABLISHED:
		ret = cm_establish_handler(work);
		break;
	case IB_CM_DREQ_RECEIVED:
		ret = cm_dreq_handler(work);
		break;
	case IB_CM_DREP_RECEIVED:
		ret = cm_drep_handler(work);
		break;
	case IB_CM_SIDR_REQ_RECEIVED:
		ret = cm_sidr_req_handler(work);
		break;
	case IB_CM_SIDR_REP_RECEIVED:
		ret = cm_sidr_rep_handler(work);
		break;
	case IB_CM_LAP_RECEIVED:
		ret = cm_lap_handler(work);
		break;
	case IB_CM_APR_RECEIVED:
		ret = cm_apr_handler(work);
		break;
	case IB_CM_TIMEWAIT_EXIT:
		ret = cm_timewait_handler(work);
		break;
	default:
		pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
		ret = -EINVAL;
		break;
	}
	if (ret)
		cm_free_work(work);
}
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;
	unsigned long flags;
	int ret = 0;
	struct cm_device *cm_dev;

	cm_dev = ib_get_client_data(cm_id->device, &cm_client);
	if (!cm_dev)
		return -ENODEV;

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return -ENOMEM;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state)
	{
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_id->state = IB_CM_ESTABLISHED;
		break;
	case IB_CM_ESTABLISHED:
		ret = -EISCONN;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
			 be32_to_cpu(cm_id->local_id), cm_id->state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret) {
		kfree(work);
		goto out;
	}

	/*
	 * The CM worker thread may try to destroy the cm_id before it
	 * can execute this work item. To prevent potential deadlock,
	 * we need to find the cm_id once we're in the context of the
	 * worker thread, rather than holding a reference on it.
	 */
	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->local_id = cm_id->local_id;
	work->remote_id = cm_id->remote_id;
	work->mad_recv_wc = NULL;
	work->cm_event.event = IB_CM_USER_ESTABLISHED;

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down) {
		queue_delayed_work(cm.wq, &work->work, 0);
	} else {
		kfree(work);
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

out:
	return ret;
}
static int cm_migrate(struct ib_cm_id *cm_id)
{
	struct cm_id_private *cm_id_priv;
	struct cm_av tmp_av;
	unsigned long flags;
	int tmp_send_port_not_ready;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_ESTABLISHED &&
	    (cm_id->lap_state == IB_CM_LAP_UNINIT ||
	     cm_id->lap_state == IB_CM_LAP_IDLE)) {
		cm_id->lap_state = IB_CM_LAP_IDLE;
		/* Swap address vector */
		tmp_av = cm_id_priv->av;
		cm_id_priv->av = cm_id_priv->alt_av;
		cm_id_priv->alt_av = tmp_av;
		/* Swap port send ready state */
		tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
		cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
		cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
	} else
		ret = -EINVAL;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
	int ret;

	switch (event) {
	case IB_EVENT_COMM_EST:
		ret = cm_establish(cm_id);
		break;
	case IB_EVENT_PATH_MIG:
		ret = cm_migrate(cm_id);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_notify);
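/*
 * ib_cm_notify() sketch (illustrative): ULPs forward QP async events the
 * CM cannot observe on its own, typically from their ib_event handler:
 *
 *	static void my_qp_event_handler(struct ib_event *event, void *ctx)
 *	{
 *		struct my_conn *conn = ctx;	// hypothetical per-QP state
 *
 *		if (event->event == IB_EVENT_COMM_EST)
 *			ib_cm_notify(conn->cm_id, event->event);
 *	}
 */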
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_port *port = mad_agent->context;
	struct cm_work *work;
	enum ib_cm_event_type event;
	bool alt_path = false;
	u16 attr_id;
	int paths = 0;
	int going_down = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		alt_path = cm_req_has_alt_path((struct cm_req_msg *)
					       mad_recv_wc->recv_buf.mad);
		paths = 1 + (alt_path != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
	atomic_long_inc(&port->counter_group[CM_RECV].
			counter[attr_id - CM_ATTR_ID_OFFSET]);

	work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_DELAYED_WORK(&work->work, cm_work_handler);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = port;

	/* Check if the device started its remove_one */
	spin_lock_irq(&cm.lock);
	if (!port->cm_dev->going_down)
		queue_delayed_work(cm.wq, &work->work, 0);
	else
		going_down = 1;
	spin_unlock_irq(&cm.lock);

	if (going_down) {
		kfree(work);
		ib_free_recv_mad(mad_recv_wc);
	}
}
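
/*
 * Fill in the attributes needed to transition a QP to the INIT state.
 * Remote write access is always enabled; remote read/atomic access only
 * when the connection has responder resources.
 */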
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
						    IB_ACCESS_REMOTE_ATOMIC;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
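
/*
 * Fill in the attributes needed to transition a QP to the RTR state:
 * primary path, path MTU, destination QPN and expected receive PSN,
 * plus the alternate path when one has been resolved.
 */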
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
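
/*
 * Fill in the attributes needed to transition a QP to the RTS state.
 * Before a LAP has been processed, this loads the send PSN, retry counts
 * and timeout; afterwards, only the alternate path and migration state
 * are reloaded, rearming the QP for path migration.
 */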
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT |
						 IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic =
					cm_id_priv->initiator_depth;
				fallthrough;
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
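
/*
 * ib_cm_init_qp_attr - Initialize QP attributes for the next transition.
 *
 * The caller sets qp_attr->qp_state to the target state (INIT, RTR or
 * RTS); the CM fills in the remaining attributes and the mask to pass to
 * ib_modify_qp().  A minimal consumer sketch (hypothetical cm_id/qp
 * variables, error handling elided):
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_RTR };
 *	int mask;
 *
 *	ib_cm_init_qp_attr(cm_id, &attr, &mask);
 *	ib_modify_qp(qp, &attr, mask);
 */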
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
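
/* sysfs read handler for a single counter within a port counter group */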
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

static char *cm_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
	.owner   = THIS_MODULE,
	.name    = "infiniband_cm",
	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);
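
/*
 * Register a sysfs stat object for each counter group on this port; on
 * failure, unwind the groups registered so far.
 */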
static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = ib_port_register_module_stat(port->cm_dev->ib_device,
						   port->port_num,
						   &port->counter_group[i].obj,
						   &cm_counter_obj_type,
						   counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
}
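
/*
 * Device-add callback for the ib_cm client: allocate the per-device CM
 * state and, for each CM-capable port, register counter groups and a GSI
 * MAD agent, then advertise IB_PORT_CM_SUP in the port capability mask.
 * Fails with -EOPNOTSUPP if no port supports the IB CM protocol.
 */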
static int cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u8 i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
	if (!cm_dev)
		return -ENOMEM;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port) {
			ret = -ENOMEM;
			goto error1;
		}

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		INIT_LIST_HEAD(&port->cm_priv_prim_list);
		INIT_LIST_HEAD(&port->cm_priv_altr_list);

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent)) {
			ret = PTR_ERR(port->mad_agent);
			goto error2;
		}

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return 0;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	kfree(port);
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
		kfree(port);
	}
free:
	kfree(cm_dev);
	return ret;
}
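
/*
 * Device-removal callback: flag the device as going down so no new work
 * is queued, flush in-flight work, then tear down MAD agents, counters
 * and per-port state in the reverse order of cm_add_one().
 */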
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/* Mark all the cm_id's as not valid */
		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);
		/*
		 * We flush the queue here after going_down is set, which
		 * ensures that no new work will be queued by the receive
		 * handler; only then is it safe to unregister the MAD agent.
		 */
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
		kfree(port);
	}

	kfree(cm_dev);
}
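
/*
 * Module init: initialize the global CM tables and locks, register the
 * infiniband_cm class, create the ib_cm workqueue on which CM events
 * are processed, and register the CM as an IB client.
 */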
static int __init ib_cm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret) {
		ret = -ENOMEM;
		goto error1;
	}

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	return ret;
}
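
/*
 * Module exit: cancel pending timewait work, detach from all devices,
 * drain the workqueue, and free any remaining timewait entries.
 */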
static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);