// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const ibcm_rej_reason_strs[] = {
	[IB_CM_REJ_NO_QP]			= "no QP",
	[IB_CM_REJ_NO_EEC]			= "no EEC",
	[IB_CM_REJ_NO_RESOURCES]		= "no resources",
	[IB_CM_REJ_TIMEOUT]			= "timeout",
	[IB_CM_REJ_UNSUPPORTED]			= "unsupported",
	[IB_CM_REJ_INVALID_COMM_ID]		= "invalid comm ID",
	[IB_CM_REJ_INVALID_COMM_INSTANCE]	= "invalid comm instance",
	[IB_CM_REJ_INVALID_SERVICE_ID]		= "invalid service ID",
	[IB_CM_REJ_INVALID_TRANSPORT_TYPE]	= "invalid transport type",
	[IB_CM_REJ_STALE_CONN]			= "stale conn",
	[IB_CM_REJ_RDC_NOT_EXIST]		= "RDC not exist",
	[IB_CM_REJ_INVALID_GID]			= "invalid GID",
	[IB_CM_REJ_INVALID_LID]			= "invalid LID",
	[IB_CM_REJ_INVALID_SL]			= "invalid SL",
	[IB_CM_REJ_INVALID_TRAFFIC_CLASS]	= "invalid traffic class",
	[IB_CM_REJ_INVALID_HOP_LIMIT]		= "invalid hop limit",
	[IB_CM_REJ_INVALID_PACKET_RATE]		= "invalid packet rate",
	[IB_CM_REJ_INVALID_ALT_GID]		= "invalid alt GID",
	[IB_CM_REJ_INVALID_ALT_LID]		= "invalid alt LID",
	[IB_CM_REJ_INVALID_ALT_SL]		= "invalid alt SL",
	[IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]	= "invalid alt traffic class",
	[IB_CM_REJ_INVALID_ALT_HOP_LIMIT]	= "invalid alt hop limit",
	[IB_CM_REJ_INVALID_ALT_PACKET_RATE]	= "invalid alt packet rate",
	[IB_CM_REJ_PORT_CM_REDIRECT]		= "port CM redirect",
	[IB_CM_REJ_PORT_REDIRECT]		= "port redirect",
	[IB_CM_REJ_INVALID_MTU]			= "invalid MTU",
	[IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES]	= "insufficient resp resources",
	[IB_CM_REJ_CONSUMER_DEFINED]		= "consumer defined",
	[IB_CM_REJ_INVALID_RNR_RETRY]		= "invalid RNR retry",
	[IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]	= "duplicate local comm ID",
	[IB_CM_REJ_INVALID_CLASS_VERSION]	= "invalid class version",
	[IB_CM_REJ_INVALID_FLOW_LABEL]		= "invalid flow label",
	[IB_CM_REJ_INVALID_ALT_FLOW_LABEL]	= "invalid alt flow label",
	[IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED] =
		"vendor option is not supported",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
	size_t index = reason;

	if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
	    ibcm_rej_reason_strs[index])
		return ibcm_rej_reason_strs[index];

	return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

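/*
 * Example (editorial note, not in the original source): passing
 * IB_CM_REJ_STALE_CONN to ibcm_reject_msg() returns "stale conn", while any
 * value outside the table above falls through to "unrecognized reason".
 */
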
static int cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work);
static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
				   struct ib_cm_sidr_rep_param *param);
static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
			       const void *private_data, u8 private_data_len);
static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
			       void *private_data, u8 private_data_len);
static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
			      enum ib_cm_rej_reason reason, void *ari,
			      u8 ari_length, const void *private_data,
			      u8 private_data_len);

static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

static struct ib_cm {
	spinlock_t lock;
	struct list_head device_list;
	rwlock_t device_lock;
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct xarray local_id_table;
	u32 local_id_next;
	__be32 random_id_operand;
	struct list_head timewait_list;
	struct workqueue_struct *wq;
	/* Sync on cm change port state */
	spinlock_t state_lock;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};

struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct list_head cm_priv_prim_list;
	struct list_head cm_priv_altr_list;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
	struct list_head list;
	struct ib_device *ib_device;
	u8 ack_delay;
	int going_down;
	struct cm_port *port[];
};

struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct rdma_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct sa_path_rec path[];
};

struct cm_timewait_info {
	struct cm_work work;
	struct list_head list;
	struct rb_node remote_qp_node;
	struct rb_node remote_id_node;
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;
	u8 inserted_remote_id;
};

struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;
	refcount_t refcount;
	/* Number of clients sharing this ib_cm_id. Only valid for listeners.
	 * Protected by the cm.lock spinlock. */
	int listen_sharecount;
	struct rcu_head rcu;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head prim_list;
	struct list_head altr_list;
	/* Indicates that the send port mad is registered and av is set */
	int prim_send_port_not_ready;
	int altr_send_port_not_ready;

	struct list_head work_list;
	atomic_t work_count;

	struct rdma_ucm_ece ece;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;
	struct cm_av *av;
	unsigned long flags, flags2;
	int ret = 0;

	/* don't let the port be released till the agent is down */
	spin_lock_irqsave(&cm.state_lock, flags2);
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_id_priv->prim_send_port_not_ready)
		av = &cm_id_priv->av;
	else if (!cm_id_priv->altr_send_port_not_ready &&
		 (cm_id_priv->alt_av.port))
		av = &cm_id_priv->alt_av;
	else {
		pr_info("%s: not valid CM id\n", __func__);
		ret = -ENODEV;
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out;
	}
	spin_unlock_irqrestore(&cm.lock, flags);
	/* Make sure the port hasn't released the MAD yet */
	mad_agent = cm_id_priv->av.port->mad_agent;
	if (!mad_agent) {
		pr_info("%s: not a valid MAD agent\n", __func__);
		ret = -ENODEV;
		goto out;
	}
	ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto out;
	}

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       av->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC,
			       IB_MGMT_BASE_VERSION);
	if (IS_ERR(m)) {
		rdma_destroy_ah(ah, 0);
		ret = PTR_ERR(m);
		goto out;
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	refcount_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;

out:
	spin_unlock_irqrestore(&cm.state_lock, flags2);
	return ret;
}

static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
							   struct ib_mad_recv_wc *mad_recv_wc)
{
	return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
				  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
				  GFP_ATOMIC,
				  IB_MGMT_BASE_VERSION);
}

static int cm_create_response_msg_ah(struct cm_port *port,
				     struct ib_mad_recv_wc *mad_recv_wc,
				     struct ib_mad_send_buf *msg)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	msg->ah = ah;
	return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	if (msg->ah)
		rdma_destroy_ah(msg->ah, 0);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}

static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	int ret;

	m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
	if (IS_ERR(m))
		return PTR_ERR(m);

	ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
	if (ret) {
		cm_free_msg(m);
		return ret;
	}

	*msg = m;
	return 0;
}

static void *cm_copy_private_data(const void *private_data,
				  u8 private_data_len)
{
	void *data;

	if (!private_data || !private_data_len)
		return NULL;

	data = kmemdup(private_data, private_data_len, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);

	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}

static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
			      struct ib_grh *grh, struct cm_av *av)
{
	struct rdma_ah_attr new_ah_attr;
	int ret;

	av->port = port;
	av->pkey_index = wc->pkey_index;

	/*
	 * av->ah_attr might be initialized based on past wc during incoming
	 * connect request or while sending out connect request. So initialize
	 * a new ah_attr on the stack. If initialization fails, the old
	 * ah_attr is used for sending any responses. If initialization is
	 * successful, the new ah_attr is used by overwriting the old one.
	 */
	ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				      port->port_num, wc,
				      grh, &new_ah_attr);
	if (ret)
		return ret;

	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				   struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
				       port->port_num, wc,
				       grh, &av->ah_attr);
}

static void add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
				   struct cm_av *av, struct cm_port *port)
{
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	if (&cm_id_priv->av == av)
		list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
	else if (&cm_id_priv->alt_av == av)
		list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
	else
		WARN_ON(true);
	spin_unlock_irqrestore(&cm.lock, flags);
}

static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;

	if (attr) {
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			if (cm_dev->ib_device == attr->device) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
	} else {
		/* The SGID attribute can be NULL in the following cases:
		 * (a) Alternative path
		 * (b) IB link layer without GRH
		 * (c) LAP send messages
		 */
		read_lock_irqsave(&cm.device_lock, flags);
		list_for_each_entry(cm_dev, &cm.device_list, list) {
			attr = rdma_find_gid(cm_dev->ib_device,
					     &path->sgid,
					     sa_conv_pathrec_to_gid_type(path),
					     NULL);
			if (!IS_ERR(attr)) {
				port = cm_dev->port[attr->port_num - 1];
				break;
			}
		}
		read_unlock_irqrestore(&cm.device_lock, flags);
		if (port)
			rdma_put_gid_attr(attr);
	}
	return port;
}

static int cm_init_av_by_path(struct sa_path_rec *path,
			      const struct ib_gid_attr *sgid_attr,
			      struct cm_av *av,
			      struct cm_id_private *cm_id_priv)
{
	struct rdma_ah_attr new_ah_attr;
	struct cm_device *cm_dev;
	struct cm_port *port;
	int ret;

	port = get_cm_port_from_path(path, sgid_attr);
	if (!port)
		return -EINVAL;
	cm_dev = port->cm_dev;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;

	/*
	 * av->ah_attr might be initialized based on a wc or during request
	 * processing time, and so might hold a reference to sgid_attr.
	 * Therefore initialize a new ah_attr on the stack. If initialization
	 * fails, the old ah_attr is used for sending any responses; if it
	 * succeeds, the new ah_attr overwrites the old one, so the right
	 * ah_attr can be used to return an error response.
	 */
	ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
					&new_ah_attr, sgid_attr);
	if (ret)
		return ret;

	av->timeout = path->packet_life_time + 1;
	add_cm_id_to_port_list(cm_id_priv, av, port);
	rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
	return 0;
}

static u32 cm_local_id(__be32 local_id)
{
	return (__force u32) (local_id ^ cm.random_id_operand);
}

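/*
 * Editorial note: local IDs handed to the wire are the xarray index XORed
 * with cm.random_id_operand (see cm_alloc_id_priv()), so cm_local_id()
 * recovers the index while the IDs seen by remote peers stay unpredictable.
 */
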
static struct cm_id_private *cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	rcu_read_lock();
	cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
	if (!cm_id_priv || cm_id_priv->id.remote_id != remote_id ||
	    !refcount_inc_not_zero(&cm_id_priv->refcount))
		cm_id_priv = NULL;
	rcu_read_unlock();

	return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}

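/*
 * Illustrative example (not in the original source): on a little-endian
 * host, be32_lt(cpu_to_be32(256), cpu_to_be32(2)) is true because the raw
 * values compared are 0x00010000 and 0x02000000. The order is not numeric,
 * but it is total and stable, which is all the rbtree keys require.
 */
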
/*
 * Inserts a new cm_id_priv into the listen_service_table. Returns cm_id_priv
 * if the new ID was inserted, NULL if it could not be inserted due to a
 * collision, or the existing cm_id_priv ready for shared usage.
 */
static struct cm_id_private *cm_insert_listen(struct cm_id_private *cm_id_priv,
					      ib_cm_handler shared_handler)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	unsigned long flags;

	spin_lock_irqsave(&cm.lock, flags);
	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device)) {
			/*
			 * Sharing an ib_cm_id with different handlers is not
			 * supported
			 */
			if (cur_cm_id_priv->id.cm_handler != shared_handler ||
			    cur_cm_id_priv->id.context ||
			    WARN_ON(!cur_cm_id_priv->id.cm_handler)) {
				spin_unlock_irqrestore(&cm.lock, flags);
				return NULL;
			}
			refcount_inc(&cur_cm_id_priv->refcount);
			cur_cm_id_priv->listen_sharecount++;
			spin_unlock_irqrestore(&cm.lock, flags);
			return cur_cm_id_priv;
		}

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else
			link = &(*link)->rb_right;
	}
	cm_id_priv->listen_sharecount++;
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return cm_id_priv;
}

static struct cm_id_private *cm_find_listen(struct ib_device *device,
					    __be64 service_id)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device)) {
			refcount_inc(&cm_id_priv->refcount);
			return cm_id_priv;
		}
		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct cm_timewait_info *
cm_insert_remote_id(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}

static struct cm_id_private *cm_find_remote_id(__be64 remote_ca_guid,
					       __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *res = NULL;

	spin_lock_irq(&cm.lock);
	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else {
			res = cm_acquire_id(timewait_info->work.local_id,
					    timewait_info->work.remote_id);
			break;
		}
	}
	spin_unlock_irq(&cm.lock);
	return res;
}

static struct cm_timewait_info *
cm_insert_remote_qpn(struct cm_timewait_info *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}

static struct cm_id_private *
cm_insert_remote_sidr(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;

			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}

static struct cm_id_private *cm_alloc_id_priv(struct ib_device *device,
					      ib_cm_handler cm_handler,
					      void *context)
{
	struct cm_id_private *cm_id_priv;
	u32 id;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;

	RB_CLEAR_NODE(&cm_id_priv->service_node);
	RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->prim_list);
	INIT_LIST_HEAD(&cm_id_priv->altr_list);
	atomic_set(&cm_id_priv->work_count, -1);
	refcount_set(&cm_id_priv->refcount, 1);

	ret = xa_alloc_cyclic_irq(&cm.local_id_table, &id, NULL, xa_limit_32b,
				  &cm.local_id_next, GFP_KERNEL);
	if (ret < 0)
		goto error;
	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;

	return cm_id_priv;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}

/*
 * Make the ID visible to the MAD handlers and other threads that use the
 * xarray.
 */
static void cm_finalize_id(struct cm_id_private *cm_id_priv)
{
	xa_store_irq(&cm.local_id_table, cm_local_id(cm_id_priv->id.local_id),
		     cm_id_priv, GFP_KERNEL);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = cm_alloc_id_priv(device, cm_handler, context);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	cm_finalize_id(cm_id_priv);
	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work *cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work;

	if (list_empty(&cm_id_priv->work_list))
		return NULL;

	work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
	list_del(&work->list);
	return work;
}

static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}

static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
				 struct cm_work *work)
	__releases(&cm_id_priv->lock)
{
	bool immediate;

	/*
	 * To deliver the event to the user callback we have to drop the
	 * spinlock, however, we need to ensure that the user callback is
	 * single threaded and receives events in temporal order. If there are
	 * already events being processed then thread new events onto a list,
	 * the thread currently processing will pick them up.
	 */
	immediate = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!immediate) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		/*
		 * This routine always consumes the incoming reference. Once
		 * queued to the work_list, a reference is held by the thread
		 * currently running cm_process_work() and this reference is
		 * not needed.
		 */
		cm_deref_id(cm_id_priv);
	}
	spin_unlock_irq(&cm_id_priv->lock);

	if (immediate)
		cm_process_work(cm_id_priv, work);
}

static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	return 1 << max(iba_time - 8, 0);
}

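/*
 * Worked example (editorial, not in the original source): iba_time = 20
 * encodes 4.096us * 2^20 ~= 4295 ms, and the approximation yields
 * 1 << (20 - 8) = 4096 ms; values of 8 or less clamp to 1 ms.
 */
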
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	return min(31, ack_timeout);
}

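/*
 * Worked example (editorial, not in the original source): with
 * ca_ack_delay = 15 and packet_life_time = 14, ack_timeout starts at 15;
 * since 15 >= 15 and 15 >= 14, the result is 16, i.e. the larger component
 * is rounded up because the other is within 50% of it. The min() caps the
 * result at 31, the largest value the 5-bit field can hold.
 */
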
static void cm_remove_remote(struct cm_id_private *cm_id_priv)
{
	struct cm_timewait_info *timewait_info = cm_id_priv->timewait_info;

	if (timewait_info->inserted_remote_id) {
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
		timewait_info->inserted_remote_id = 0;
	}

	if (timewait_info->inserted_remote_qp) {
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
		timewait_info->inserted_remote_qp = 0;
	}
}

static struct cm_timewait_info *cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *timewait_info;

	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
	if (!timewait_info)
		return ERR_PTR(-ENOMEM);

	timewait_info->work.local_id = local_id;
	INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
	timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
	int wait_time;
	unsigned long flags;
	struct cm_device *cm_dev;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
	if (!cm_dev)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_remove_remote(cm_id_priv);
	list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The cm_id could be destroyed by the user before we exit timewait.
	 * To protect against this, we search for the cm_id after exiting
	 * timewait before notifying the user that we've exited timewait.
	 */
	cm_id_priv->id.state = IB_CM_TIMEWAIT;
	wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

	/* Check if the device started its remove_one */
	spin_lock_irqsave(&cm.lock, flags);
	if (!cm_dev->going_down)
		queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
				   msecs_to_jiffies(wait_time));
	spin_unlock_irqrestore(&cm.lock, flags);

	/*
	 * The timewait_info is converted into a work and gets freed during
	 * cm_free_work() in cm_timewait_handler().
	 */
	BUILD_BUG_ON(offsetof(struct cm_timewait_info, work) != 0);
	cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	lockdep_assert_held(&cm_id_priv->lock);

	cm_id_priv->id.state = IB_CM_IDLE;
	if (cm_id_priv->timewait_info) {
		spin_lock_irqsave(&cm.lock, flags);
		cm_remove_remote(cm_id_priv);
		spin_unlock_irqrestore(&cm.lock, flags);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
	struct cm_id_private *cm_id_priv;
	struct cm_work *work;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irq(&cm_id_priv->lock);
retest:
	switch (cm_id->state) {
	case IB_CM_LISTEN:
		spin_lock(&cm.lock);
		if (--cm_id_priv->listen_sharecount > 0) {
			/* The id is still shared. */
			WARN_ON(refcount_read(&cm_id_priv->refcount) == 1);
			spin_unlock(&cm.lock);
			spin_unlock_irq(&cm_id_priv->lock);
			cm_deref_id(cm_id_priv);
			return;
		}
		cm_id->state = IB_CM_IDLE;
		rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
		RB_CLEAR_NODE(&cm_id_priv->service_node);
		spin_unlock(&cm.lock);
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id->state = IB_CM_IDLE;
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		break;
	case IB_CM_SIDR_REQ_RCVD:
		cm_send_sidr_rep_locked(cm_id_priv,
					&(struct ib_cm_sidr_rep_param){
						.status = IB_SIDR_REJECT });
		/* cm_send_sidr_rep_locked will not move to IDLE if it fails */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
				   &cm_id_priv->id.device->node_guid,
				   sizeof(cm_id_priv->id.device->node_guid),
				   NULL, 0);
		break;
	case IB_CM_REQ_RCVD:
		if (err == -ENOMEM) {
			/* Do not reject to allow future retries. */
			cm_reset_to_idle(cm_id_priv);
		} else {
			cm_send_rej_locked(cm_id_priv,
					   IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
					   NULL, 0);
		}
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		goto retest;
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
				   0, NULL, 0);
		break;
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			cm_id->state = IB_CM_IDLE;
			break;
		}
		cm_send_dreq_locked(cm_id_priv, NULL, 0);
		goto retest;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
		cm_enter_timewait(cm_id_priv);
		goto retest;
	case IB_CM_DREQ_RCVD:
		cm_send_drep_locked(cm_id_priv, NULL, 0);
		WARN_ON(cm_id->state != IB_CM_TIMEWAIT);
		goto retest;
	case IB_CM_TIMEWAIT:
		/*
		 * The cm_acquire_id in cm_timewait_handler will stop working
		 * once we do xa_erase below, so just move to idle here for
		 * consistency.
		 */
		cm_id->state = IB_CM_IDLE;
		break;
	case IB_CM_IDLE:
		break;
	}
	WARN_ON(cm_id->state != IB_CM_IDLE);

	spin_lock(&cm.lock);
	/* Required for cleanup paths related to cm_req_handler() */
	if (cm_id_priv->timewait_info) {
		cm_remove_remote(cm_id_priv);
		kfree(cm_id_priv->timewait_info);
		cm_id_priv->timewait_info = NULL;
	}
	if (!list_empty(&cm_id_priv->altr_list) &&
	    (!cm_id_priv->altr_send_port_not_ready))
		list_del(&cm_id_priv->altr_list);
	if (!list_empty(&cm_id_priv->prim_list) &&
	    (!cm_id_priv->prim_send_port_not_ready))
		list_del(&cm_id_priv->prim_list);
	WARN_ON(cm_id_priv->listen_sharecount);
	WARN_ON(!RB_EMPTY_NODE(&cm_id_priv->service_node));
	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock(&cm.lock);
	spin_unlock_irq(&cm_id_priv->lock);

	xa_erase_irq(&cm.local_id_table, cm_local_id(cm_id->local_id));
	cm_deref_id(cm_id_priv);
	wait_for_completion(&cm_id_priv->comp);
	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
		cm_free_work(work);

	rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
	rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
	kfree(cm_id_priv->private_data);
	kfree_rcu(cm_id_priv, rcu);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
	cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

static int cm_init_listen(struct cm_id_private *cm_id_priv, __be64 service_id,
			  __be64 service_mask)
{
	service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
	service_id &= service_mask;
	if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
	    (service_id != IB_CM_ASSIGN_SERVICE_ID))
		return -EINVAL;

	if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
		cm_id_priv->id.service_id = cpu_to_be64(cm.listen_service_id++);
		cm_id_priv->id.service_mask = ~cpu_to_be64(0);
	} else {
		cm_id_priv->id.service_id = service_id;
		cm_id_priv->id.service_mask = service_mask;
	}
	return 0;
}

/**
 * ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to the service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
	struct cm_id_private *cm_id_priv =
		container_of(cm_id, struct cm_id_private, id);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_init_listen(cm_id_priv, service_id, service_mask);
	if (ret)
		goto out;

	if (!cm_insert_listen(cm_id_priv, NULL)) {
		ret = -EBUSY;
		goto out;
	}

	cm_id_priv->id.state = IB_CM_LISTEN;
	ret = 0;

out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

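/*
 * Usage sketch (editorial, not in the original source; the service ID is an
 * arbitrary example, my_cm_handler/my_ctx are hypothetical, and error
 * handling is elided):
 *
 *	struct ib_cm_id *id;
 *
 *	id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *	if (!IS_ERR(id))
 *		ib_cm_listen(id, cpu_to_be64(0x1000000000000001ULL), 0);
 */
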
/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *   the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id.  All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
				     ib_cm_handler cm_handler,
				     __be64 service_id)
{
	struct cm_id_private *listen_id_priv;
	struct cm_id_private *cm_id_priv;
	int err;

	/* Create an ID in advance, since the creation may sleep */
	cm_id_priv = cm_alloc_id_priv(device, cm_handler, NULL);
	if (IS_ERR(cm_id_priv))
		return ERR_CAST(cm_id_priv);

	err = cm_init_listen(cm_id_priv, service_id, 0);
	if (err)
		return ERR_PTR(err);

	spin_lock_irq(&cm_id_priv->lock);
	listen_id_priv = cm_insert_listen(cm_id_priv, cm_handler);
	if (listen_id_priv != cm_id_priv) {
		spin_unlock_irq(&cm_id_priv->lock);
		ib_destroy_cm_id(&cm_id_priv->id);
		if (!listen_id_priv)
			return ERR_PTR(-EINVAL);
		return &listen_id_priv->id;
	}
	cm_id_priv->id.state = IB_CM_LISTEN;
	spin_unlock_irq(&cm_id_priv->lock);

	/*
	 * A listen ID does not need to be in the xarray since it does not
	 * receive mads, is not placed in the remote_id or remote_qpn rbtree,
	 * and does not enter timewait.
	 */

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
	u64 hi_tid, low_tid;

	hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
	low_tid = (u64)cm_id_priv->id.local_id;
	return cpu_to_be64(hi_tid | low_tid);
}

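/*
 * Layout note (editorial, not in the original source): the TID packs the
 * MAD agent's hi_tid into bits 63..32 and the local communication ID into
 * bits 31..0; e.g. hi_tid 0x5 and local_id 0x2a produce TID
 * 0x000000050000002a.
 */
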
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
			      __be16 attr_id, __be64 tid)
{
	hdr->base_version  = IB_MGMT_BASE_VERSION;
	hdr->mgmt_class	   = IB_MGMT_CLASS_CM;
	hdr->class_version = IB_CM_CLASS_VERSION;
	hdr->method	   = IB_MGMT_METHOD_SEND;
	hdr->attr_id	   = attr_id;
	hdr->tid	   = tid;
}

static void cm_format_mad_ece_hdr(struct ib_mad_hdr *hdr, __be16 attr_id,
				  __be64 tid, u32 attr_mod)
{
	cm_format_mad_hdr(hdr, attr_id, tid);
	hdr->attr_mod = cpu_to_be32(attr_mod);
}

static void cm_format_req(struct cm_req_msg *req_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_cm_req_param *param)
{
	struct sa_path_rec *pri_path = param->primary_path;
	struct sa_path_rec *alt_path = param->alternate_path;
	bool pri_ext = false;

	if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
		pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
					      pri_path->opa.slid);

	cm_format_mad_ece_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
			      cm_form_tid(cm_id_priv), param->ece.attr_mod);

	IBA_SET(CM_REQ_LOCAL_COMM_ID, req_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_REQ_SERVICE_ID, req_msg, be64_to_cpu(param->service_id));
	IBA_SET(CM_REQ_LOCAL_CA_GUID, req_msg,
		be64_to_cpu(cm_id_priv->id.device->node_guid));
	IBA_SET(CM_REQ_LOCAL_QPN, req_msg, param->qp_num);
	IBA_SET(CM_REQ_INITIATOR_DEPTH, req_msg, param->initiator_depth);
	IBA_SET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg,
		param->remote_cm_response_timeout);
	cm_req_set_qp_type(req_msg, param->qp_type);
	IBA_SET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg, param->flow_control);
	IBA_SET(CM_REQ_STARTING_PSN, req_msg, param->starting_psn);
	IBA_SET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg,
		param->local_cm_response_timeout);
	IBA_SET(CM_REQ_PARTITION_KEY, req_msg,
		be16_to_cpu(param->primary_path->pkey));
	IBA_SET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg,
		param->primary_path->mtu);
	IBA_SET(CM_REQ_MAX_CM_RETRIES, req_msg, param->max_cm_retries);

	if (param->qp_type != IB_QPT_XRC_INI) {
		IBA_SET(CM_REQ_RESPONDER_RESOURCES, req_msg,
			param->responder_resources);
		IBA_SET(CM_REQ_RETRY_COUNT, req_msg, param->retry_count);
		IBA_SET(CM_REQ_RNR_RETRY_COUNT, req_msg,
			param->rnr_retry_count);
		IBA_SET(CM_REQ_SRQ, req_msg, param->srq);
	}

	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg) =
		pri_path->sgid;
	*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg) =
		pri_path->dgid;
	if (pri_ext) {
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
		IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg)
			->global.interface_id =
			OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
	}
	if (pri_path->hop_limit <= 1) {
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
				    htons(ntohl(sa_path_get_slid(
					    pri_path)))));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(pri_ext ? 0 :
				    htons(ntohl(sa_path_get_dlid(
					    pri_path)))));
	} else {
		/* Work-around until there's a way to obtain remote LID info */
		IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
		IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
			be16_to_cpu(IB_LID_PERMISSIVE));
	}
	IBA_SET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg,
		be32_to_cpu(pri_path->flow_label));
	IBA_SET(CM_REQ_PRIMARY_PACKET_RATE, req_msg, pri_path->rate);
	IBA_SET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg, pri_path->traffic_class);
	IBA_SET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg, pri_path->hop_limit);
	IBA_SET(CM_REQ_PRIMARY_SL, req_msg, pri_path->sl);
	IBA_SET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg,
		(pri_path->hop_limit <= 1));
	IBA_SET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg,
		cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
			       pri_path->packet_life_time));

	if (alt_path) {
		bool alt_ext = false;

		if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
			alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
						      alt_path->opa.slid);

		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg) =
			alt_path->sgid;
		*IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg) =
			alt_path->dgid;
		if (alt_ext) {
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
			IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_REMOTE_PORT_GID,
					req_msg)
				->global.interface_id =
				OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
		}
		if (alt_path->hop_limit <= 1) {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
					htons(ntohl(sa_path_get_slid(
						alt_path)))));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(
					alt_ext ? 0 :
					htons(ntohl(sa_path_get_dlid(
						alt_path)))));
		} else {
			IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
			IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
				be16_to_cpu(IB_LID_PERMISSIVE));
		}
		IBA_SET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg,
			be32_to_cpu(alt_path->flow_label));
		IBA_SET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg, alt_path->rate);
		IBA_SET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg,
			alt_path->traffic_class);
		IBA_SET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg,
			alt_path->hop_limit);
		IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, alt_path->sl);
		IBA_SET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg,
			(alt_path->hop_limit <= 1));
		IBA_SET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg,
			cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
				       alt_path->packet_life_time));
	}
	IBA_SET(CM_REQ_VENDOR_ID, req_msg, param->ece.vendor_id);

	if (param->private_data && param->private_data_len)
		IBA_SET_MEM(CM_REQ_PRIVATE_DATA, req_msg, param->private_data,
			    param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
	if (!param->primary_path)
		return -EINVAL;

	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
	    param->qp_type != IB_QPT_XRC_INI)
		return -EINVAL;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	if (param->alternate_path &&
	    (param->alternate_path->pkey != param->primary_path->pkey ||
	     param->alternate_path->mtu != param->primary_path->mtu))
		return -EINVAL;

	return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
		   struct ib_cm_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct cm_req_msg *req_msg;
	unsigned long flags;
	int ret;

	ret = cm_validate_req_param(param);
	if (ret)
		return ret;

	/* Verify that we're not in timewait. */
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto out;
	}

	ret = cm_init_av_by_path(param->primary_path,
				 param->ppath_sgid_attr, &cm_id_priv->av,
				 cm_id_priv);
	if (ret)
		goto error1;
	if (param->alternate_path) {
		ret = cm_init_av_by_path(param->alternate_path, NULL,
					 &cm_id_priv->alt_av, cm_id_priv);
		if (ret)
			goto error1;
	}
	cm_id->service_id = param->service_id;
	cm_id->service_mask = ~cpu_to_be64(0);
	cm_id_priv->timeout_ms = cm_convert_to_ms(
				    param->primary_path->packet_life_time) * 2 +
				 cm_convert_to_ms(
				    param->remote_cm_response_timeout);
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->retry_count = param->retry_count;
	cm_id_priv->path_mtu = param->primary_path->mtu;
	cm_id_priv->pkey = param->primary_path->pkey;
	cm_id_priv->qp_type = param->qp_type;

	ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
	if (ret)
		goto error1;

	req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
	cm_format_req(req_msg, cm_id_priv, param);
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
	cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

	cm_id_priv->local_qpn = cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
	cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	ret = ib_post_send_mad(cm_id_priv->msg, NULL);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto error2;
	}
	BUG_ON(cm_id->state != IB_CM_IDLE);
	cm_id->state = IB_CM_REQ_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error2:	cm_free_msg(cm_id_priv->msg);
error1:	kfree(cm_id_priv->timewait_info);
out:	return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

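/*
 * Caller sketch (editorial, not in the original source; field values are
 * illustrative and error handling is elided):
 *
 *	struct ib_cm_req_param req = {
 *		.primary_path		= &path_rec,
 *		.service_id		= cpu_to_be64(0x1000000000000001ULL),
 *		.qp_num			= qp->qp_num,
 *		.qp_type		= IB_QPT_RC,
 *		.responder_resources	= 4,
 *		.initiator_depth	= 4,
 *		.retry_count		= 7,
 *		.rnr_retry_count	= 7,
 *		.max_cm_retries		= 15,
 *	};
 *
 *	ret = ib_send_cm_req(id, &req);
 */
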
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg, *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_LOCAL_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
		IBA_GET(CM_REJ_REMOTE_COMM_ID, rcv_msg));
	IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, msg_rejected);
	IBA_SET(CM_REJ_REASON, rej_msg, reason);

	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);

	return ret;
}

static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
	return ((cpu_to_be16(
			IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg))) ||
		(ib_is_opa_gid(IBA_GET_MEM_PTR(CM_REQ_ALTERNATE_LOCAL_PORT_GID,
					       req_msg))));
}

static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
				 struct sa_path_rec *path, union ib_gid *gid)
{
	if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
		path->rec_type = SA_PATH_REC_TYPE_OPA;
	else
		path->rec_type = SA_PATH_REC_TYPE_IB;
}

static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
					struct sa_path_rec *primary_path,
					struct sa_path_rec *alt_path)
{
	u32 lid;

	if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(primary_path,
				 IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
					 req_msg));
		sa_path_set_slid(primary_path,
				 IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(primary_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(primary_path, lid);
	}

	if (!cm_req_has_alt_path(req_msg))
		return;

	if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
		sa_path_set_dlid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
					 req_msg));
		sa_path_set_slid(alt_path,
				 IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
					 req_msg));
	} else {
		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg));
		sa_path_set_dlid(alt_path, lid);

		lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg));
		sa_path_set_slid(alt_path, lid);
	}
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
				     struct sa_path_rec *primary_path,
				     struct sa_path_rec *alt_path)
{
	primary_path->dgid =
		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID, req_msg);
	primary_path->sgid =
		*IBA_GET_MEM_PTR(CM_REQ_PRIMARY_REMOTE_PORT_GID, req_msg);
	primary_path->flow_label =
		cpu_to_be32(IBA_GET(CM_REQ_PRIMARY_FLOW_LABEL, req_msg));
	primary_path->hop_limit = IBA_GET(CM_REQ_PRIMARY_HOP_LIMIT, req_msg);
	primary_path->traffic_class =
		IBA_GET(CM_REQ_PRIMARY_TRAFFIC_CLASS, req_msg);
	primary_path->reversible = 1;
	primary_path->pkey =
		cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
	primary_path->sl = IBA_GET(CM_REQ_PRIMARY_SL, req_msg);
	primary_path->mtu_selector = IB_SA_EQ;
	primary_path->mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
	primary_path->rate_selector = IB_SA_EQ;
	primary_path->rate = IBA_GET(CM_REQ_PRIMARY_PACKET_RATE, req_msg);
	primary_path->packet_life_time_selector = IB_SA_EQ;
	primary_path->packet_life_time =
		IBA_GET(CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT, req_msg);
	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
	primary_path->service_id =
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
	if (sa_path_is_roce(primary_path))
		primary_path->roce.route_resolved = false;

	if (cm_req_has_alt_path(req_msg)) {
		alt_path->dgid = *IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_LOCAL_PORT_GID, req_msg);
		alt_path->sgid = *IBA_GET_MEM_PTR(
			CM_REQ_ALTERNATE_REMOTE_PORT_GID, req_msg);
		alt_path->flow_label = cpu_to_be32(
			IBA_GET(CM_REQ_ALTERNATE_FLOW_LABEL, req_msg));
		alt_path->hop_limit =
			IBA_GET(CM_REQ_ALTERNATE_HOP_LIMIT, req_msg);
		alt_path->traffic_class =
			IBA_GET(CM_REQ_ALTERNATE_TRAFFIC_CLASS, req_msg);
		alt_path->reversible = 1;
		alt_path->pkey =
			cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
		alt_path->sl = IBA_GET(CM_REQ_ALTERNATE_SL, req_msg);
		alt_path->mtu_selector = IB_SA_EQ;
		alt_path->mtu =
			IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
		alt_path->rate_selector = IB_SA_EQ;
		alt_path->rate = IBA_GET(CM_REQ_ALTERNATE_PACKET_RATE, req_msg);
		alt_path->packet_life_time_selector = IB_SA_EQ;
		alt_path->packet_life_time =
			IBA_GET(CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT, req_msg);
		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
		alt_path->service_id =
			cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));

		if (sa_path_is_roce(alt_path))
			alt_path->roce.route_resolved = false;
	}
	cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
}

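/*
 * Editorial note: the REQ carries the *sender's* view of the path, so the
 * remote's local port GID becomes our dgid and its remote port GID becomes
 * our sgid; the LID fields are swapped the same way in
 * cm_format_path_lid_from_req().
 */
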
static u16 cm_get_bth_pkey(struct cm_work *work)
{
	struct ib_device *ib_dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;
	u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
	u16 pkey;
	int ret;

	ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
	if (ret) {
		dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
				     port_num, pkey_index, ret);
		return 0;
	}

	return pkey;
}

/**
 * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
 * ULPs (such as IPoIB) do not understand OPA GIDs and will
 * reject them as the local_gid will not match the sgid. Therefore,
 * change the pathrec's SGID to an IB SGID.
 *
 * @work: Work completion
 * @path: Path record
 */
static void cm_opa_to_ib_sgid(struct cm_work *work,
			      struct sa_path_rec *path)
{
	struct ib_device *dev = work->port->cm_dev->ib_device;
	u8 port_num = work->port->port_num;

	if (rdma_cap_opa_ah(dev, port_num) &&
	    (ib_is_opa_gid(&path->sgid))) {
		union ib_gid sgid;

		if (rdma_query_gid(dev, port_num, 0, &sgid)) {
			dev_warn(&dev->dev,
				 "Error updating sgid in CM request\n");
			return;
		}

		path->sgid = sgid;
	}
}

static void cm_format_req_event(struct cm_work *work,
				struct cm_id_private *cm_id_priv,
				struct ib_cm_id *listen_id)
{
	struct cm_req_msg *req_msg;
	struct ib_cm_req_event_param *param;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
	param = &work->cm_event.param.req_rcvd;
	param->listen_id = listen_id;
	param->bth_pkey = cm_get_bth_pkey(work);
	param->port = cm_id_priv->av.port->port_num;
	param->primary_path = &work->path[0];
	cm_opa_to_ib_sgid(work, param->primary_path);
	if (cm_req_has_alt_path(req_msg)) {
		param->alternate_path = &work->path[1];
		cm_opa_to_ib_sgid(work, param->alternate_path);
	} else {
		param->alternate_path = NULL;
	}
	param->remote_ca_guid =
		cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
	param->remote_qkey = IBA_GET(CM_REQ_LOCAL_Q_KEY, req_msg);
	param->remote_qpn = IBA_GET(CM_REQ_LOCAL_QPN, req_msg);
	param->qp_type = cm_req_get_qp_type(req_msg);
	param->starting_psn = IBA_GET(CM_REQ_STARTING_PSN, req_msg);
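	/*
	 * Editorial note: the next two fields are deliberately crossed; the
	 * sender's initiator depth bounds our responder resources and vice
	 * versa.
	 */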
	param->responder_resources = IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
	param->initiator_depth = IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
	param->local_cm_response_timeout =
		IBA_GET(CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT, req_msg);
	param->flow_control = IBA_GET(CM_REQ_END_TO_END_FLOW_CONTROL, req_msg);
	param->remote_cm_response_timeout =
		IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg);
	param->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
	param->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
	param->srq = IBA_GET(CM_REQ_SRQ, req_msg);
	param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
	param->ece.vendor_id = IBA_GET(CM_REQ_VENDOR_ID, req_msg);
	param->ece.attr_mod = be32_to_cpu(req_msg->hdr.attr_mod);

	work->cm_event.private_data =
		IBA_GET_MEM_PTR(CM_REQ_PRIVATE_DATA, req_msg);
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irq(&cm_id_priv->lock);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irq(&cm_id_priv->lock);
		if (!work)
			return;

		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		cm_destroy_id(&cm_id_priv->id, ret);
}

static void cm_format_mra(struct cm_mra_msg *mra_msg,
			  struct cm_id_private *cm_id_priv,
			  enum cm_msg_response msg_mraed, u8 service_timeout,
			  const void *private_data, u8 private_data_len)
{
	cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_MRA_MESSAGE_MRAED, mra_msg, msg_mraed);
	IBA_SET(CM_MRA_LOCAL_COMM_ID, mra_msg,
		be32_to_cpu(cm_id_priv->id.local_id));
	IBA_SET(CM_MRA_REMOTE_COMM_ID, mra_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));
	IBA_SET(CM_MRA_SERVICE_TIMEOUT, mra_msg, service_timeout);

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_MRA_PRIVATE_DATA, mra_msg, private_data,
			    private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
			  struct cm_id_private *cm_id_priv,
			  enum ib_cm_rej_reason reason, void *ari,
			  u8 ari_length, const void *private_data,
			  u8 private_data_len, enum ib_cm_state state)
{
	lockdep_assert_held(&cm_id_priv->lock);

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
	IBA_SET(CM_REJ_REMOTE_COMM_ID, rej_msg,
		be32_to_cpu(cm_id_priv->id.remote_id));

	switch (state) {
	case IB_CM_REQ_RCVD:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg, be32_to_cpu(0));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_MRA_REQ_SENT:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REQ);
		break;
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg, CM_MSG_RESPONSE_REP);
		break;
	default:
		IBA_SET(CM_REJ_LOCAL_COMM_ID, rej_msg,
			be32_to_cpu(cm_id_priv->id.local_id));
		IBA_SET(CM_REJ_MESSAGE_REJECTED, rej_msg,
			CM_MSG_RESPONSE_OTHER);
		break;
	}

	IBA_SET(CM_REJ_REASON, rej_msg, reason);
	if (ari && ari_length) {
		IBA_SET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg, ari_length);
		IBA_SET_MEM(CM_REJ_ARI, rej_msg, ari, ari_length);
	}

	if (private_data && private_data_len)
		IBA_SET_MEM(CM_REJ_PRIVATE_DATA, rej_msg, private_data,
			    private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	int ret;

	atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
			counter[CM_REQ_COUNTER]);

	/* Quick state check to discard duplicate REQs. */
	spin_lock_irq(&cm_id_priv->lock);
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD) {
		spin_unlock_irq(&cm_id_priv->lock);
		return;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irq(&cm_id_priv->lock);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0,
			      IB_CM_TIMEWAIT);
		break;
	default:
		goto unlock;
	}
	spin_unlock_irq(&cm_id_priv->lock);

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		goto free;
	return;

unlock:	spin_unlock_irq(&cm_id_priv->lock);
free:	cm_free_msg(msg);
}

static struct cm_id_private *cm_match_req(struct cm_work *work,
					  struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for possible duplicate REQ. */
	spin_lock_irq(&cm.lock);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (timewait_info) {
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);
		spin_unlock_irq(&cm.lock);
		if (cur_cm_id_priv) {
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Check for stale connections. */
	timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
	if (timewait_info) {
		cm_remove_remote(cm_id_priv);
		cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
					       timewait_info->work.remote_id);

		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		if (cur_cm_id_priv) {
			ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
			cm_deref_id(cur_cm_id_priv);
		}
		return NULL;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(
		cm_id_priv->id.device,
		cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg)));
	if (!listen_cm_id_priv) {
		cm_remove_remote(cm_id_priv);
		spin_unlock_irq(&cm.lock);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		return NULL;
	}
	spin_unlock_irq(&cm.lock);
	return listen_cm_id_priv;
}

2031 * Work-around for inter-subnet connections. If the LIDs are permissive,
2032 * we need to override the LID/SL data in the REQ with the LID information
2033 * in the work completion.
2035 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
2037 if (!IBA_GET(CM_REQ_PRIMARY_SUBNET_LOCAL, req_msg)) {
2038 if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_LOCAL_PORT_LID,
2039 req_msg)) == IB_LID_PERMISSIVE) {
2040 IBA_SET(CM_REQ_PRIMARY_LOCAL_PORT_LID, req_msg,
2041 be16_to_cpu(ib_lid_be16(wc->slid)));
2042 IBA_SET(CM_REQ_PRIMARY_SL, req_msg, wc->sl);
2045 if (cpu_to_be16(IBA_GET(CM_REQ_PRIMARY_REMOTE_PORT_LID,
2046 req_msg)) == IB_LID_PERMISSIVE)
2047 IBA_SET(CM_REQ_PRIMARY_REMOTE_PORT_LID, req_msg,
2048 wc->dlid_path_bits);
2051 if (!IBA_GET(CM_REQ_ALTERNATE_SUBNET_LOCAL, req_msg)) {
2052 if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_LOCAL_PORT_LID,
2053 req_msg)) == IB_LID_PERMISSIVE) {
2054 IBA_SET(CM_REQ_ALTERNATE_LOCAL_PORT_LID, req_msg,
2055 be16_to_cpu(ib_lid_be16(wc->slid)));
2056 IBA_SET(CM_REQ_ALTERNATE_SL, req_msg, wc->sl);
2059 if (cpu_to_be16(IBA_GET(CM_REQ_ALTERNATE_REMOTE_PORT_LID,
2060 req_msg)) == IB_LID_PERMISSIVE)
2061 IBA_SET(CM_REQ_ALTERNATE_REMOTE_PORT_LID, req_msg,
2062 wc->dlid_path_bits);
2066 static int cm_req_handler(struct cm_work *work)
2068 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
2069 struct cm_req_msg *req_msg;
2070 const struct ib_global_route *grh;
2071 const struct ib_gid_attr *gid_attr;
2074 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
2077 cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
2078 if (IS_ERR(cm_id_priv))
2079 return PTR_ERR(cm_id_priv);
2081 cm_id_priv->id.remote_id =
2082 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_COMM_ID, req_msg));
2083 cm_id_priv->id.service_id =
2084 cpu_to_be64(IBA_GET(CM_REQ_SERVICE_ID, req_msg));
2085 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
2086 cm_id_priv->tid = req_msg->hdr.tid;
2087 cm_id_priv->timeout_ms = cm_convert_to_ms(
2088 IBA_GET(CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT, req_msg));
2089 cm_id_priv->max_cm_retries = IBA_GET(CM_REQ_MAX_CM_RETRIES, req_msg);
2090 cm_id_priv->remote_qpn =
2091 cpu_to_be32(IBA_GET(CM_REQ_LOCAL_QPN, req_msg));
2092 cm_id_priv->initiator_depth =
2093 IBA_GET(CM_REQ_RESPONDER_RESOURCES, req_msg);
2094 cm_id_priv->responder_resources =
2095 IBA_GET(CM_REQ_INITIATOR_DEPTH, req_msg);
2096 cm_id_priv->path_mtu = IBA_GET(CM_REQ_PATH_PACKET_PAYLOAD_MTU, req_msg);
2097 cm_id_priv->pkey = cpu_to_be16(IBA_GET(CM_REQ_PARTITION_KEY, req_msg));
2098 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REQ_STARTING_PSN, req_msg));
2099 cm_id_priv->retry_count = IBA_GET(CM_REQ_RETRY_COUNT, req_msg);
2100 cm_id_priv->rnr_retry_count = IBA_GET(CM_REQ_RNR_RETRY_COUNT, req_msg);
2101 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2103 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2104 work->mad_recv_wc->recv_buf.grh,
2108 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
2110 if (IS_ERR(cm_id_priv->timewait_info)) {
2111 ret = PTR_ERR(cm_id_priv->timewait_info);
2114 cm_id_priv->timewait_info->work.remote_id = cm_id_priv->id.remote_id;
2115 cm_id_priv->timewait_info->remote_ca_guid =
2116 cpu_to_be64(IBA_GET(CM_REQ_LOCAL_CA_GUID, req_msg));
2117 cm_id_priv->timewait_info->remote_qpn = cm_id_priv->remote_qpn;
2120 * Note that the ID pointer is not in the xarray at this point,
2121 * so this set is only visible to the local thread.
2123 cm_id_priv->id.state = IB_CM_REQ_RCVD;
2125 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
2126 if (!listen_cm_id_priv) {
2127 pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
2128 be32_to_cpu(cm_id_priv->id.local_id));
2129 cm_id_priv->id.state = IB_CM_IDLE;
2134 cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
2136 memset(&work->path[0], 0, sizeof(work->path[0]));
2137 if (cm_req_has_alt_path(req_msg))
2138 memset(&work->path[1], 0, sizeof(work->path[1]));
2139 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
2140 gid_attr = grh->sgid_attr;
2143 rdma_protocol_roce(work->port->cm_dev->ib_device,
2144 work->port->port_num)) {
2145 work->path[0].rec_type =
2146 sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
2148 cm_path_set_rec_type(
2149 work->port->cm_dev->ib_device, work->port->port_num,
2151 IBA_GET_MEM_PTR(CM_REQ_PRIMARY_LOCAL_PORT_GID,
2154 if (cm_req_has_alt_path(req_msg))
2155 work->path[1].rec_type = work->path[0].rec_type;
2156 cm_format_paths_from_req(req_msg, &work->path[0],
2158 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
2159 sa_path_set_dmac(&work->path[0],
2160 cm_id_priv->av.ah_attr.roce.dmac);
2161 work->path[0].hop_limit = grh->hop_limit;
2162 ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
2167 err = rdma_query_gid(work->port->cm_dev->ib_device,
2168 work->port->port_num, 0,
2169 &work->path[0].sgid);
2171 ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2174 ib_send_cm_rej(&cm_id_priv->id, IB_CM_REJ_INVALID_GID,
2175 &work->path[0].sgid,
2176 sizeof(work->path[0].sgid),
2180 if (cm_req_has_alt_path(req_msg)) {
2181 ret = cm_init_av_by_path(&work->path[1], NULL,
2182 &cm_id_priv->alt_av, cm_id_priv);
2184 ib_send_cm_rej(&cm_id_priv->id,
2185 IB_CM_REJ_INVALID_ALT_GID,
2186 &work->path[0].sgid,
2187 sizeof(work->path[0].sgid), NULL, 0);
2192 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
2193 cm_id_priv->id.context = listen_cm_id_priv->id.context;
2194 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2196 /* Now MAD handlers can see the new ID */
2197 spin_lock_irq(&cm_id_priv->lock);
2198 cm_finalize_id(cm_id_priv);
2200 /* Refcount belongs to the event, pairs with cm_process_work() */
2201 refcount_inc(&cm_id_priv->refcount);
2202 cm_queue_work_unlock(cm_id_priv, work);
2204 * Since this ID was just created and was not made visible to other MAD
2205 * handlers until the cm_finalize_id() above we know that the
2206 * cm_process_work() will deliver the event and the listen_cm_id
2207 * embedded in the event can be derefed here.
2209 cm_deref_id(listen_cm_id_priv);
2213 cm_deref_id(listen_cm_id_priv);
2215 ib_destroy_cm_id(&cm_id_priv->id);
2219 static void cm_format_rep(struct cm_rep_msg *rep_msg,
2220 struct cm_id_private *cm_id_priv,
2221 struct ib_cm_rep_param *param)
2223 cm_format_mad_ece_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid,
2224 param->ece.attr_mod);
2225 IBA_SET(CM_REP_LOCAL_COMM_ID, rep_msg,
2226 be32_to_cpu(cm_id_priv->id.local_id));
2227 IBA_SET(CM_REP_REMOTE_COMM_ID, rep_msg,
2228 be32_to_cpu(cm_id_priv->id.remote_id));
2229 IBA_SET(CM_REP_STARTING_PSN, rep_msg, param->starting_psn);
2230 IBA_SET(CM_REP_RESPONDER_RESOURCES, rep_msg,
2231 param->responder_resources);
2232 IBA_SET(CM_REP_TARGET_ACK_DELAY, rep_msg,
2233 cm_id_priv->av.port->cm_dev->ack_delay);
2234 IBA_SET(CM_REP_FAILOVER_ACCEPTED, rep_msg, param->failover_accepted);
2235 IBA_SET(CM_REP_RNR_RETRY_COUNT, rep_msg, param->rnr_retry_count);
2236 IBA_SET(CM_REP_LOCAL_CA_GUID, rep_msg,
2237 be64_to_cpu(cm_id_priv->id.device->node_guid));
2239 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2240 IBA_SET(CM_REP_INITIATOR_DEPTH, rep_msg,
2241 param->initiator_depth);
2242 IBA_SET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg,
2243 param->flow_control);
2244 IBA_SET(CM_REP_SRQ, rep_msg, param->srq);
2245 IBA_SET(CM_REP_LOCAL_QPN, rep_msg, param->qp_num);
2247 IBA_SET(CM_REP_SRQ, rep_msg, 1);
2248 IBA_SET(CM_REP_LOCAL_EE_CONTEXT_NUMBER, rep_msg, param->qp_num);
2251 IBA_SET(CM_REP_VENDOR_ID_L, rep_msg, param->ece.vendor_id);
2252 IBA_SET(CM_REP_VENDOR_ID_M, rep_msg, param->ece.vendor_id >> 8);
2253 IBA_SET(CM_REP_VENDOR_ID_H, rep_msg, param->ece.vendor_id >> 16);
2255 if (param->private_data && param->private_data_len)
2256 IBA_SET_MEM(CM_REP_PRIVATE_DATA, rep_msg, param->private_data,
2257 param->private_data_len);
2260 int ib_send_cm_rep(struct ib_cm_id *cm_id,
2261 struct ib_cm_rep_param *param)
2263 struct cm_id_private *cm_id_priv;
2264 struct ib_mad_send_buf *msg;
2265 struct cm_rep_msg *rep_msg;
2266 unsigned long flags;
2269 if (param->private_data &&
2270 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2273 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2274 spin_lock_irqsave(&cm_id_priv->lock, flags);
2275 if (cm_id->state != IB_CM_REQ_RCVD &&
2276 cm_id->state != IB_CM_MRA_REQ_SENT) {
2277 pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
2278 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2283 ret = cm_alloc_msg(cm_id_priv, &msg);
2287 rep_msg = (struct cm_rep_msg *) msg->mad;
2288 cm_format_rep(rep_msg, cm_id_priv, param);
2289 msg->timeout_ms = cm_id_priv->timeout_ms;
2290 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2292 ret = ib_post_send_mad(msg, NULL);
2294 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2299 cm_id->state = IB_CM_REP_SENT;
2300 cm_id_priv->msg = msg;
2301 cm_id_priv->initiator_depth = param->initiator_depth;
2302 cm_id_priv->responder_resources = param->responder_resources;
2303 cm_id_priv->rq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2304 WARN_ONCE(param->qp_num & 0xFF000000,
2305 "IBTA declares QPN to be 24 bits, but it is 0x%X\n",
2307 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2309 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2312 EXPORT_SYMBOL(ib_send_cm_rep);
2314 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2315 struct cm_id_private *cm_id_priv,
2316 const void *private_data,
2317 u8 private_data_len)
2319 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2320 IBA_SET(CM_RTU_LOCAL_COMM_ID, rtu_msg,
2321 be32_to_cpu(cm_id_priv->id.local_id));
2322 IBA_SET(CM_RTU_REMOTE_COMM_ID, rtu_msg,
2323 be32_to_cpu(cm_id_priv->id.remote_id));
2325 if (private_data && private_data_len)
2326 IBA_SET_MEM(CM_RTU_PRIVATE_DATA, rtu_msg, private_data,
2330 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2331 const void *private_data,
2332 u8 private_data_len)
2334 struct cm_id_private *cm_id_priv;
2335 struct ib_mad_send_buf *msg;
2336 unsigned long flags;
2340 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2343 data = cm_copy_private_data(private_data, private_data_len);
2345 return PTR_ERR(data);
2347 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2348 spin_lock_irqsave(&cm_id_priv->lock, flags);
2349 if (cm_id->state != IB_CM_REP_RCVD &&
2350 cm_id->state != IB_CM_MRA_REP_SENT) {
2351 pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
2352 be32_to_cpu(cm_id->local_id), cm_id->state);
2357 ret = cm_alloc_msg(cm_id_priv, &msg);
2361 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2362 private_data, private_data_len);
2364 ret = ib_post_send_mad(msg, NULL);
2366 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2372 cm_id->state = IB_CM_ESTABLISHED;
2373 cm_set_private_data(cm_id_priv, data, private_data_len);
2374 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2377 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2381 EXPORT_SYMBOL(ib_send_cm_rtu);
2383 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2385 struct cm_rep_msg *rep_msg;
2386 struct ib_cm_rep_event_param *param;
2388 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2389 param = &work->cm_event.param.rep_rcvd;
2390 param->remote_ca_guid =
2391 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2392 param->remote_qkey = IBA_GET(CM_REP_LOCAL_Q_KEY, rep_msg);
2393 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2394 param->starting_psn = IBA_GET(CM_REP_STARTING_PSN, rep_msg);
2395 param->responder_resources = IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2396 param->initiator_depth = IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2397 param->target_ack_delay = IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2398 param->failover_accepted = IBA_GET(CM_REP_FAILOVER_ACCEPTED, rep_msg);
2399 param->flow_control = IBA_GET(CM_REP_END_TO_END_FLOW_CONTROL, rep_msg);
2400 param->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2401 param->srq = IBA_GET(CM_REP_SRQ, rep_msg);
2402 param->ece.vendor_id = IBA_GET(CM_REP_VENDOR_ID_H, rep_msg) << 16;
2403 param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_M, rep_msg) << 8;
2404 param->ece.vendor_id |= IBA_GET(CM_REP_VENDOR_ID_L, rep_msg);
2405 param->ece.attr_mod = be32_to_cpu(rep_msg->hdr.attr_mod);
2407 work->cm_event.private_data =
2408 IBA_GET_MEM_PTR(CM_REP_PRIVATE_DATA, rep_msg);
2411 static void cm_dup_rep_handler(struct cm_work *work)
2413 struct cm_id_private *cm_id_priv;
2414 struct cm_rep_msg *rep_msg;
2415 struct ib_mad_send_buf *msg = NULL;
2418 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2419 cm_id_priv = cm_acquire_id(
2420 cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)),
2421 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg)));
2425 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2426 counter[CM_REP_COUNTER]);
2427 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2431 spin_lock_irq(&cm_id_priv->lock);
2432 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2433 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2434 cm_id_priv->private_data,
2435 cm_id_priv->private_data_len);
2436 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2437 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2438 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2439 cm_id_priv->private_data,
2440 cm_id_priv->private_data_len);
2443 spin_unlock_irq(&cm_id_priv->lock);
2445 ret = ib_post_send_mad(msg, NULL);
2450 unlock: spin_unlock_irq(&cm_id_priv->lock);
2451 free: cm_free_msg(msg);
2452 deref: cm_deref_id(cm_id_priv);
2455 static int cm_rep_handler(struct cm_work *work)
2457 struct cm_id_private *cm_id_priv;
2458 struct cm_rep_msg *rep_msg;
2460 struct cm_id_private *cur_cm_id_priv;
2461 struct cm_timewait_info *timewait_info;
2463 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2464 cm_id_priv = cm_acquire_id(
2465 cpu_to_be32(IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg)), 0);
2467 cm_dup_rep_handler(work);
2468 pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
2469 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2473 cm_format_rep_event(work, cm_id_priv->qp_type);
2475 spin_lock_irq(&cm_id_priv->lock);
2476 switch (cm_id_priv->id.state) {
2477 case IB_CM_REQ_SENT:
2478 case IB_CM_MRA_REQ_RCVD:
2483 "%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
2484 __func__, cm_id_priv->id.state,
2485 IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2486 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2487 spin_unlock_irq(&cm_id_priv->lock);
2491 cm_id_priv->timewait_info->work.remote_id =
2492 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2493 cm_id_priv->timewait_info->remote_ca_guid =
2494 cpu_to_be64(IBA_GET(CM_REP_LOCAL_CA_GUID, rep_msg));
2495 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2497 spin_lock(&cm.lock);
2498 /* Check for duplicate REP. */
2499 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2500 spin_unlock(&cm.lock);
2501 spin_unlock_irq(&cm_id_priv->lock);
2503 pr_debug("%s: Failed to insert remote id %d\n", __func__,
2504 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2507 /* Check for a stale connection. */
2508 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2509 if (timewait_info) {
2510 cm_remove_remote(cm_id_priv);
2511 cur_cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2512 timewait_info->work.remote_id);
2514 spin_unlock(&cm.lock);
2515 spin_unlock_irq(&cm_id_priv->lock);
2516 cm_issue_rej(work->port, work->mad_recv_wc,
2517 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2521 "%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
2522 __func__, IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg),
2523 IBA_GET(CM_REP_REMOTE_COMM_ID, rep_msg));
2525 if (cur_cm_id_priv) {
2526 ib_send_cm_dreq(&cur_cm_id_priv->id, NULL, 0);
2527 cm_deref_id(cur_cm_id_priv);
2532 spin_unlock(&cm.lock);
2534 cm_id_priv->id.state = IB_CM_REP_RCVD;
2535 cm_id_priv->id.remote_id =
2536 cpu_to_be32(IBA_GET(CM_REP_LOCAL_COMM_ID, rep_msg));
2537 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2538 cm_id_priv->initiator_depth =
2539 IBA_GET(CM_REP_RESPONDER_RESOURCES, rep_msg);
2540 cm_id_priv->responder_resources =
2541 IBA_GET(CM_REP_INITIATOR_DEPTH, rep_msg);
2542 cm_id_priv->sq_psn = cpu_to_be32(IBA_GET(CM_REP_STARTING_PSN, rep_msg));
2543 cm_id_priv->rnr_retry_count = IBA_GET(CM_REP_RNR_RETRY_COUNT, rep_msg);
2544 cm_id_priv->target_ack_delay =
2545 IBA_GET(CM_REP_TARGET_ACK_DELAY, rep_msg);
2546 cm_id_priv->av.timeout =
2547 cm_ack_timeout(cm_id_priv->target_ack_delay,
2548 cm_id_priv->av.timeout - 1);
2549 cm_id_priv->alt_av.timeout =
2550 cm_ack_timeout(cm_id_priv->target_ack_delay,
2551 cm_id_priv->alt_av.timeout - 1);
2553 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2554 cm_queue_work_unlock(cm_id_priv, work);
2558 cm_deref_id(cm_id_priv);
2562 static int cm_establish_handler(struct cm_work *work)
2564 struct cm_id_private *cm_id_priv;
2566 /* See comment in cm_establish about lookup. */
2567 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2571 spin_lock_irq(&cm_id_priv->lock);
2572 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2573 spin_unlock_irq(&cm_id_priv->lock);
2577 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2578 cm_queue_work_unlock(cm_id_priv, work);
2581 cm_deref_id(cm_id_priv);
2585 static int cm_rtu_handler(struct cm_work *work)
2587 struct cm_id_private *cm_id_priv;
2588 struct cm_rtu_msg *rtu_msg;
2590 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2591 cm_id_priv = cm_acquire_id(
2592 cpu_to_be32(IBA_GET(CM_RTU_REMOTE_COMM_ID, rtu_msg)),
2593 cpu_to_be32(IBA_GET(CM_RTU_LOCAL_COMM_ID, rtu_msg)));
2597 work->cm_event.private_data =
2598 IBA_GET_MEM_PTR(CM_RTU_PRIVATE_DATA, rtu_msg);
2600 spin_lock_irq(&cm_id_priv->lock);
2601 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2602 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2603 spin_unlock_irq(&cm_id_priv->lock);
2604 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2605 counter[CM_RTU_COUNTER]);
2608 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2610 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2611 cm_queue_work_unlock(cm_id_priv, work);
2614 cm_deref_id(cm_id_priv);
2618 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2619 struct cm_id_private *cm_id_priv,
2620 const void *private_data,
2621 u8 private_data_len)
2623 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2624 cm_form_tid(cm_id_priv));
2625 IBA_SET(CM_DREQ_LOCAL_COMM_ID, dreq_msg,
2626 be32_to_cpu(cm_id_priv->id.local_id));
2627 IBA_SET(CM_DREQ_REMOTE_COMM_ID, dreq_msg,
2628 be32_to_cpu(cm_id_priv->id.remote_id));
2629 IBA_SET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg,
2630 be32_to_cpu(cm_id_priv->remote_qpn));
2632 if (private_data && private_data_len)
2633 IBA_SET_MEM(CM_DREQ_PRIVATE_DATA, dreq_msg, private_data,
2637 static int cm_send_dreq_locked(struct cm_id_private *cm_id_priv,
2638 const void *private_data, u8 private_data_len)
2640 struct ib_mad_send_buf *msg;
2643 lockdep_assert_held(&cm_id_priv->lock);
2645 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2648 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2649 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2650 be32_to_cpu(cm_id_priv->id.local_id),
2651 cm_id_priv->id.state);
2655 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2656 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2657 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2659 ret = cm_alloc_msg(cm_id_priv, &msg);
2661 cm_enter_timewait(cm_id_priv);
2665 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2666 private_data, private_data_len);
2667 msg->timeout_ms = cm_id_priv->timeout_ms;
2668 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2670 ret = ib_post_send_mad(msg, NULL);
2672 cm_enter_timewait(cm_id_priv);
2677 cm_id_priv->id.state = IB_CM_DREQ_SENT;
2678 cm_id_priv->msg = msg;
2682 int ib_send_cm_dreq(struct ib_cm_id *cm_id, const void *private_data,
2683 u8 private_data_len)
2685 struct cm_id_private *cm_id_priv =
2686 container_of(cm_id, struct cm_id_private, id);
2687 unsigned long flags;
2690 spin_lock_irqsave(&cm_id_priv->lock, flags);
2691 ret = cm_send_dreq_locked(cm_id_priv, private_data, private_data_len);
2692 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2695 EXPORT_SYMBOL(ib_send_cm_dreq);
2697 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2698 struct cm_id_private *cm_id_priv,
2699 const void *private_data,
2700 u8 private_data_len)
2702 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2703 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2704 be32_to_cpu(cm_id_priv->id.local_id));
2705 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2706 be32_to_cpu(cm_id_priv->id.remote_id));
2708 if (private_data && private_data_len)
2709 IBA_SET_MEM(CM_DREP_PRIVATE_DATA, drep_msg, private_data,
2713 static int cm_send_drep_locked(struct cm_id_private *cm_id_priv,
2714 void *private_data, u8 private_data_len)
2716 struct ib_mad_send_buf *msg;
2719 lockdep_assert_held(&cm_id_priv->lock);
2721 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2724 if (cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2726 "%s: local_id %d, cm_idcm_id->state(%d) != IB_CM_DREQ_RCVD\n",
2727 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2728 cm_id_priv->id.state);
2729 kfree(private_data);
2733 cm_set_private_data(cm_id_priv, private_data, private_data_len);
2734 cm_enter_timewait(cm_id_priv);
2736 ret = cm_alloc_msg(cm_id_priv, &msg);
2740 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2741 private_data, private_data_len);
2743 ret = ib_post_send_mad(msg, NULL);
2751 int ib_send_cm_drep(struct ib_cm_id *cm_id, const void *private_data,
2752 u8 private_data_len)
2754 struct cm_id_private *cm_id_priv =
2755 container_of(cm_id, struct cm_id_private, id);
2756 unsigned long flags;
2760 data = cm_copy_private_data(private_data, private_data_len);
2762 return PTR_ERR(data);
2764 spin_lock_irqsave(&cm_id_priv->lock, flags);
2765 ret = cm_send_drep_locked(cm_id_priv, data, private_data_len);
2766 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2769 EXPORT_SYMBOL(ib_send_cm_drep);
2771 static int cm_issue_drep(struct cm_port *port,
2772 struct ib_mad_recv_wc *mad_recv_wc)
2774 struct ib_mad_send_buf *msg = NULL;
2775 struct cm_dreq_msg *dreq_msg;
2776 struct cm_drep_msg *drep_msg;
2779 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2783 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2784 drep_msg = (struct cm_drep_msg *) msg->mad;
2786 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2787 IBA_SET(CM_DREP_REMOTE_COMM_ID, drep_msg,
2788 IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg));
2789 IBA_SET(CM_DREP_LOCAL_COMM_ID, drep_msg,
2790 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2792 ret = ib_post_send_mad(msg, NULL);
2799 static int cm_dreq_handler(struct cm_work *work)
2801 struct cm_id_private *cm_id_priv;
2802 struct cm_dreq_msg *dreq_msg;
2803 struct ib_mad_send_buf *msg = NULL;
2805 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2806 cm_id_priv = cm_acquire_id(
2807 cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg)),
2808 cpu_to_be32(IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg)));
2810 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2811 counter[CM_DREQ_COUNTER]);
2812 cm_issue_drep(work->port, work->mad_recv_wc);
2814 "%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
2815 __func__, IBA_GET(CM_DREQ_LOCAL_COMM_ID, dreq_msg),
2816 IBA_GET(CM_DREQ_REMOTE_COMM_ID, dreq_msg));
2820 work->cm_event.private_data =
2821 IBA_GET_MEM_PTR(CM_DREQ_PRIVATE_DATA, dreq_msg);
2823 spin_lock_irq(&cm_id_priv->lock);
2824 if (cm_id_priv->local_qpn !=
2825 cpu_to_be32(IBA_GET(CM_DREQ_REMOTE_QPN_EECN, dreq_msg)))
2828 switch (cm_id_priv->id.state) {
2829 case IB_CM_REP_SENT:
2830 case IB_CM_DREQ_SENT:
2831 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2833 case IB_CM_ESTABLISHED:
2834 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2835 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2836 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2838 case IB_CM_MRA_REP_RCVD:
2840 case IB_CM_TIMEWAIT:
2841 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2842 counter[CM_DREQ_COUNTER]);
2843 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2847 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2848 cm_id_priv->private_data,
2849 cm_id_priv->private_data_len);
2850 spin_unlock_irq(&cm_id_priv->lock);
2852 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2853 ib_post_send_mad(msg, NULL))
2856 case IB_CM_DREQ_RCVD:
2857 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2858 counter[CM_DREQ_COUNTER]);
2861 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2862 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2863 cm_id_priv->id.state);
2866 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2867 cm_id_priv->tid = dreq_msg->hdr.tid;
2868 cm_queue_work_unlock(cm_id_priv, work);
2871 unlock: spin_unlock_irq(&cm_id_priv->lock);
2872 deref: cm_deref_id(cm_id_priv);
2876 static int cm_drep_handler(struct cm_work *work)
2878 struct cm_id_private *cm_id_priv;
2879 struct cm_drep_msg *drep_msg;
2881 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2882 cm_id_priv = cm_acquire_id(
2883 cpu_to_be32(IBA_GET(CM_DREP_REMOTE_COMM_ID, drep_msg)),
2884 cpu_to_be32(IBA_GET(CM_DREP_LOCAL_COMM_ID, drep_msg)));
2888 work->cm_event.private_data =
2889 IBA_GET_MEM_PTR(CM_DREP_PRIVATE_DATA, drep_msg);
2891 spin_lock_irq(&cm_id_priv->lock);
2892 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2893 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2894 spin_unlock_irq(&cm_id_priv->lock);
2897 cm_enter_timewait(cm_id_priv);
2899 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2900 cm_queue_work_unlock(cm_id_priv, work);
2903 cm_deref_id(cm_id_priv);
2907 static int cm_send_rej_locked(struct cm_id_private *cm_id_priv,
2908 enum ib_cm_rej_reason reason, void *ari,
2909 u8 ari_length, const void *private_data,
2910 u8 private_data_len)
2912 enum ib_cm_state state = cm_id_priv->id.state;
2913 struct ib_mad_send_buf *msg;
2916 lockdep_assert_held(&cm_id_priv->lock);
2918 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2919 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2923 case IB_CM_REQ_SENT:
2924 case IB_CM_MRA_REQ_RCVD:
2925 case IB_CM_REQ_RCVD:
2926 case IB_CM_MRA_REQ_SENT:
2927 case IB_CM_REP_RCVD:
2928 case IB_CM_MRA_REP_SENT:
2929 cm_reset_to_idle(cm_id_priv);
2930 ret = cm_alloc_msg(cm_id_priv, &msg);
2933 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2934 ari, ari_length, private_data, private_data_len,
2937 case IB_CM_REP_SENT:
2938 case IB_CM_MRA_REP_RCVD:
2939 cm_enter_timewait(cm_id_priv);
2940 ret = cm_alloc_msg(cm_id_priv, &msg);
2943 cm_format_rej((struct cm_rej_msg *)msg->mad, cm_id_priv, reason,
2944 ari, ari_length, private_data, private_data_len,
2948 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2949 be32_to_cpu(cm_id_priv->id.local_id),
2950 cm_id_priv->id.state);
2954 ret = ib_post_send_mad(msg, NULL);
2963 int ib_send_cm_rej(struct ib_cm_id *cm_id, enum ib_cm_rej_reason reason,
2964 void *ari, u8 ari_length, const void *private_data,
2965 u8 private_data_len)
2967 struct cm_id_private *cm_id_priv =
2968 container_of(cm_id, struct cm_id_private, id);
2969 unsigned long flags;
2972 spin_lock_irqsave(&cm_id_priv->lock, flags);
2973 ret = cm_send_rej_locked(cm_id_priv, reason, ari, ari_length,
2974 private_data, private_data_len);
2975 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2978 EXPORT_SYMBOL(ib_send_cm_rej);
2980 static void cm_format_rej_event(struct cm_work *work)
2982 struct cm_rej_msg *rej_msg;
2983 struct ib_cm_rej_event_param *param;
2985 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2986 param = &work->cm_event.param.rej_rcvd;
2987 param->ari = IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg);
2988 param->ari_length = IBA_GET(CM_REJ_REJECTED_INFO_LENGTH, rej_msg);
2989 param->reason = IBA_GET(CM_REJ_REASON, rej_msg);
2990 work->cm_event.private_data =
2991 IBA_GET_MEM_PTR(CM_REJ_PRIVATE_DATA, rej_msg);
2994 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2996 struct cm_id_private *cm_id_priv;
2999 remote_id = cpu_to_be32(IBA_GET(CM_REJ_LOCAL_COMM_ID, rej_msg));
3001 if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_TIMEOUT) {
3002 cm_id_priv = cm_find_remote_id(
3003 *((__be64 *)IBA_GET_MEM_PTR(CM_REJ_ARI, rej_msg)),
3005 } else if (IBA_GET(CM_REJ_MESSAGE_REJECTED, rej_msg) ==
3006 CM_MSG_RESPONSE_REQ)
3007 cm_id_priv = cm_acquire_id(
3008 cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3011 cm_id_priv = cm_acquire_id(
3012 cpu_to_be32(IBA_GET(CM_REJ_REMOTE_COMM_ID, rej_msg)),
3018 static int cm_rej_handler(struct cm_work *work)
3020 struct cm_id_private *cm_id_priv;
3021 struct cm_rej_msg *rej_msg;
3023 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
3024 cm_id_priv = cm_acquire_rejected_id(rej_msg);
3028 cm_format_rej_event(work);
3030 spin_lock_irq(&cm_id_priv->lock);
3031 switch (cm_id_priv->id.state) {
3032 case IB_CM_REQ_SENT:
3033 case IB_CM_MRA_REQ_RCVD:
3034 case IB_CM_REP_SENT:
3035 case IB_CM_MRA_REP_RCVD:
3036 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3038 case IB_CM_REQ_RCVD:
3039 case IB_CM_MRA_REQ_SENT:
3040 if (IBA_GET(CM_REJ_REASON, rej_msg) == IB_CM_REJ_STALE_CONN)
3041 cm_enter_timewait(cm_id_priv);
3043 cm_reset_to_idle(cm_id_priv);
3045 case IB_CM_DREQ_SENT:
3046 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3048 case IB_CM_REP_RCVD:
3049 case IB_CM_MRA_REP_SENT:
3050 cm_enter_timewait(cm_id_priv);
3052 case IB_CM_ESTABLISHED:
3053 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
3054 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
3055 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
3056 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
3058 cm_enter_timewait(cm_id_priv);
3063 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
3064 __func__, be32_to_cpu(cm_id_priv->id.local_id),
3065 cm_id_priv->id.state);
3066 spin_unlock_irq(&cm_id_priv->lock);
3070 cm_queue_work_unlock(cm_id_priv, work);
3073 cm_deref_id(cm_id_priv);
3077 int ib_send_cm_mra(struct ib_cm_id *cm_id,
3079 const void *private_data,
3080 u8 private_data_len)
3082 struct cm_id_private *cm_id_priv;
3083 struct ib_mad_send_buf *msg;
3084 enum ib_cm_state cm_state;
3085 enum ib_cm_lap_state lap_state;
3086 enum cm_msg_response msg_response;
3088 unsigned long flags;
3091 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
3094 data = cm_copy_private_data(private_data, private_data_len);
3096 return PTR_ERR(data);
3098 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3100 spin_lock_irqsave(&cm_id_priv->lock, flags);
3101 switch(cm_id_priv->id.state) {
3102 case IB_CM_REQ_RCVD:
3103 cm_state = IB_CM_MRA_REQ_SENT;
3104 lap_state = cm_id->lap_state;
3105 msg_response = CM_MSG_RESPONSE_REQ;
3107 case IB_CM_REP_RCVD:
3108 cm_state = IB_CM_MRA_REP_SENT;
3109 lap_state = cm_id->lap_state;
3110 msg_response = CM_MSG_RESPONSE_REP;
3112 case IB_CM_ESTABLISHED:
3113 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
3114 cm_state = cm_id->state;
3115 lap_state = IB_CM_MRA_LAP_SENT;
3116 msg_response = CM_MSG_RESPONSE_OTHER;
3121 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
3122 __func__, be32_to_cpu(cm_id_priv->id.local_id),
3123 cm_id_priv->id.state);
3128 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
3129 ret = cm_alloc_msg(cm_id_priv, &msg);
3133 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3134 msg_response, service_timeout,
3135 private_data, private_data_len);
3136 ret = ib_post_send_mad(msg, NULL);
3141 cm_id->state = cm_state;
3142 cm_id->lap_state = lap_state;
3143 cm_id_priv->service_timeout = service_timeout;
3144 cm_set_private_data(cm_id_priv, data, private_data_len);
3145 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3148 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3152 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3157 EXPORT_SYMBOL(ib_send_cm_mra);
3159 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
3161 switch (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg)) {
3162 case CM_MSG_RESPONSE_REQ:
3163 return cm_acquire_id(
3164 cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3166 case CM_MSG_RESPONSE_REP:
3167 case CM_MSG_RESPONSE_OTHER:
3168 return cm_acquire_id(
3169 cpu_to_be32(IBA_GET(CM_MRA_REMOTE_COMM_ID, mra_msg)),
3170 cpu_to_be32(IBA_GET(CM_MRA_LOCAL_COMM_ID, mra_msg)));
3176 static int cm_mra_handler(struct cm_work *work)
3178 struct cm_id_private *cm_id_priv;
3179 struct cm_mra_msg *mra_msg;
3182 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3183 cm_id_priv = cm_acquire_mraed_id(mra_msg);
3187 work->cm_event.private_data =
3188 IBA_GET_MEM_PTR(CM_MRA_PRIVATE_DATA, mra_msg);
3189 work->cm_event.param.mra_rcvd.service_timeout =
3190 IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg);
3191 timeout = cm_convert_to_ms(IBA_GET(CM_MRA_SERVICE_TIMEOUT, mra_msg)) +
3192 cm_convert_to_ms(cm_id_priv->av.timeout);
3194 spin_lock_irq(&cm_id_priv->lock);
3195 switch (cm_id_priv->id.state) {
3196 case IB_CM_REQ_SENT:
3197 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3198 CM_MSG_RESPONSE_REQ ||
3199 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3200 cm_id_priv->msg, timeout))
3202 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3204 case IB_CM_REP_SENT:
3205 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3206 CM_MSG_RESPONSE_REP ||
3207 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3208 cm_id_priv->msg, timeout))
3210 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3212 case IB_CM_ESTABLISHED:
3213 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
3214 CM_MSG_RESPONSE_OTHER ||
3215 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3216 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3217 cm_id_priv->msg, timeout)) {
3218 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3219 atomic_long_inc(&work->port->
3220 counter_group[CM_RECV_DUPLICATES].
3221 counter[CM_MRA_COUNTER]);
3224 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3226 case IB_CM_MRA_REQ_RCVD:
3227 case IB_CM_MRA_REP_RCVD:
3228 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3229 counter[CM_MRA_COUNTER]);
3232 pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
3233 __func__, be32_to_cpu(cm_id_priv->id.local_id),
3234 cm_id_priv->id.state);
3238 cm_id_priv->msg->context[1] = (void *) (unsigned long)
3239 cm_id_priv->id.state;
3240 cm_queue_work_unlock(cm_id_priv, work);
3243 spin_unlock_irq(&cm_id_priv->lock);
3244 cm_deref_id(cm_id_priv);
3248 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3249 struct sa_path_rec *path)
3253 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3254 sa_path_set_dlid(path, IBA_GET(CM_LAP_ALTERNATE_LOCAL_PORT_LID,
3256 sa_path_set_slid(path, IBA_GET(CM_LAP_ALTERNATE_REMOTE_PORT_LID,
3259 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3260 CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg));
3261 sa_path_set_dlid(path, lid);
3263 lid = opa_get_lid_from_gid(IBA_GET_MEM_PTR(
3264 CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg));
3265 sa_path_set_slid(path, lid);
3269 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3270 struct sa_path_rec *path,
3271 struct cm_lap_msg *lap_msg)
3273 path->dgid = *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID, lap_msg);
3275 *IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_REMOTE_PORT_GID, lap_msg);
3277 cpu_to_be32(IBA_GET(CM_LAP_ALTERNATE_FLOW_LABEL, lap_msg));
3278 path->hop_limit = IBA_GET(CM_LAP_ALTERNATE_HOP_LIMIT, lap_msg);
3279 path->traffic_class = IBA_GET(CM_LAP_ALTERNATE_TRAFFIC_CLASS, lap_msg);
3280 path->reversible = 1;
3281 path->pkey = cm_id_priv->pkey;
3282 path->sl = IBA_GET(CM_LAP_ALTERNATE_SL, lap_msg);
3283 path->mtu_selector = IB_SA_EQ;
3284 path->mtu = cm_id_priv->path_mtu;
3285 path->rate_selector = IB_SA_EQ;
3286 path->rate = IBA_GET(CM_LAP_ALTERNATE_PACKET_RATE, lap_msg);
3287 path->packet_life_time_selector = IB_SA_EQ;
3288 path->packet_life_time =
3289 IBA_GET(CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT, lap_msg);
3290 path->packet_life_time -= (path->packet_life_time > 0);
3291 cm_format_path_lid_from_lap(lap_msg, path);
3294 static int cm_lap_handler(struct cm_work *work)
3296 struct cm_id_private *cm_id_priv;
3297 struct cm_lap_msg *lap_msg;
3298 struct ib_cm_lap_event_param *param;
3299 struct ib_mad_send_buf *msg = NULL;
3302 /* Currently Alternate path messages are not supported for
3305 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3306 work->port->port_num))
3309 /* todo: verify LAP request and send reject APR if invalid. */
3310 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3311 cm_id_priv = cm_acquire_id(
3312 cpu_to_be32(IBA_GET(CM_LAP_REMOTE_COMM_ID, lap_msg)),
3313 cpu_to_be32(IBA_GET(CM_LAP_LOCAL_COMM_ID, lap_msg)));
3317 param = &work->cm_event.param.lap_rcvd;
3318 memset(&work->path[0], 0, sizeof(work->path[1]));
3319 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3320 work->port->port_num, &work->path[0],
3321 IBA_GET_MEM_PTR(CM_LAP_ALTERNATE_LOCAL_PORT_GID,
3323 param->alternate_path = &work->path[0];
3324 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3325 work->cm_event.private_data =
3326 IBA_GET_MEM_PTR(CM_LAP_PRIVATE_DATA, lap_msg);
3328 spin_lock_irq(&cm_id_priv->lock);
3329 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3332 switch (cm_id_priv->id.lap_state) {
3333 case IB_CM_LAP_UNINIT:
3334 case IB_CM_LAP_IDLE:
3336 case IB_CM_MRA_LAP_SENT:
3337 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3338 counter[CM_LAP_COUNTER]);
3339 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3343 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3344 CM_MSG_RESPONSE_OTHER,
3345 cm_id_priv->service_timeout,
3346 cm_id_priv->private_data,
3347 cm_id_priv->private_data_len);
3348 spin_unlock_irq(&cm_id_priv->lock);
3350 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3351 ib_post_send_mad(msg, NULL))
3354 case IB_CM_LAP_RCVD:
3355 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3356 counter[CM_LAP_COUNTER]);
3362 ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3363 work->mad_recv_wc->recv_buf.grh,
3368 ret = cm_init_av_by_path(param->alternate_path, NULL,
3369 &cm_id_priv->alt_av, cm_id_priv);
3373 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3374 cm_id_priv->tid = lap_msg->hdr.tid;
3375 cm_queue_work_unlock(cm_id_priv, work);
3378 unlock: spin_unlock_irq(&cm_id_priv->lock);
3379 deref: cm_deref_id(cm_id_priv);
3383 static int cm_apr_handler(struct cm_work *work)
3385 struct cm_id_private *cm_id_priv;
3386 struct cm_apr_msg *apr_msg;
3388 /* Currently Alternate path messages are not supported for
3391 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3392 work->port->port_num))
3395 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3396 cm_id_priv = cm_acquire_id(
3397 cpu_to_be32(IBA_GET(CM_APR_REMOTE_COMM_ID, apr_msg)),
3398 cpu_to_be32(IBA_GET(CM_APR_LOCAL_COMM_ID, apr_msg)));
3400 return -EINVAL; /* Unmatched reply. */
3402 work->cm_event.param.apr_rcvd.ap_status =
3403 IBA_GET(CM_APR_AR_STATUS, apr_msg);
3404 work->cm_event.param.apr_rcvd.apr_info =
3405 IBA_GET_MEM_PTR(CM_APR_ADDITIONAL_INFORMATION, apr_msg);
3406 work->cm_event.param.apr_rcvd.info_len =
3407 IBA_GET(CM_APR_ADDITIONAL_INFORMATION_LENGTH, apr_msg);
3408 work->cm_event.private_data =
3409 IBA_GET_MEM_PTR(CM_APR_PRIVATE_DATA, apr_msg);
3411 spin_lock_irq(&cm_id_priv->lock);
3412 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3413 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3414 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3415 spin_unlock_irq(&cm_id_priv->lock);
3418 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3419 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3420 cm_id_priv->msg = NULL;
3421 cm_queue_work_unlock(cm_id_priv, work);
3424 cm_deref_id(cm_id_priv);
3428 static int cm_timewait_handler(struct cm_work *work)
3430 struct cm_timewait_info *timewait_info;
3431 struct cm_id_private *cm_id_priv;
3433 timewait_info = container_of(work, struct cm_timewait_info, work);
3434 spin_lock_irq(&cm.lock);
3435 list_del(&timewait_info->list);
3436 spin_unlock_irq(&cm.lock);
3438 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3439 timewait_info->work.remote_id);
3443 spin_lock_irq(&cm_id_priv->lock);
3444 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3445 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3446 spin_unlock_irq(&cm_id_priv->lock);
3449 cm_id_priv->id.state = IB_CM_IDLE;
3450 cm_queue_work_unlock(cm_id_priv, work);
3453 cm_deref_id(cm_id_priv);
3457 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3458 struct cm_id_private *cm_id_priv,
3459 struct ib_cm_sidr_req_param *param)
3461 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3462 cm_form_tid(cm_id_priv));
3463 IBA_SET(CM_SIDR_REQ_REQUESTID, sidr_req_msg,
3464 be32_to_cpu(cm_id_priv->id.local_id));
3465 IBA_SET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg,
3466 be16_to_cpu(param->path->pkey));
3467 IBA_SET(CM_SIDR_REQ_SERVICEID, sidr_req_msg,
3468 be64_to_cpu(param->service_id));
3470 if (param->private_data && param->private_data_len)
3471 IBA_SET_MEM(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg,
3472 param->private_data, param->private_data_len);
3475 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3476 struct ib_cm_sidr_req_param *param)
3478 struct cm_id_private *cm_id_priv;
3479 struct ib_mad_send_buf *msg;
3480 unsigned long flags;
3483 if (!param->path || (param->private_data &&
3484 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3487 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3488 ret = cm_init_av_by_path(param->path, param->sgid_attr,
3494 cm_id->service_id = param->service_id;
3495 cm_id->service_mask = ~cpu_to_be64(0);
3496 cm_id_priv->timeout_ms = param->timeout_ms;
3497 cm_id_priv->max_cm_retries = param->max_cm_retries;
3498 ret = cm_alloc_msg(cm_id_priv, &msg);
3502 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3504 msg->timeout_ms = cm_id_priv->timeout_ms;
3505 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3507 spin_lock_irqsave(&cm_id_priv->lock, flags);
3508 if (cm_id->state == IB_CM_IDLE)
3509 ret = ib_post_send_mad(msg, NULL);
3514 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3518 cm_id->state = IB_CM_SIDR_REQ_SENT;
3519 cm_id_priv->msg = msg;
3520 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3524 EXPORT_SYMBOL(ib_send_cm_sidr_req);
3526 static void cm_format_sidr_req_event(struct cm_work *work,
3527 const struct cm_id_private *rx_cm_id,
3528 struct ib_cm_id *listen_id)
3530 struct cm_sidr_req_msg *sidr_req_msg;
3531 struct ib_cm_sidr_req_event_param *param;
3533 sidr_req_msg = (struct cm_sidr_req_msg *)
3534 work->mad_recv_wc->recv_buf.mad;
3535 param = &work->cm_event.param.sidr_req_rcvd;
3536 param->pkey = IBA_GET(CM_SIDR_REQ_PARTITION_KEY, sidr_req_msg);
3537 param->listen_id = listen_id;
3539 cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3540 param->bth_pkey = cm_get_bth_pkey(work);
3541 param->port = work->port->port_num;
3542 param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3543 work->cm_event.private_data =
3544 IBA_GET_MEM_PTR(CM_SIDR_REQ_PRIVATE_DATA, sidr_req_msg);
3547 static int cm_sidr_req_handler(struct cm_work *work)
3549 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
3550 struct cm_sidr_req_msg *sidr_req_msg;
3555 cm_alloc_id_priv(work->port->cm_dev->ib_device, NULL, NULL);
3556 if (IS_ERR(cm_id_priv))
3557 return PTR_ERR(cm_id_priv);
3559 /* Record SGID/SLID and request ID for lookup. */
3560 sidr_req_msg = (struct cm_sidr_req_msg *)
3561 work->mad_recv_wc->recv_buf.mad;
3563 cm_id_priv->id.remote_id =
3564 cpu_to_be32(IBA_GET(CM_SIDR_REQ_REQUESTID, sidr_req_msg));
3565 cm_id_priv->id.service_id =
3566 cpu_to_be64(IBA_GET(CM_SIDR_REQ_SERVICEID, sidr_req_msg));
3567 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3568 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3570 wc = work->mad_recv_wc->wc;
3571 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3572 cm_id_priv->av.dgid.global.interface_id = 0;
3573 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3574 work->mad_recv_wc->recv_buf.grh,
3579 spin_lock_irq(&cm.lock);
3580 listen_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3581 if (listen_cm_id_priv) {
3582 spin_unlock_irq(&cm.lock);
3583 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3584 counter[CM_SIDR_REQ_COUNTER]);
3585 goto out; /* Duplicate message. */
3587 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3588 listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
3589 cm_id_priv->id.service_id);
3590 if (!listen_cm_id_priv) {
3591 spin_unlock_irq(&cm.lock);
3592 ib_send_cm_sidr_rep(&cm_id_priv->id,
3593 &(struct ib_cm_sidr_rep_param){
3594 .status = IB_SIDR_UNSUPPORTED });
3595 goto out; /* No match. */
3597 spin_unlock_irq(&cm.lock);
3599 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
3600 cm_id_priv->id.context = listen_cm_id_priv->id.context;
3603 * A SIDR ID does not need to be in the xarray since it does not receive
3604 * mads, is not placed in the remote_id or remote_qpn rbtree, and does
3605 * not enter timewait.
3608 cm_format_sidr_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
3609 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
3612 * A pointer to the listen_cm_id is held in the event, so this deref
3613 * must be after the event is delivered above.
3615 cm_deref_id(listen_cm_id_priv);
3617 cm_destroy_id(&cm_id_priv->id, ret);
3620 ib_destroy_cm_id(&cm_id_priv->id);
3624 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3625 struct cm_id_private *cm_id_priv,
3626 struct ib_cm_sidr_rep_param *param)
3628 cm_format_mad_ece_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3629 cm_id_priv->tid, param->ece.attr_mod);
3630 IBA_SET(CM_SIDR_REP_REQUESTID, sidr_rep_msg,
3631 be32_to_cpu(cm_id_priv->id.remote_id));
3632 IBA_SET(CM_SIDR_REP_STATUS, sidr_rep_msg, param->status);
3633 IBA_SET(CM_SIDR_REP_QPN, sidr_rep_msg, param->qp_num);
3634 IBA_SET(CM_SIDR_REP_SERVICEID, sidr_rep_msg,
3635 be64_to_cpu(cm_id_priv->id.service_id));
3636 IBA_SET(CM_SIDR_REP_Q_KEY, sidr_rep_msg, param->qkey);
3637 IBA_SET(CM_SIDR_REP_VENDOR_ID_L, sidr_rep_msg,
3638 param->ece.vendor_id & 0xFF);
3639 IBA_SET(CM_SIDR_REP_VENDOR_ID_H, sidr_rep_msg,
3640 (param->ece.vendor_id >> 8) & 0xFF);
3642 if (param->info && param->info_length)
3643 IBA_SET_MEM(CM_SIDR_REP_ADDITIONAL_INFORMATION, sidr_rep_msg,
3644 param->info, param->info_length);
3646 if (param->private_data && param->private_data_len)
3647 IBA_SET_MEM(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg,
3648 param->private_data, param->private_data_len);
3651 static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
3652 struct ib_cm_sidr_rep_param *param)
3654 struct ib_mad_send_buf *msg;
3657 lockdep_assert_held(&cm_id_priv->lock);
3659 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3660 (param->private_data &&
3661 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3664 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_RCVD)
3667 ret = cm_alloc_msg(cm_id_priv, &msg);
3671 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3673 ret = ib_post_send_mad(msg, NULL);
3678 cm_id_priv->id.state = IB_CM_IDLE;
3679 spin_lock_irq(&cm.lock);
3680 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3681 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3682 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3684 spin_unlock_irq(&cm.lock);
3688 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3689 struct ib_cm_sidr_rep_param *param)
3691 struct cm_id_private *cm_id_priv =
3692 container_of(cm_id, struct cm_id_private, id);
3693 unsigned long flags;
3696 spin_lock_irqsave(&cm_id_priv->lock, flags);
3697 ret = cm_send_sidr_rep_locked(cm_id_priv, param);
3698 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3701 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3703 static void cm_format_sidr_rep_event(struct cm_work *work,
3704 const struct cm_id_private *cm_id_priv)
3706 struct cm_sidr_rep_msg *sidr_rep_msg;
3707 struct ib_cm_sidr_rep_event_param *param;
3709 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3710 work->mad_recv_wc->recv_buf.mad;
3711 param = &work->cm_event.param.sidr_rep_rcvd;
3712 param->status = IBA_GET(CM_SIDR_REP_STATUS, sidr_rep_msg);
3713 param->qkey = IBA_GET(CM_SIDR_REP_Q_KEY, sidr_rep_msg);
3714 param->qpn = IBA_GET(CM_SIDR_REP_QPN, sidr_rep_msg);
3715 param->info = IBA_GET_MEM_PTR(CM_SIDR_REP_ADDITIONAL_INFORMATION,
3717 param->info_len = IBA_GET(CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH,
3719 param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3720 work->cm_event.private_data =
3721 IBA_GET_MEM_PTR(CM_SIDR_REP_PRIVATE_DATA, sidr_rep_msg);
3724 static int cm_sidr_rep_handler(struct cm_work *work)
3726 struct cm_sidr_rep_msg *sidr_rep_msg;
3727 struct cm_id_private *cm_id_priv;
3729 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3730 work->mad_recv_wc->recv_buf.mad;
3731 cm_id_priv = cm_acquire_id(
3732 cpu_to_be32(IBA_GET(CM_SIDR_REP_REQUESTID, sidr_rep_msg)), 0);
3734 return -EINVAL; /* Unmatched reply. */
3736 spin_lock_irq(&cm_id_priv->lock);
3737 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3738 spin_unlock_irq(&cm_id_priv->lock);
3741 cm_id_priv->id.state = IB_CM_IDLE;
3742 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3743 spin_unlock_irq(&cm_id_priv->lock);
3745 cm_format_sidr_rep_event(work, cm_id_priv);
3746 cm_process_work(cm_id_priv, work);
3749 cm_deref_id(cm_id_priv);
3753 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3754 enum ib_wc_status wc_status)
3756 struct cm_id_private *cm_id_priv;
3757 struct ib_cm_event cm_event;
3758 enum ib_cm_state state;
3761 memset(&cm_event, 0, sizeof cm_event);
3762 cm_id_priv = msg->context[0];
3764 /* Discard old sends or ones without a response. */
3765 spin_lock_irq(&cm_id_priv->lock);
3766 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3767 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3770 pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
3771 state, ib_wc_status_msg(wc_status));
3773 case IB_CM_REQ_SENT:
3774 case IB_CM_MRA_REQ_RCVD:
3775 cm_reset_to_idle(cm_id_priv);
3776 cm_event.event = IB_CM_REQ_ERROR;
3778 case IB_CM_REP_SENT:
3779 case IB_CM_MRA_REP_RCVD:
3780 cm_reset_to_idle(cm_id_priv);
3781 cm_event.event = IB_CM_REP_ERROR;
3783 case IB_CM_DREQ_SENT:
3784 cm_enter_timewait(cm_id_priv);
3785 cm_event.event = IB_CM_DREQ_ERROR;
3787 case IB_CM_SIDR_REQ_SENT:
3788 cm_id_priv->id.state = IB_CM_IDLE;
3789 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3794 spin_unlock_irq(&cm_id_priv->lock);
3795 cm_event.param.send_status = wc_status;
3797 /* No other events can occur on the cm_id at this point. */
3798 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3801 ib_destroy_cm_id(&cm_id_priv->id);
3804 spin_unlock_irq(&cm_id_priv->lock);
3808 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3809 struct ib_mad_send_wc *mad_send_wc)
3811 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3812 struct cm_port *port;
3815 port = mad_agent->context;
3816 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3817 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3820 * If the send was in response to a received message (context[0] is not
3821 * set to a cm_id), and is not a REJ, then it is a send that was
3824 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3827 atomic_long_add(1 + msg->retries,
3828 &port->counter_group[CM_XMIT].counter[attr_index]);
3830 atomic_long_add(msg->retries,
3831 &port->counter_group[CM_XMIT_RETRIES].
3832 counter[attr_index]);
3834 switch (mad_send_wc->status) {
3836 case IB_WC_WR_FLUSH_ERR:
3840 if (msg->context[0] && msg->context[1])
3841 cm_process_send_error(msg, mad_send_wc->status);
3848 static void cm_work_handler(struct work_struct *_work)
3850 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3853 switch (work->cm_event.event) {
3854 case IB_CM_REQ_RECEIVED:
3855 ret = cm_req_handler(work);
3857 case IB_CM_MRA_RECEIVED:
3858 ret = cm_mra_handler(work);
3860 case IB_CM_REJ_RECEIVED:
3861 ret = cm_rej_handler(work);
3863 case IB_CM_REP_RECEIVED:
3864 ret = cm_rep_handler(work);
3866 case IB_CM_RTU_RECEIVED:
3867 ret = cm_rtu_handler(work);
3869 case IB_CM_USER_ESTABLISHED:
3870 ret = cm_establish_handler(work);
3872 case IB_CM_DREQ_RECEIVED:
3873 ret = cm_dreq_handler(work);
3875 case IB_CM_DREP_RECEIVED:
3876 ret = cm_drep_handler(work);
3878 case IB_CM_SIDR_REQ_RECEIVED:
3879 ret = cm_sidr_req_handler(work);
3881 case IB_CM_SIDR_REP_RECEIVED:
3882 ret = cm_sidr_rep_handler(work);
3884 case IB_CM_LAP_RECEIVED:
3885 ret = cm_lap_handler(work);
3887 case IB_CM_APR_RECEIVED:
3888 ret = cm_apr_handler(work);
3890 case IB_CM_TIMEWAIT_EXIT:
3891 ret = cm_timewait_handler(work);
3894 pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
3902 static int cm_establish(struct ib_cm_id *cm_id)
3904 struct cm_id_private *cm_id_priv;
3905 struct cm_work *work;
3906 unsigned long flags;
3908 struct cm_device *cm_dev;
3910 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3914 work = kmalloc(sizeof *work, GFP_ATOMIC);
3918 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3919 spin_lock_irqsave(&cm_id_priv->lock, flags);
3920 switch (cm_id->state)
3922 case IB_CM_REP_SENT:
3923 case IB_CM_MRA_REP_RCVD:
3924 cm_id->state = IB_CM_ESTABLISHED;
3926 case IB_CM_ESTABLISHED:
3930 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
3931 be32_to_cpu(cm_id->local_id), cm_id->state);
3935 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3943 * The CM worker thread may try to destroy the cm_id before it
3944 * can execute this work item. To prevent potential deadlock,
3945 * we need to find the cm_id once we're in the context of the
3946 * worker thread, rather than holding a reference on it.
3948 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3949 work->local_id = cm_id->local_id;
3950 work->remote_id = cm_id->remote_id;
3951 work->mad_recv_wc = NULL;
3952 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3954 /* Check if the device started its remove_one */
3955 spin_lock_irqsave(&cm.lock, flags);
3956 if (!cm_dev->going_down) {
3957 queue_delayed_work(cm.wq, &work->work, 0);
3962 spin_unlock_irqrestore(&cm.lock, flags);
3968 static int cm_migrate(struct ib_cm_id *cm_id)
3970 struct cm_id_private *cm_id_priv;
3971 struct cm_av tmp_av;
3972 unsigned long flags;
3973 int tmp_send_port_not_ready;
3976 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3977 spin_lock_irqsave(&cm_id_priv->lock, flags);
3978 if (cm_id->state == IB_CM_ESTABLISHED &&
3979 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3980 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3981 cm_id->lap_state = IB_CM_LAP_IDLE;
3982 /* Swap address vector */
3983 tmp_av = cm_id_priv->av;
3984 cm_id_priv->av = cm_id_priv->alt_av;
3985 cm_id_priv->alt_av = tmp_av;
3986 /* Swap port send ready state */
3987 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3988 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3989 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3992 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3997 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
4002 case IB_EVENT_COMM_EST:
4003 ret = cm_establish(cm_id);
4005 case IB_EVENT_PATH_MIG:
4006 ret = cm_migrate(cm_id);
4013 EXPORT_SYMBOL(ib_cm_notify);
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
                            struct ib_mad_send_buf *send_buf,
                            struct ib_mad_recv_wc *mad_recv_wc)
{
        struct cm_port *port = mad_agent->context;
        struct cm_work *work;
        enum ib_cm_event_type event;
        bool alt_path = false;
        u16 attr_id;
        int paths = 0;
        int going_down = 0;

        switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
        case CM_REQ_ATTR_ID:
                alt_path = cm_req_has_alt_path((struct cm_req_msg *)
                                                mad_recv_wc->recv_buf.mad);
                paths = 1 + (alt_path != 0);
                event = IB_CM_REQ_RECEIVED;
                break;
        case CM_MRA_ATTR_ID:
                event = IB_CM_MRA_RECEIVED;
                break;
        case CM_REJ_ATTR_ID:
                event = IB_CM_REJ_RECEIVED;
                break;
        case CM_REP_ATTR_ID:
                event = IB_CM_REP_RECEIVED;
                break;
        case CM_RTU_ATTR_ID:
                event = IB_CM_RTU_RECEIVED;
                break;
        case CM_DREQ_ATTR_ID:
                event = IB_CM_DREQ_RECEIVED;
                break;
        case CM_DREP_ATTR_ID:
                event = IB_CM_DREP_RECEIVED;
                break;
        case CM_SIDR_REQ_ATTR_ID:
                event = IB_CM_SIDR_REQ_RECEIVED;
                break;
        case CM_SIDR_REP_ATTR_ID:
                event = IB_CM_SIDR_REP_RECEIVED;
                break;
        case CM_LAP_ATTR_ID:
                paths = 1;
                event = IB_CM_LAP_RECEIVED;
                break;
        case CM_APR_ATTR_ID:
                event = IB_CM_APR_RECEIVED;
                break;
        default:
                ib_free_recv_mad(mad_recv_wc);
                return;
        }

        attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
        atomic_long_inc(&port->counter_group[CM_RECV].
                        counter[attr_id - CM_ATTR_ID_OFFSET]);

        work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
        if (!work) {
                ib_free_recv_mad(mad_recv_wc);
                return;
        }

        INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->cm_event.event = event;
        work->mad_recv_wc = mad_recv_wc;
        work->port = port;

        /* Check if the device started its remove_one */
        spin_lock_irq(&cm.lock);
        if (!port->cm_dev->going_down)
                queue_delayed_work(cm.wq, &work->work, 0);
        else
                going_down = 1;
        spin_unlock_irq(&cm.lock);

        if (going_down) {
                kfree(work);
                ib_free_recv_mad(mad_recv_wc);
        }
}
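
/*
 * The three helpers below fill in the attributes a consumer needs to move
 * its QP to INIT, RTR and RTS; each is valid only in the connection states
 * from which that QP transition makes sense, and each returns the
 * attribute mask to pass to ib_modify_qp().
 */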
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
                                struct ib_qp_attr *qp_attr,
                                int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
                                IB_QP_PKEY_INDEX | IB_QP_PORT;
                qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
                if (cm_id_priv->responder_resources)
                        qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
                                                    IB_ACCESS_REMOTE_ATOMIC;
                qp_attr->pkey_index = cm_id_priv->av.pkey_index;
                qp_attr->port_num = cm_id_priv->av.port->port_num;
                ret = 0;
                break;
        default:
                pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
                         __func__, be32_to_cpu(cm_id_priv->id.local_id),
                         cm_id_priv->id.state);
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
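
/*
 * For the RTR transition the CM supplies the primary path (address vector,
 * MTU, destination QPN, RQ PSN), the responder resources and RNR timer for
 * RC and XRC target QPs, and the alternate path when one has been loaded.
 */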
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
                               struct ib_qp_attr *qp_attr,
                               int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
                                IB_QP_DEST_QPN | IB_QP_RQ_PSN;
                qp_attr->ah_attr = cm_id_priv->av.ah_attr;
                qp_attr->path_mtu = cm_id_priv->path_mtu;
                qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
                qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
                if (cm_id_priv->qp_type == IB_QPT_RC ||
                    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
                        *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
                                         IB_QP_MIN_RNR_TIMER;
                        qp_attr->max_dest_rd_atomic =
                                cm_id_priv->responder_resources;
                        qp_attr->min_rnr_timer = 0;
                }
                if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
                        *qp_attr_mask |= IB_QP_ALT_PATH;
                        qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
                        qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
                        qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
                        qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
                }
                ret = 0;
                break;
        default:
                pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
                         __func__, be32_to_cpu(cm_id_priv->id.local_id),
                         cm_id_priv->id.state);
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
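
/*
 * For the RTS transition the CM supplies the SQ PSN, retry counts and
 * initiator depth for RC and XRC initiator QPs, and the local ACK timeout
 * for RC and XRC QPs.  Once a LAP has been processed (lap_state is no
 * longer IB_CM_LAP_UNINIT), only the alternate path is loaded and the QP
 * is rearmed for path migration.
 */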
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
                               struct ib_qp_attr *qp_attr,
                               int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        /* Allow transition to RTS before sending REP */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:

        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
                        *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
                        qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
                        switch (cm_id_priv->qp_type) {
                        case IB_QPT_RC:
                        case IB_QPT_XRC_INI:
                                *qp_attr_mask |= IB_QP_RETRY_CNT |
                                                 IB_QP_RNR_RETRY |
                                                 IB_QP_MAX_QP_RD_ATOMIC;
                                qp_attr->retry_cnt = cm_id_priv->retry_count;
                                qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
                                qp_attr->max_rd_atomic =
                                        cm_id_priv->initiator_depth;
                                fallthrough;
                        case IB_QPT_XRC_TGT:
                                *qp_attr_mask |= IB_QP_TIMEOUT;
                                qp_attr->timeout = cm_id_priv->av.timeout;
                                break;
                        default:
                                break;
                        }
                        if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
                                *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
                                qp_attr->path_mig_state = IB_MIG_REARM;
                        }
                } else {
                        *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
                        qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
                        qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
                        qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
                        qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
                        qp_attr->path_mig_state = IB_MIG_REARM;
                }
                ret = 0;
                break;
        default:
                pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
                         __func__, be32_to_cpu(cm_id_priv->id.local_id),
                         cm_id_priv->id.state);
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
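
/*
 * ib_cm_init_qp_attr() dispatches on the QP state requested by the caller;
 * qp_attr->qp_state must be set to IB_QPS_INIT, IB_QPS_RTR or IB_QPS_RTS
 * before calling.
 */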
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        switch (qp_attr->qp_state) {
        case IB_QPS_INIT:
                ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTR:
                ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
                break;
        case IB_QPS_RTS:
                ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
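
/*
 * Illustrative sketch (not part of this module): a consumer typically
 * drives its connection QP through INIT -> RTR -> RTS by asking the CM
 * for the attributes of each transition and applying them with
 * ib_modify_qp().  "example_cm_id" and "example_qp" are hypothetical
 * caller-owned objects; real code would interleave these steps with the
 * REQ/REP/RTU handshake.
 *
 *      static int example_setup_qp(struct ib_cm_id *example_cm_id,
 *                                  struct ib_qp *example_qp)
 *      {
 *              static const enum ib_qp_state states[] = {
 *                      IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS
 *              };
 *              struct ib_qp_attr qp_attr = {};
 *              int qp_attr_mask, ret;
 *              unsigned int i;
 *
 *              for (i = 0; i < ARRAY_SIZE(states); i++) {
 *                      qp_attr.qp_state = states[i];
 *                      ret = ib_cm_init_qp_attr(example_cm_id, &qp_attr,
 *                                               &qp_attr_mask);
 *                      if (ret)
 *                              return ret;
 *                      ret = ib_modify_qp(example_qp, &qp_attr,
 *                                         qp_attr_mask);
 *                      if (ret)
 *                              return ret;
 *              }
 *              return 0;
 *      }
 */
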
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
                               char *buf)
{
        struct cm_counter_group *group;
        struct cm_counter_attribute *cm_attr;

        group = container_of(obj, struct cm_counter_group, obj);
        cm_attr = container_of(attr, struct cm_counter_attribute, attr);

        return sprintf(buf, "%ld\n",
                       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
        .show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
        .sysfs_ops = &cm_counter_ops,
        .default_attrs = cm_counter_default_attrs
};
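
/*
 * Each counter group is registered as a per-port kobject, so the counters
 * typically show up as attributes under a path of the form
 * /sys/class/infiniband/<device>/ports/<port>/<group>/, one file per CM
 * attribute ID.
 */
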
static char *cm_devnode(struct device *dev, umode_t *mode)
{
        if (mode)
                *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
        .owner   = THIS_MODULE,
        .name    = "infiniband_cm",
        .devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);
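
/*
 * cm_devnode() above places this class's device nodes under
 * /dev/infiniband/ and makes them world-readable/writable (0666).
 */
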
static int cm_create_port_fs(struct cm_port *port)
{
        int i, ret;

        for (i = 0; i < CM_COUNTER_GROUPS; i++) {
                ret = ib_port_register_module_stat(port->cm_dev->ib_device,
                                                   port->port_num,
                                                   &port->counter_group[i].obj,
                                                   &cm_counter_obj_type,
                                                   counter_group_names[i]);
                if (ret)
                        goto error;
        }

        return 0;

error:
        while (i--)
                ib_port_unregister_module_stat(&port->counter_group[i].obj);
        return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
        int i;

        for (i = 0; i < CM_COUNTER_GROUPS; i++)
                ib_port_unregister_module_stat(&port->counter_group[i].obj);
}
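
/*
 * cm_add_one() is the ib_client add callback.  It allocates a cm_device,
 * registers a GSI MAD agent on every CM-capable port, sets IB_PORT_CM_SUP
 * in the port capability mask, and only then publishes the device on
 * cm.device_list; the error path unwinds each step in reverse.
 */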
static int cm_add_one(struct ib_device *ib_device)
{
        struct cm_device *cm_dev;
        struct cm_port *port;
        struct ib_mad_reg_req reg_req = {
                .mgmt_class = IB_MGMT_CLASS_CM,
                .mgmt_class_version = IB_CM_CLASS_VERSION,
        };
        struct ib_port_modify port_modify = {
                .set_port_cap_mask = IB_PORT_CM_SUP
        };
        unsigned long flags;
        int ret;
        int count = 0;
        u8 i;

        cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
                         GFP_KERNEL);
        if (!cm_dev)
                return -ENOMEM;

        cm_dev->ib_device = ib_device;
        cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
        cm_dev->going_down = 0;

        set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
        for (i = 1; i <= ib_device->phys_port_cnt; i++) {
                if (!rdma_cap_ib_cm(ib_device, i))
                        continue;

                port = kzalloc(sizeof *port, GFP_KERNEL);
                if (!port) {
                        ret = -ENOMEM;
                        goto error1;
                }

                cm_dev->port[i-1] = port;
                port->cm_dev = cm_dev;
                port->port_num = i;

                INIT_LIST_HEAD(&port->cm_priv_prim_list);
                INIT_LIST_HEAD(&port->cm_priv_altr_list);

                ret = cm_create_port_fs(port);
                if (ret)
                        goto error1;

                port->mad_agent = ib_register_mad_agent(ib_device, i,
                                                        IB_QPT_GSI,
                                                        &reg_req, 0,
                                                        cm_send_handler,
                                                        cm_recv_handler,
                                                        port, 0);
                if (IS_ERR(port->mad_agent)) {
                        ret = PTR_ERR(port->mad_agent);
                        goto error2;
                }

                ret = ib_modify_port(ib_device, i, 0, &port_modify);
                if (ret)
                        goto error3;

                count++;
        }

        if (!count) {
                ret = -EOPNOTSUPP;
                goto free;
        }

        ib_set_client_data(ib_device, &cm_client, cm_dev);

        write_lock_irqsave(&cm.device_lock, flags);
        list_add_tail(&cm_dev->list, &cm.device_list);
        write_unlock_irqrestore(&cm.device_lock, flags);
        return 0;

error3:
        ib_unregister_mad_agent(port->mad_agent);
error2:
        cm_remove_port_fs(port);
error1:
        port_modify.set_port_cap_mask = 0;
        port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
        while (--i) {
                if (!rdma_cap_ib_cm(ib_device, i))
                        continue;

                port = cm_dev->port[i-1];
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
                cm_remove_port_fs(port);
        }
free:
        kfree(cm_dev);
        return ret;
}
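
/*
 * cm_remove_one() tears down in the opposite order: unlink the device and
 * mark it going_down first, so that cm_recv_handler() and cm_establish()
 * stop queueing work, then drain the workqueue and unregister each port's
 * MAD agent.
 */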
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
        struct cm_device *cm_dev = client_data;
        struct cm_port *port;
        struct cm_id_private *cm_id_priv;
        struct ib_mad_agent *cur_mad_agent;
        struct ib_port_modify port_modify = {
                .clr_port_cap_mask = IB_PORT_CM_SUP
        };
        unsigned long flags;
        int i;

        write_lock_irqsave(&cm.device_lock, flags);
        list_del(&cm_dev->list);
        write_unlock_irqrestore(&cm.device_lock, flags);

        spin_lock_irq(&cm.lock);
        cm_dev->going_down = 1;
        spin_unlock_irq(&cm.lock);

        for (i = 1; i <= ib_device->phys_port_cnt; i++) {
                if (!rdma_cap_ib_cm(ib_device, i))
                        continue;

                port = cm_dev->port[i-1];
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
                /* Mark all the cm_id's as not valid */
                spin_lock_irq(&cm.lock);
                list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list,
                                    altr_list)
                        cm_id_priv->altr_send_port_not_ready = 1;
                list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list,
                                    prim_list)
                        cm_id_priv->prim_send_port_not_ready = 1;
                spin_unlock_irq(&cm.lock);
                /*
                 * Flush the workqueue after setting going_down to ensure
                 * that no new work is queued by the receive handler; only
                 * then is it safe to unregister the MAD agent.
                 */
                flush_workqueue(cm.wq);
                spin_lock_irq(&cm.state_lock);
                cur_mad_agent = port->mad_agent;
                port->mad_agent = NULL;
                spin_unlock_irq(&cm.state_lock);
                ib_unregister_mad_agent(cur_mad_agent);
                cm_remove_port_fs(port);
        }

        kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
        int ret;

        INIT_LIST_HEAD(&cm.device_list);
        rwlock_init(&cm.device_lock);
        spin_lock_init(&cm.lock);
        spin_lock_init(&cm.state_lock);
        cm.listen_service_table = RB_ROOT;
        cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
        cm.remote_id_table = RB_ROOT;
        cm.remote_qp_table = RB_ROOT;
        cm.remote_sidr_table = RB_ROOT;
        xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
        get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
        INIT_LIST_HEAD(&cm.timewait_list);

        ret = class_register(&cm_class);
        if (ret) {
                ret = -ENOMEM;
                goto error1;
        }

        cm.wq = alloc_workqueue("ib_cm", 0, 1);
        if (!cm.wq) {
                ret = -ENOMEM;
                goto error2;
        }

        ret = ib_register_client(&cm_client);
        if (ret)
                goto error3;

        return 0;
error3:
        destroy_workqueue(cm.wq);
error2:
        class_unregister(&cm_class);
error1:
        return ret;
}

static void __exit ib_cm_cleanup(void)
{
        struct cm_timewait_info *timewait_info, *tmp;

        spin_lock_irq(&cm.lock);
        list_for_each_entry(timewait_info, &cm.timewait_list, list)
                cancel_delayed_work(&timewait_info->work.work);
        spin_unlock_irq(&cm.lock);

        ib_unregister_client(&cm_client);
        destroy_workqueue(cm.wq);

        list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
                list_del(&timewait_info->list);
                kfree(timewait_info);
        }

        class_unregister(&cm_class);
        WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);