/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <linux/etherdevice.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static const char * const ibcm_rej_reason_strs[] = {
        [IB_CM_REJ_NO_QP]                       = "no QP",
        [IB_CM_REJ_NO_EEC]                      = "no EEC",
        [IB_CM_REJ_NO_RESOURCES]                = "no resources",
        [IB_CM_REJ_TIMEOUT]                     = "timeout",
        [IB_CM_REJ_UNSUPPORTED]                 = "unsupported",
        [IB_CM_REJ_INVALID_COMM_ID]             = "invalid comm ID",
        [IB_CM_REJ_INVALID_COMM_INSTANCE]       = "invalid comm instance",
        [IB_CM_REJ_INVALID_SERVICE_ID]          = "invalid service ID",
        [IB_CM_REJ_INVALID_TRANSPORT_TYPE]      = "invalid transport type",
        [IB_CM_REJ_STALE_CONN]                  = "stale conn",
        [IB_CM_REJ_RDC_NOT_EXIST]               = "RDC not exist",
        [IB_CM_REJ_INVALID_GID]                 = "invalid GID",
        [IB_CM_REJ_INVALID_LID]                 = "invalid LID",
        [IB_CM_REJ_INVALID_SL]                  = "invalid SL",
        [IB_CM_REJ_INVALID_TRAFFIC_CLASS]       = "invalid traffic class",
        [IB_CM_REJ_INVALID_HOP_LIMIT]           = "invalid hop limit",
        [IB_CM_REJ_INVALID_PACKET_RATE]         = "invalid packet rate",
        [IB_CM_REJ_INVALID_ALT_GID]             = "invalid alt GID",
        [IB_CM_REJ_INVALID_ALT_LID]             = "invalid alt LID",
        [IB_CM_REJ_INVALID_ALT_SL]              = "invalid alt SL",
        [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS]   = "invalid alt traffic class",
        [IB_CM_REJ_INVALID_ALT_HOP_LIMIT]       = "invalid alt hop limit",
        [IB_CM_REJ_INVALID_ALT_PACKET_RATE]     = "invalid alt packet rate",
        [IB_CM_REJ_PORT_CM_REDIRECT]            = "port CM redirect",
        [IB_CM_REJ_PORT_REDIRECT]               = "port redirect",
        [IB_CM_REJ_INVALID_MTU]                 = "invalid MTU",
        [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources",
        [IB_CM_REJ_CONSUMER_DEFINED]            = "consumer defined",
        [IB_CM_REJ_INVALID_RNR_RETRY]           = "invalid RNR retry",
        [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID]     = "duplicate local comm ID",
        [IB_CM_REJ_INVALID_CLASS_VERSION]       = "invalid class version",
        [IB_CM_REJ_INVALID_FLOW_LABEL]          = "invalid flow label",
        [IB_CM_REJ_INVALID_ALT_FLOW_LABEL]      = "invalid alt flow label",
};

const char *__attribute_const__ ibcm_reject_msg(int reason)
{
        size_t index = reason;

        if (index < ARRAY_SIZE(ibcm_rej_reason_strs) &&
            ibcm_rej_reason_strs[index])
                return ibcm_rej_reason_strs[index];

        return "unrecognized reason";
}
EXPORT_SYMBOL(ibcm_reject_msg);

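/*
 * Usage sketch (illustrative, not part of this file): a CM consumer
 * handling an IB_CM_REJ_RECEIVED event can translate the wire reason
 * into a readable string for logging, e.g.
 *
 *      pr_debug("REJ received: %s\n",
 *               ibcm_reject_msg(event->param.rej_rcvd.reason));
 */
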
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device, void *client_data);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};

static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct xarray local_id_table;
        u32 local_id_next;
        __be32 random_id_operand;
        struct list_head timewait_list;
        struct workqueue_struct *wq;
        /* Sync on cm change port state */
        spinlock_t state_lock;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
        CM_REQ_COUNTER,
        CM_MRA_COUNTER,
        CM_REJ_COUNTER,
        CM_REP_COUNTER,
        CM_RTU_COUNTER,
        CM_DREQ_COUNTER,
        CM_DREP_COUNTER,
        CM_SIDR_REQ_COUNTER,
        CM_SIDR_REP_COUNTER,
        CM_LAP_COUNTER,
        CM_APR_COUNTER,
        CM_ATTR_COUNT,
        CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
        CM_XMIT,
        CM_XMIT_RETRIES,
        CM_RECV,
        CM_RECV_DUPLICATES,
        CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
                                     [sizeof("cm_rx_duplicates")] = {
        "cm_tx_msgs", "cm_tx_retries",
        "cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
        struct kobject obj;
        atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
        struct attribute attr;
        int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
        .attr = { .name = __stringify(_name), .mode = 0444 }, \
        .index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
        &cm_req_counter_attr.attr,
        &cm_mra_counter_attr.attr,
        &cm_rej_counter_attr.attr,
        &cm_rep_counter_attr.attr,
        &cm_rtu_counter_attr.attr,
        &cm_dreq_counter_attr.attr,
        &cm_drep_counter_attr.attr,
        &cm_sidr_req_counter_attr.attr,
        &cm_sidr_rep_counter_attr.attr,
        &cm_lap_counter_attr.attr,
        &cm_apr_counter_attr.attr,
        NULL
};

struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        struct kobject port_obj;
        u8 port_num;
        struct list_head cm_priv_prim_list;
        struct list_head cm_priv_altr_list;
        struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
        struct list_head list;
        struct ib_device *ib_device;
        u8 ack_delay;
        int going_down;
        struct cm_port *port[0];
};

struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct rdma_ah_attr ah_attr;
        u16 pkey_index;
        u8 timeout;
};

struct cm_work {
        struct delayed_work work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct sa_path_rec path[0];
};

struct cm_timewait_info {
        struct cm_work work;    /* Must be first. */
        struct list_head list;
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};

struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;        /* Do not acquire inside cm.lock */
        struct completion comp;
        atomic_t refcount;
        /* Number of clients sharing this ib_cm_id. Only valid for listeners.
         * Protected by the cm.lock spinlock. */
        int listen_sharecount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        __be16 pkey;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;
        u8 target_ack_delay;

        struct list_head prim_list;
        struct list_head altr_list;
        /* Indicates that the send port mad is registered and av is set */
        int prim_send_port_not_ready;
        int altr_send_port_not_ready;

        struct list_head work_list;
        atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                complete(&cm_id_priv->comp);
}

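/*
 * Lifetime note: the refcount starts at 1 in ib_create_cm_id(), and every
 * queued work or in-flight MAD takes an extra reference.  The completion
 * fires only on the final deref, which is what cm_destroy_id() waits on
 * before freeing the structure.
 */
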
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;
        struct cm_av *av;
        unsigned long flags, flags2;
        int ret = 0;

        /* don't let the port be released until the agent is down */
        spin_lock_irqsave(&cm.state_lock, flags2);
        spin_lock_irqsave(&cm.lock, flags);
        if (!cm_id_priv->prim_send_port_not_ready)
                av = &cm_id_priv->av;
        else if (!cm_id_priv->altr_send_port_not_ready &&
                 (cm_id_priv->alt_av.port))
                av = &cm_id_priv->alt_av;
        else {
                pr_info("%s: not a valid CM id\n", __func__);
                ret = -ENODEV;
                spin_unlock_irqrestore(&cm.lock, flags);
                goto out;
        }
        spin_unlock_irqrestore(&cm.lock, flags);
        /* Make sure the port hasn't released the mad agent yet */
        mad_agent = cm_id_priv->av.port->mad_agent;
        if (!mad_agent) {
                pr_info("%s: not a valid MAD agent\n", __func__);
                ret = -ENODEV;
                goto out;
        }
        ah = rdma_create_ah(mad_agent->qp->pd, &av->ah_attr, 0);
        if (IS_ERR(ah)) {
                ret = PTR_ERR(ah);
                goto out;
        }

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               av->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC,
                               IB_MGMT_BASE_VERSION);
        if (IS_ERR(m)) {
                rdma_destroy_ah(ah, 0);
                ret = PTR_ERR(m);
                goto out;
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;

out:
        spin_unlock_irqrestore(&cm.state_lock, flags2);
        return ret;
}

static struct ib_mad_send_buf *cm_alloc_response_msg_no_ah(struct cm_port *port,
                                                           struct ib_mad_recv_wc *mad_recv_wc)
{
        return ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                                  0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                                  GFP_ATOMIC,
                                  IB_MGMT_BASE_VERSION);
}

static int cm_create_response_msg_ah(struct cm_port *port,
                                     struct ib_mad_recv_wc *mad_recv_wc,
                                     struct ib_mad_send_buf *msg)
{
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        msg->ah = ah;
        return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        if (msg->ah)
                rdma_destroy_ah(msg->ah, 0);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}

static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        int ret;

        m = cm_alloc_response_msg_no_ah(port, mad_recv_wc);
        if (IS_ERR(m))
                return PTR_ERR(m);

        ret = cm_create_response_msg_ah(port, mad_recv_wc, m);
        if (ret) {
                ib_free_send_mad(m);
                return ret;
        }

        *msg = m;
        return 0;
}

static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmemdup(private_data, private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}

static int cm_init_av_for_lap(struct cm_port *port, struct ib_wc *wc,
                              struct ib_grh *grh, struct cm_av *av)
{
        struct rdma_ah_attr new_ah_attr;
        int ret;

        av->port = port;
        av->pkey_index = wc->pkey_index;

        /*
         * av->ah_attr might be initialized based on a past wc during an
         * incoming connect request, or while sending out a connect request.
         * So initialize a new ah_attr on the stack.  If initialization
         * fails, the old ah_attr is used for sending any responses.  If
         * initialization succeeds, the new ah_attr is used, overwriting
         * the old one.
         */
        ret = ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
                                      port->port_num, wc,
                                      grh, &new_ah_attr);
        if (ret)
                return ret;

        rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
        return 0;
}

static int cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
                                   struct ib_grh *grh, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        return ib_init_ah_attr_from_wc(port->cm_dev->ib_device,
                                       port->port_num, wc,
                                       grh, &av->ah_attr);
}

static int add_cm_id_to_port_list(struct cm_id_private *cm_id_priv,
                                  struct cm_av *av,
                                  struct cm_port *port)
{
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&cm.lock, flags);

        if (&cm_id_priv->av == av)
                list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list);
        else if (&cm_id_priv->alt_av == av)
                list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list);
        else
                ret = -EINVAL;

        spin_unlock_irqrestore(&cm.lock, flags);
        return ret;
}

static struct cm_port *
get_cm_port_from_path(struct sa_path_rec *path, const struct ib_gid_attr *attr)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;

        if (attr) {
                read_lock_irqsave(&cm.device_lock, flags);
                list_for_each_entry(cm_dev, &cm.device_list, list) {
                        if (cm_dev->ib_device == attr->device) {
                                port = cm_dev->port[attr->port_num - 1];
                                break;
                        }
                }
                read_unlock_irqrestore(&cm.device_lock, flags);
        } else {
                /* The SGID attribute can be NULL in the following cases:
                 * (a) Alternative path
                 * (b) IB link layer without GRH
                 * (c) LAP send messages
                 */
                read_lock_irqsave(&cm.device_lock, flags);
                list_for_each_entry(cm_dev, &cm.device_list, list) {
                        attr = rdma_find_gid(cm_dev->ib_device,
                                             &path->sgid,
                                             sa_conv_pathrec_to_gid_type(path),
                                             NULL);
                        if (!IS_ERR(attr)) {
                                port = cm_dev->port[attr->port_num - 1];
                                break;
                        }
                }
                read_unlock_irqrestore(&cm.device_lock, flags);
                if (port)
                        rdma_put_gid_attr(attr);
        }
        return port;
}

static int cm_init_av_by_path(struct sa_path_rec *path,
                              const struct ib_gid_attr *sgid_attr,
                              struct cm_av *av,
                              struct cm_id_private *cm_id_priv)
{
        struct rdma_ah_attr new_ah_attr;
        struct cm_device *cm_dev;
        struct cm_port *port;
        int ret;

        port = get_cm_port_from_path(path, sgid_attr);
        if (!port)
                return -EINVAL;
        cm_dev = port->cm_dev;

        ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;

        /*
         * av->ah_attr might have been initialized from a wc, or during
         * request processing, and may still hold a reference to sgid_attr.
         * So initialize a new ah_attr on the stack.  If initialization
         * fails, the old ah_attr is used for sending any responses.  If
         * initialization succeeds, the new ah_attr is used, overwriting
         * the old one, so the right ah_attr can be used to return an
         * error response.
         */
        ret = ib_init_ah_attr_from_path(cm_dev->ib_device, port->port_num, path,
                                        &new_ah_attr, sgid_attr);
        if (ret)
                return ret;

        av->timeout = path->packet_life_time + 1;

        ret = add_cm_id_to_port_list(cm_id_priv, av, port);
        if (ret) {
                rdma_destroy_ah_attr(&new_ah_attr);
                return ret;
        }
        rdma_move_ah_attr(&av->ah_attr, &new_ah_attr);
        return 0;
}

static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        int err;
        u32 id;

        err = xa_alloc_cyclic_irq(&cm.local_id_table, &id, cm_id_priv,
                                  xa_limit_32b, &cm.local_id_next, GFP_KERNEL);

        cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
        return err;
}

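/*
 * The xarray index is XORed with cm.random_id_operand so that local
 * communication IDs on the wire are not trivially predictable;
 * cm_local_id() below undoes the XOR to recover the xarray index.
 */
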
static u32 cm_local_id(__be32 local_id)
{
        return (__force u32) (local_id ^ cm.random_id_operand);
}

static void cm_free_id(__be32 local_id)
{
        xa_erase_irq(&cm.local_id_table, cm_local_id(local_id));
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = xa_load(&cm.local_id_table, cm_local_id(local_id));
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }

        return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        spin_lock_irq(&cm.lock);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irq(&cm.lock);

        return cm_id_priv;
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
        return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
        return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
        return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
        return (__force u64) a > (__force u64) b;
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device))
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
                        link = &(*link)->rb_left;
                else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
                        link = &(*link)->rb_right;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}

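/*
 * The listen tree is ordered by (device, service ID): nodes compare
 * first on the device pointer, then on the service ID, with service
 * masks applied when testing for an overlapping existing listener.
 * cm_find_listen() below walks the tree with the same comparisons.
 */
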
static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device))
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (be64_lt(service_id, cm_id_priv->id.service_id))
                        node = node->rb_left;
                else if (be64_gt(service_id, cm_id_priv->id.service_id))
                        node = node->rb_right;
                else
                        node = node->rb_right;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
                        link = &(*link)->rb_right;
                else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_left;
                else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (be32_lt(remote_id, timewait_info->work.remote_id))
                        node = node->rb_left;
                else if (be32_gt(remote_id, timewait_info->work.remote_id))
                        node = node->rb_right;
                else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
                        node = node->rb_left;
                else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
                        link = &(*link)->rb_right;
                else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_left;
                else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_completion(&cm_id_priv->comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        INIT_LIST_HEAD(&cm_id_priv->prim_list);
        INIT_LIST_HEAD(&cm_id_priv->altr_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:
        kfree(cm_id_priv);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}

static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}

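/*
 * Worked example: an IBA time of 12 encodes 4.096us * 2^12 ~= 16.8ms,
 * and cm_convert_to_ms(12) returns 1 << (12 - 8) = 16ms.  The shift
 * works because 4.096us * 2^8 ~= 1.05ms, so each unit above 8 roughly
 * doubles a one-millisecond base.
 */
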
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
        int ack_timeout = packet_life_time + 1;

        if (ack_timeout >= ca_ack_delay)
                ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
        else
                ack_timeout = ca_ack_delay +
                              (ack_timeout >= (ca_ack_delay - 1));

        return min(31, ack_timeout);
}

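/*
 * Worked example: with packet_life_time = 16 and ca_ack_delay = 15,
 * ack_timeout starts at 17; 17 >= 15 but 15 >= 16 is false, so 17 is
 * returned.  With ca_ack_delay = 17 instead, both 17 >= 17 and
 * 17 >= 16 hold, so the result rounds up to 18 (one increment doubles
 * the encoded timeout).
 */
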
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;
        unsigned long flags;
        struct cm_device *cm_dev;

        cm_dev = ib_get_client_data(cm_id_priv->id.device, &cm_client);
        if (!cm_dev)
                return;

        spin_lock_irqsave(&cm.lock, flags);
        cm_cleanup_timewait(cm_id_priv->timewait_info);
        list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
        spin_unlock_irqrestore(&cm.lock, flags);

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);

        /* Check if the device started its remove_one */
        spin_lock_irqsave(&cm.lock, flags);
        if (!cm_dev->going_down)
                queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                                   msecs_to_jiffies(wait_time));
        spin_unlock_irqrestore(&cm.lock, flags);

        cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;

        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                spin_lock_irqsave(&cm.lock, flags);
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irqrestore(&cm.lock, flags);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                spin_unlock_irq(&cm_id_priv->lock);

                spin_lock_irq(&cm.lock);
                if (--cm_id_priv->listen_sharecount > 0) {
                        /* The id is still shared. */
                        cm_deref_id(cm_id_priv);
                        spin_unlock_irq(&cm.lock);
                        return;
                }
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irq(&cm.lock);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                spin_lock_irq(&cm.lock);
                if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
                        rb_erase(&cm_id_priv->sidr_id_node,
                                 &cm.remote_sidr_table);
                spin_unlock_irq(&cm.lock);
                break;
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->id.device->node_guid,
                               sizeof cm_id_priv->id.device->node_guid,
                               NULL, 0);
                break;
        case IB_CM_REQ_RCVD:
                if (err == -ENOMEM) {
                        /* Do not reject to allow future retries. */
                        cm_reset_to_idle(cm_id_priv);
                        spin_unlock_irq(&cm_id_priv->lock);
                } else {
                        spin_unlock_irq(&cm_id_priv->lock);
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                }
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                               NULL, 0, NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irq(&cm_id_priv->lock);
                if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
                        break;
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        }

        spin_lock_irq(&cm.lock);
        if (!list_empty(&cm_id_priv->altr_list) &&
            (!cm_id_priv->altr_send_port_not_ready))
                list_del(&cm_id_priv->altr_list);
        if (!list_empty(&cm_id_priv->prim_list) &&
            (!cm_id_priv->prim_send_port_not_ready))
                list_del(&cm_id_priv->prim_list);
        spin_unlock_irq(&cm.lock);

        cm_free_id(cm_id->local_id);
        cm_deref_id(cm_id_priv);
        wait_for_completion(&cm_id_priv->comp);
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);

        rdma_destroy_ah_attr(&cm_id_priv->av.ah_attr);
        rdma_destroy_ah_attr(&cm_id_priv->alt_av.ah_attr);
        kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

/**
 * __ib_cm_listen - Initiates listening on the specified service ID for
 *   connection and service ID resolution requests.
 * @cm_id: Connection identifier associated with the listen request.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 * @service_mask: Mask applied to service ID used to listen across a
 *   range of service IDs.  If set to 0, the service ID is matched
 *   exactly.  This parameter is ignored if %service_id is set to
 *   IB_CM_ASSIGN_SERVICE_ID.
 */
static int __ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
                          __be64 service_mask)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        int ret = 0;

        service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        if (cm_id->state != IB_CM_IDLE)
                return -EINVAL;

        cm_id->state = IB_CM_LISTEN;
        ++cm_id_priv->listen_sharecount;

        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = ~cpu_to_be64(0);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                --cm_id_priv->listen_sharecount;
                ret = -EBUSY;
        }
        return ret;
}

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm.lock, flags);
        ret = __ib_cm_listen(cm_id, service_id, service_mask);
        spin_unlock_irqrestore(&cm.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);

/**
 * ib_cm_insert_listen - Create a new listening ib_cm_id and listen on
 *   the given service ID.
 *
 * If there's an existing ID listening on that same device and service ID,
 * return it.
 *
 * @device: Device associated with the cm_id. All related communication will
 *   be associated with the specified device.
 * @cm_handler: Callback invoked to notify the user of CM events.
 * @service_id: Service identifier matched against incoming connection
 *   and service ID resolution requests.  The service ID should be specified
 *   in network-byte order.  If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
 *   assign a service ID to the caller.
 *
 * Callers should call ib_destroy_cm_id when done with the listener ID.
 */
struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
                                     ib_cm_handler cm_handler,
                                     __be64 service_id)
{
        struct cm_id_private *cm_id_priv;
        struct ib_cm_id *cm_id;
        unsigned long flags;
        int err = 0;

        /* Create an ID in advance, since the creation may sleep */
        cm_id = ib_create_cm_id(device, cm_handler, NULL);
        if (IS_ERR(cm_id))
                return cm_id;

        spin_lock_irqsave(&cm.lock, flags);

        if (service_id == IB_CM_ASSIGN_SERVICE_ID)
                goto new_id;

        /* Find an existing ID */
        cm_id_priv = cm_find_listen(device, service_id);
        if (cm_id_priv) {
                if (cm_id->cm_handler != cm_handler || cm_id->context) {
                        /* Sharing an ib_cm_id with different handlers is not
                         * supported */
                        spin_unlock_irqrestore(&cm.lock, flags);
                        return ERR_PTR(-EINVAL);
                }
                atomic_inc(&cm_id_priv->refcount);
                ++cm_id_priv->listen_sharecount;
                spin_unlock_irqrestore(&cm.lock, flags);

                ib_destroy_cm_id(cm_id);
                cm_id = &cm_id_priv->id;
                return cm_id;
        }

new_id:
        /* Use newly created ID */
        err = __ib_cm_listen(cm_id, service_id, 0);

        spin_unlock_irqrestore(&cm.lock, flags);

        if (err) {
                ib_destroy_cm_id(cm_id);
                return ERR_PTR(err);
        }
        return cm_id;
}
EXPORT_SYMBOL(ib_cm_insert_listen);

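/*
 * Usage sketch (illustrative, assuming a caller-defined handler named
 * my_handler):
 *
 *      id = ib_cm_insert_listen(device, my_handler, cpu_to_be64(svc_id));
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *      ...
 *      ib_destroy_cm_id(id);
 *
 * A second caller passing the same device, handler, and service ID gets
 * the same ib_cm_id back, shared via listen_sharecount.
 */
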
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv)
{
        u64 hi_tid, low_tid;

        hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid = (u64)cm_id_priv->id.local_id;
        return cpu_to_be64(hi_tid | low_tid);
}

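/*
 * TID layout: the MAD agent's hi_tid occupies the upper 32 bits and the
 * local communication ID the lower 32 bits, so an incoming response can
 * be demultiplexed back to the cm_id that issued the request.
 */
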
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        struct sa_path_rec *pri_path = param->primary_path;
        struct sa_path_rec *alt_path = param->alternate_path;
        bool pri_ext = false;

        if (pri_path->rec_type == SA_PATH_REC_TYPE_OPA)
                pri_ext = opa_is_extended_lid(pri_path->opa.dlid,
                                              pri_path->opa.slid);

        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);

        if (param->qp_type != IB_QPT_XRC_INI) {
                cm_req_set_resp_res(req_msg, param->responder_resources);
                cm_req_set_retry_count(req_msg, param->retry_count);
                cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
                cm_req_set_srq(req_msg, param->srq);
        }

        req_msg->primary_local_gid = pri_path->sgid;
        req_msg->primary_remote_gid = pri_path->dgid;
        if (pri_ext) {
                req_msg->primary_local_gid.global.interface_id
                        = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.slid));
                req_msg->primary_remote_gid.global.interface_id
                        = OPA_MAKE_ID(be32_to_cpu(pri_path->opa.dlid));
        }
        if (pri_path->hop_limit <= 1) {
                req_msg->primary_local_lid = pri_ext ? 0 :
                        htons(ntohl(sa_path_get_slid(pri_path)));
                req_msg->primary_remote_lid = pri_ext ? 0 :
                        htons(ntohl(sa_path_get_dlid(pri_path)));
        } else {
                /* Work-around until there's a way to obtain remote LID info */
                req_msg->primary_local_lid = IB_LID_PERMISSIVE;
                req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
        }
        cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
        req_msg->primary_traffic_class = pri_path->traffic_class;
        req_msg->primary_hop_limit = pri_path->hop_limit;
        cm_req_set_primary_sl(req_msg, pri_path->sl);
        cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
        cm_req_set_primary_local_ack_timeout(req_msg,
                cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                               pri_path->packet_life_time));

        if (alt_path) {
                bool alt_ext = false;

                if (alt_path->rec_type == SA_PATH_REC_TYPE_OPA)
                        alt_ext = opa_is_extended_lid(alt_path->opa.dlid,
                                                      alt_path->opa.slid);

                req_msg->alt_local_gid = alt_path->sgid;
                req_msg->alt_remote_gid = alt_path->dgid;
                if (alt_ext) {
                        req_msg->alt_local_gid.global.interface_id
                                = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.slid));
                        req_msg->alt_remote_gid.global.interface_id
                                = OPA_MAKE_ID(be32_to_cpu(alt_path->opa.dlid));
                }
                if (alt_path->hop_limit <= 1) {
                        req_msg->alt_local_lid = alt_ext ? 0 :
                                htons(ntohl(sa_path_get_slid(alt_path)));
                        req_msg->alt_remote_lid = alt_ext ? 0 :
                                htons(ntohl(sa_path_get_dlid(alt_path)));
                } else {
                        req_msg->alt_local_lid = IB_LID_PERMISSIVE;
                        req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
                }
                cm_req_set_alt_flow_label(req_msg,
                                          alt_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
                req_msg->alt_traffic_class = alt_path->traffic_class;
                req_msg->alt_hop_limit = alt_path->hop_limit;
                cm_req_set_alt_sl(req_msg, alt_path->sl);
                cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
                cm_req_set_alt_local_ack_timeout(req_msg,
                        cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                                       alt_path->packet_life_time));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
            param->qp_type != IB_QPT_XRC_INI)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
                   struct ib_cm_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct cm_req_msg *req_msg;
        unsigned long flags;
        int ret;

        ret = cm_validate_req_param(param);
        if (ret)
                return ret;

        /* Verify that we're not in timewait. */
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto out;
        }

        ret = cm_init_av_by_path(param->primary_path,
                                 param->ppath_sgid_attr, &cm_id_priv->av,
                                 cm_id_priv);
        if (ret)
                goto error1;
        if (param->alternate_path) {
                ret = cm_init_av_by_path(param->alternate_path, NULL,
                                         &cm_id_priv->alt_av, cm_id_priv);
                if (ret)
                        goto error1;
        }
        cm_id->service_id = param->service_id;
        cm_id->service_mask = ~cpu_to_be64(0);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
                                    param->remote_cm_response_timeout);
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
        cm_id_priv->pkey = param->primary_path->pkey;
        cm_id_priv->qp_type = param->qp_type;

        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
        if (ret)
                goto error1;

        req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
        cm_format_req(req_msg, cm_id_priv, param);
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
        cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto error2;
        }
        BUG_ON(cm_id->state != IB_CM_IDLE);
        cm_id->state = IB_CM_REQ_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out:    return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);

static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information.  Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

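/*
 * Deterministic tie-break: the side with the numerically larger CA GUID
 * (or, on a GUID tie, the larger QPN) is the active peer, so both ends
 * of a connection resolve role conflicts identically.
 */
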
static bool cm_req_has_alt_path(struct cm_req_msg *req_msg)
{
        return ((req_msg->alt_local_lid) ||
                (ib_is_opa_gid(&req_msg->alt_local_gid)));
}

static void cm_path_set_rec_type(struct ib_device *ib_device, u8 port_num,
                                 struct sa_path_rec *path, union ib_gid *gid)
{
        if (ib_is_opa_gid(gid) && rdma_cap_opa_ah(ib_device, port_num))
                path->rec_type = SA_PATH_REC_TYPE_OPA;
        else
                path->rec_type = SA_PATH_REC_TYPE_IB;
}

static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,
                                        struct sa_path_rec *primary_path,
                                        struct sa_path_rec *alt_path)
{
        u32 lid;

        if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
                sa_path_set_dlid(primary_path,
                                 ntohs(req_msg->primary_local_lid));
                sa_path_set_slid(primary_path,
                                 ntohs(req_msg->primary_remote_lid));
        } else {
                lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
                sa_path_set_dlid(primary_path, lid);

                lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
                sa_path_set_slid(primary_path, lid);
        }

        if (!cm_req_has_alt_path(req_msg))
                return;

        if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
                sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
                sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
        } else {
                lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
                sa_path_set_dlid(alt_path, lid);

                lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
                sa_path_set_slid(alt_path, lid);
        }
}

static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
                                     struct sa_path_rec *primary_path,
                                     struct sa_path_rec *alt_path)
{
        primary_path->dgid = req_msg->primary_local_gid;
        primary_path->sgid = req_msg->primary_remote_gid;
        primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
        primary_path->hop_limit = req_msg->primary_hop_limit;
        primary_path->traffic_class = req_msg->primary_traffic_class;
        primary_path->reversible = 1;
        primary_path->pkey = req_msg->pkey;
        primary_path->sl = cm_req_get_primary_sl(req_msg);
        primary_path->mtu_selector = IB_SA_EQ;
        primary_path->mtu = cm_req_get_path_mtu(req_msg);
        primary_path->rate_selector = IB_SA_EQ;
        primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
        primary_path->packet_life_time_selector = IB_SA_EQ;
        primary_path->packet_life_time =
                cm_req_get_primary_local_ack_timeout(req_msg);
        primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
        primary_path->service_id = req_msg->service_id;
        if (sa_path_is_roce(primary_path))
                primary_path->roce.route_resolved = false;

        if (cm_req_has_alt_path(req_msg)) {
                alt_path->dgid = req_msg->alt_local_gid;
                alt_path->sgid = req_msg->alt_remote_gid;
                alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
                alt_path->hop_limit = req_msg->alt_hop_limit;
                alt_path->traffic_class = req_msg->alt_traffic_class;
                alt_path->reversible = 1;
                alt_path->pkey = req_msg->pkey;
                alt_path->sl = cm_req_get_alt_sl(req_msg);
                alt_path->mtu_selector = IB_SA_EQ;
                alt_path->mtu = cm_req_get_path_mtu(req_msg);
                alt_path->rate_selector = IB_SA_EQ;
                alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
                alt_path->packet_life_time_selector = IB_SA_EQ;
                alt_path->packet_life_time =
                        cm_req_get_alt_local_ack_timeout(req_msg);
                alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
                alt_path->service_id = req_msg->service_id;

                if (sa_path_is_roce(alt_path))
                        alt_path->roce.route_resolved = false;
        }
        cm_format_path_lid_from_req(req_msg, primary_path, alt_path);
}

static u16 cm_get_bth_pkey(struct cm_work *work)
{
        struct ib_device *ib_dev = work->port->cm_dev->ib_device;
        u8 port_num = work->port->port_num;
        u16 pkey_index = work->mad_recv_wc->wc->pkey_index;
        u16 pkey;
        int ret;

        ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
        if (ret) {
                dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
                                     port_num, pkey_index, ret);
                return 0;
        }

        return pkey;
}

/**
 * cm_opa_to_ib_sgid - Convert OPA SGID to IB SGID
 * ULPs (such as IPoIB) do not understand OPA GIDs and will
 * reject them as the local_gid will not match the sgid. Therefore,
 * change the pathrec's SGID to an IB SGID.
 *
 * @work: Work completion
 * @path: Path record
 */
static void cm_opa_to_ib_sgid(struct cm_work *work,
                              struct sa_path_rec *path)
{
        struct ib_device *dev = work->port->cm_dev->ib_device;
        u8 port_num = work->port->port_num;

        if (rdma_cap_opa_ah(dev, port_num) &&
            (ib_is_opa_gid(&path->sgid))) {
                union ib_gid sgid;

                if (rdma_query_gid(dev, port_num, 0, &sgid)) {
                        dev_warn(&dev->dev,
                                 "Error updating sgid in CM request\n");
                        return;
                }

                path->sgid = sgid;
        }
}

static void cm_format_req_event(struct cm_work *work,
                                struct cm_id_private *cm_id_priv,
                                struct ib_cm_id *listen_id)
{
        struct cm_req_msg *req_msg;
        struct ib_cm_req_event_param *param;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
        param = &work->cm_event.param.req_rcvd;
        param->listen_id = listen_id;
        param->bth_pkey = cm_get_bth_pkey(work);
        param->port = cm_id_priv->av.port->port_num;
        param->primary_path = &work->path[0];
        cm_opa_to_ib_sgid(work, param->primary_path);
        if (cm_req_has_alt_path(req_msg)) {
                param->alternate_path = &work->path[1];
                cm_opa_to_ib_sgid(work, param->alternate_path);
        } else {
                param->alternate_path = NULL;
        }
        param->remote_ca_guid = req_msg->local_ca_guid;
        param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
        param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
        param->qp_type = cm_req_get_qp_type(req_msg);
        param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
        param->responder_resources = cm_req_get_init_depth(req_msg);
        param->initiator_depth = cm_req_get_resp_res(req_msg);
        param->local_cm_response_timeout =
                cm_req_get_remote_resp_timeout(req_msg);
        param->flow_control = cm_req_get_flow_ctrl(req_msg);
        param->remote_cm_response_timeout =
                cm_req_get_local_resp_timeout(req_msg);
        param->retry_count = cm_req_get_retry_count(req_msg);
        param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
        param->srq = cm_req_get_srq(req_msg);
        param->ppath_sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
        work->cm_event.private_data = &req_msg->private_data;
}

static void cm_process_work(struct cm_id_private *cm_id_priv,
                            struct cm_work *work)
{
        int ret;

        /* We will typically only have the current event to report. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
        cm_free_work(work);

        while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
                spin_lock_irq(&cm_id_priv->lock);
                work = cm_dequeue_work(cm_id_priv);
                spin_unlock_irq(&cm_id_priv->lock);
                if (!work)
                        return;

                ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
                                                &work->cm_event);
                cm_free_work(work);
        }
        cm_deref_id(cm_id_priv);
        if (ret)
                cm_destroy_id(&cm_id_priv->id, ret);
}

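/*
 * work_count starts at -1 and is incremented once per event queued on
 * the cm_id, so the atomic_add_negative(-1, ...) test above ends the
 * dispatch loop exactly when no queued events remain.  A nonzero return
 * from the consumer's handler tears the id down via cm_destroy_id().
 */
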
static void cm_format_mra(struct cm_mra_msg *mra_msg,
                          struct cm_id_private *cm_id_priv,
                          enum cm_msg_response msg_mraed, u8 service_timeout,
                          const void *private_data, u8 private_data_len)
{
        cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
        cm_mra_set_msg_mraed(mra_msg, msg_mraed);
        mra_msg->local_comm_id = cm_id_priv->id.local_id;
        mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
        cm_mra_set_service_timeout(mra_msg, service_timeout);

        if (private_data && private_data_len)
                memcpy(mra_msg->private_data, private_data, private_data_len);
}

static void cm_format_rej(struct cm_rej_msg *rej_msg,
                          struct cm_id_private *cm_id_priv,
                          enum ib_cm_rej_reason reason,
                          void *ari,
                          u8 ari_length,
                          const void *private_data,
                          u8 private_data_len)
{
        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
        rej_msg->remote_comm_id = cm_id_priv->id.remote_id;

        switch(cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
                rej_msg->local_comm_id = 0;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_MRA_REQ_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
                break;
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
                break;
        default:
                rej_msg->local_comm_id = cm_id_priv->id.local_id;
                cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
                break;
        }

        rej_msg->reason = cpu_to_be16(reason);
        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        if (private_data && private_data_len)
                memcpy(rej_msg->private_data, private_data, private_data_len);
}

static void cm_dup_req_handler(struct cm_work *work,
                               struct cm_id_private *cm_id_priv)
{
        struct ib_mad_send_buf *msg = NULL;
        int ret;

        atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
                        counter[CM_REQ_COUNTER]);

        /* Quick state check to discard duplicate REQs. */
        if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
                return;

        ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
        if (ret)
                return;

        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id_priv->id.state) {
        case IB_CM_MRA_REQ_SENT:
                cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
                              CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
                              cm_id_priv->private_data,
                              cm_id_priv->private_data_len);
                break;
        case IB_CM_TIMEWAIT:
                cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
                              IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
                break;
        default:
                goto unlock;
        }
        spin_unlock_irq(&cm_id_priv->lock);

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                goto free;
        return;

unlock: spin_unlock_irq(&cm_id_priv->lock);
free:   cm_free_msg(msg);
}

static struct cm_id_private * cm_match_req(struct cm_work *work,
                                           struct cm_id_private *cm_id_priv)
{
        struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
        struct cm_timewait_info *timewait_info;
        struct cm_req_msg *req_msg;
        struct ib_cm_id *cm_id;

        req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

        /* Check for possible duplicate REQ. */
        spin_lock_irq(&cm.lock);
        timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
        if (timewait_info) {
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);
                spin_unlock_irq(&cm.lock);
                if (cur_cm_id_priv) {
                        cm_dup_req_handler(work, cur_cm_id_priv);
                        cm_deref_id(cur_cm_id_priv);
                }
                return NULL;
        }

        /* Check for stale connections. */
        timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
        if (timewait_info) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
                                           timewait_info->work.remote_id);

                spin_unlock_irq(&cm.lock);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                if (cur_cm_id_priv) {
                        cm_id = &cur_cm_id_priv->id;
                        ib_send_cm_dreq(cm_id, NULL, 0);
                        cm_deref_id(cur_cm_id_priv);
                }
                return NULL;
        }

        /* Find matching listen request. */
        listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
                                           req_msg->service_id);
        if (!listen_cm_id_priv) {
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irq(&cm.lock);
                cm_issue_rej(work->port, work->mad_recv_wc,
                             IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
                             NULL, 0);
                return NULL;
        }
        atomic_inc(&listen_cm_id_priv->refcount);
        atomic_inc(&cm_id_priv->refcount);
        cm_id_priv->id.state = IB_CM_REQ_RCVD;
        atomic_inc(&cm_id_priv->work_count);
        spin_unlock_irq(&cm.lock);

        return listen_cm_id_priv;
}

/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion.
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
        if (!cm_req_get_primary_subnet_local(req_msg)) {
                if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
                        req_msg->primary_local_lid = ib_lid_be16(wc->slid);
                        cm_req_set_primary_sl(req_msg, wc->sl);
                }

                if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
                        req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
        }

        if (!cm_req_get_alt_subnet_local(req_msg)) {
                if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
                        req_msg->alt_local_lid = ib_lid_be16(wc->slid);
                        cm_req_set_alt_sl(req_msg, wc->sl);
                }

                if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
                        req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
        }
}

1935 static int cm_req_handler(struct cm_work *work)
1937 struct ib_cm_id *cm_id;
1938 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1939 struct cm_req_msg *req_msg;
1940 const struct ib_global_route *grh;
1941 const struct ib_gid_attr *gid_attr;
1944 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1946 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
1948 return PTR_ERR(cm_id);
1950 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1951 cm_id_priv->id.remote_id = req_msg->local_comm_id;
1952 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1953 work->mad_recv_wc->recv_buf.grh,
1957 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1959 if (IS_ERR(cm_id_priv->timewait_info)) {
1960 ret = PTR_ERR(cm_id_priv->timewait_info);
1963 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1964 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1965 cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1967 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1968 if (!listen_cm_id_priv) {
1969 pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
1970 be32_to_cpu(cm_id->local_id));
1975 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1976 cm_id_priv->id.context = listen_cm_id_priv->id.context;
1977 cm_id_priv->id.service_id = req_msg->service_id;
1978 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
1980 cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
1982 memset(&work->path[0], 0, sizeof(work->path[0]));
1983 if (cm_req_has_alt_path(req_msg))
1984 memset(&work->path[1], 0, sizeof(work->path[1]));
1985 grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
1986 gid_attr = grh->sgid_attr;
1989 rdma_protocol_roce(work->port->cm_dev->ib_device,
1990 work->port->port_num)) {
1991 work->path[0].rec_type =
1992 sa_conv_gid_to_pathrec_type(gid_attr->gid_type);
1994 cm_path_set_rec_type(work->port->cm_dev->ib_device,
1995 work->port->port_num,
1997 &req_msg->primary_local_gid);
1999 if (cm_req_has_alt_path(req_msg))
2000 work->path[1].rec_type = work->path[0].rec_type;
2001 cm_format_paths_from_req(req_msg, &work->path[0],
2003 if (cm_id_priv->av.ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
2004 sa_path_set_dmac(&work->path[0],
2005 cm_id_priv->av.ah_attr.roce.dmac);
2006 work->path[0].hop_limit = grh->hop_limit;
2007 ret = cm_init_av_by_path(&work->path[0], gid_attr, &cm_id_priv->av,
2012 err = rdma_query_gid(work->port->cm_dev->ib_device,
2013 work->port->port_num, 0,
2014 &work->path[0].sgid);
2016 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
2019 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
2020 &work->path[0].sgid,
2021 sizeof(work->path[0].sgid),
2025 if (cm_req_has_alt_path(req_msg)) {
2026 ret = cm_init_av_by_path(&work->path[1], NULL,
2027 &cm_id_priv->alt_av, cm_id_priv);
2029 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
2030 &work->path[0].sgid,
2031 sizeof(work->path[0].sgid), NULL, 0);
2035 cm_id_priv->tid = req_msg->hdr.tid;
2036 cm_id_priv->timeout_ms = cm_convert_to_ms(
2037 cm_req_get_local_resp_timeout(req_msg));
2038 cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
2039 cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
2040 cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
2041 cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
2042 cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
2043 cm_id_priv->pkey = req_msg->pkey;
2044 cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
2045 cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
2046 cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
2047 cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
2049 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
2050 cm_process_work(cm_id_priv, work);
2051 cm_deref_id(listen_cm_id_priv);
2055 atomic_dec(&cm_id_priv->refcount);
2056 cm_deref_id(listen_cm_id_priv);
2058 kfree(cm_id_priv->timewait_info);
2060 ib_destroy_cm_id(cm_id);
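/*
 * Annotation (not in the original source): cm_req_handler() is the
 * passive-side entry point. In outline it creates a fresh cm_id for the
 * incoming REQ, allocates timewait state keyed by the peer's comm ID and
 * QPN, matches the REQ against a listener via cm_match_req(), inherits the
 * listener's handler and context, converts the wire format into sa_path_rec
 * entries for the primary (and optional alternate) path, and queues an
 * IB_CM_REQ_RECEIVED event to the ULP. The error unwind drops both the new
 * cm_id's reference and the listener reference.
 */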
2064 static void cm_format_rep(struct cm_rep_msg *rep_msg,
2065 struct cm_id_private *cm_id_priv,
2066 struct ib_cm_rep_param *param)
2068 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
2069 rep_msg->local_comm_id = cm_id_priv->id.local_id;
2070 rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2071 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
2072 rep_msg->resp_resources = param->responder_resources;
2073 cm_rep_set_target_ack_delay(rep_msg,
2074 cm_id_priv->av.port->cm_dev->ack_delay);
2075 cm_rep_set_failover(rep_msg, param->failover_accepted);
2076 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
2077 rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
2079 if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
2080 rep_msg->initiator_depth = param->initiator_depth;
2081 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
2082 cm_rep_set_srq(rep_msg, param->srq);
2083 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
2085 cm_rep_set_srq(rep_msg, 1);
2086 cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
2089 if (param->private_data && param->private_data_len)
2090 memcpy(rep_msg->private_data, param->private_data,
2091 param->private_data_len);
2094 int ib_send_cm_rep(struct ib_cm_id *cm_id,
2095 struct ib_cm_rep_param *param)
2097 struct cm_id_private *cm_id_priv;
2098 struct ib_mad_send_buf *msg;
2099 struct cm_rep_msg *rep_msg;
2100 unsigned long flags;
2103 if (param->private_data &&
2104 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
2107 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2108 spin_lock_irqsave(&cm_id_priv->lock, flags);
2109 if (cm_id->state != IB_CM_REQ_RCVD &&
2110 cm_id->state != IB_CM_MRA_REQ_SENT) {
2111 pr_debug("%s: local_comm_id %d, cm_id->state: %d\n", __func__,
2112 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2117 ret = cm_alloc_msg(cm_id_priv, &msg);
2121 rep_msg = (struct cm_rep_msg *) msg->mad;
2122 cm_format_rep(rep_msg, cm_id_priv, param);
2123 msg->timeout_ms = cm_id_priv->timeout_ms;
2124 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
2126 ret = ib_post_send_mad(msg, NULL);
2128 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2133 cm_id->state = IB_CM_REP_SENT;
2134 cm_id_priv->msg = msg;
2135 cm_id_priv->initiator_depth = param->initiator_depth;
2136 cm_id_priv->responder_resources = param->responder_resources;
2137 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
2138 cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
2140 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2143 EXPORT_SYMBOL(ib_send_cm_rep);
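/*
 * Usage sketch (illustrative only, not part of this file): a passive-side
 * ULP typically answers IB_CM_REQ_RECEIVED by filling ib_cm_rep_param and
 * calling ib_send_cm_rep(). The depth/PSN values below are placeholders.
 */
#if 0
static int example_accept_req(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_cm_rep_param rep = {
		.qp_num			= qp->qp_num,
		.starting_psn		= qp->qp_num,	/* any 24-bit value */
		.responder_resources	= 4,
		.initiator_depth	= 4,
		.flow_control		= 1,
		.rnr_retry_count	= 7,	/* 7 == retry forever */
	};

	/* Legal only in IB_CM_REQ_RCVD or IB_CM_MRA_REQ_SENT. */
	return ib_send_cm_rep(cm_id, &rep);
}
#endif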
2145 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
2146 struct cm_id_private *cm_id_priv,
2147 const void *private_data,
2148 u8 private_data_len)
2150 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
2151 rtu_msg->local_comm_id = cm_id_priv->id.local_id;
2152 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
2154 if (private_data && private_data_len)
2155 memcpy(rtu_msg->private_data, private_data, private_data_len);
2158 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
2159 const void *private_data,
2160 u8 private_data_len)
2162 struct cm_id_private *cm_id_priv;
2163 struct ib_mad_send_buf *msg;
2164 unsigned long flags;
2168 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
2171 data = cm_copy_private_data(private_data, private_data_len);
2173 return PTR_ERR(data);
2175 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2176 spin_lock_irqsave(&cm_id_priv->lock, flags);
2177 if (cm_id->state != IB_CM_REP_RCVD &&
2178 cm_id->state != IB_CM_MRA_REP_SENT) {
2179 pr_debug("%s: local_id %d, cm_id->state %d\n", __func__,
2180 be32_to_cpu(cm_id->local_id), cm_id->state);
2185 ret = cm_alloc_msg(cm_id_priv, &msg);
2189 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2190 private_data, private_data_len);
2192 ret = ib_post_send_mad(msg, NULL);
2194 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2200 cm_id->state = IB_CM_ESTABLISHED;
2201 cm_set_private_data(cm_id_priv, data, private_data_len);
2202 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2205 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2209 EXPORT_SYMBOL(ib_send_cm_rtu);
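/*
 * Usage sketch (illustrative only): the active side completes the three-way
 * handshake from its cm_handler once the REP arrives; an empty RTU is the
 * common case. The QP must be moved to RTR/RTS first (see
 * ib_cm_init_qp_attr() near the end of this file).
 */
#if 0
static int example_cm_handler(struct ib_cm_id *cm_id,
			      struct ib_cm_event *event)
{
	if (event->event == IB_CM_REP_RECEIVED)
		return ib_send_cm_rtu(cm_id, NULL, 0);
	return 0;
}
#endif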
2211 static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
2213 struct cm_rep_msg *rep_msg;
2214 struct ib_cm_rep_event_param *param;
2216 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2217 param = &work->cm_event.param.rep_rcvd;
2218 param->remote_ca_guid = rep_msg->local_ca_guid;
2219 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
2220 param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
2221 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
2222 param->responder_resources = rep_msg->initiator_depth;
2223 param->initiator_depth = rep_msg->resp_resources;
2224 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2225 param->failover_accepted = cm_rep_get_failover(rep_msg);
2226 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
2227 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2228 param->srq = cm_rep_get_srq(rep_msg);
2229 work->cm_event.private_data = &rep_msg->private_data;
2232 static void cm_dup_rep_handler(struct cm_work *work)
2234 struct cm_id_private *cm_id_priv;
2235 struct cm_rep_msg *rep_msg;
2236 struct ib_mad_send_buf *msg = NULL;
2239 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
2240 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
2241 rep_msg->local_comm_id);
2245 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2246 counter[CM_REP_COUNTER]);
2247 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
2251 spin_lock_irq(&cm_id_priv->lock);
2252 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
2253 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
2254 cm_id_priv->private_data,
2255 cm_id_priv->private_data_len);
2256 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
2257 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2258 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
2259 cm_id_priv->private_data,
2260 cm_id_priv->private_data_len);
2263 spin_unlock_irq(&cm_id_priv->lock);
2265 ret = ib_post_send_mad(msg, NULL);
2270 unlock: spin_unlock_irq(&cm_id_priv->lock);
2271 free: cm_free_msg(msg);
2272 deref: cm_deref_id(cm_id_priv);
2275 static int cm_rep_handler(struct cm_work *work)
2277 struct cm_id_private *cm_id_priv;
2278 struct cm_rep_msg *rep_msg;
2280 struct cm_id_private *cur_cm_id_priv;
2281 struct ib_cm_id *cm_id;
2282 struct cm_timewait_info *timewait_info;
2284 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
2285 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
2287 cm_dup_rep_handler(work);
2288 pr_debug("%s: remote_comm_id %d, no cm_id_priv\n", __func__,
2289 be32_to_cpu(rep_msg->remote_comm_id));
2293 cm_format_rep_event(work, cm_id_priv->qp_type);
2295 spin_lock_irq(&cm_id_priv->lock);
2296 switch (cm_id_priv->id.state) {
2297 case IB_CM_REQ_SENT:
2298 case IB_CM_MRA_REQ_RCVD:
2301 spin_unlock_irq(&cm_id_priv->lock);
2303 pr_debug("%s: cm_id_priv->id.state: %d, local_comm_id %d, remote_comm_id %d\n",
2304 __func__, cm_id_priv->id.state,
2305 be32_to_cpu(rep_msg->local_comm_id),
2306 be32_to_cpu(rep_msg->remote_comm_id));
2310 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
2311 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
2312 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2314 spin_lock(&cm.lock);
2315 /* Check for duplicate REP. */
2316 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
2317 spin_unlock(&cm.lock);
2318 spin_unlock_irq(&cm_id_priv->lock);
2320 pr_debug("%s: Failed to insert remote id %d\n", __func__,
2321 be32_to_cpu(rep_msg->remote_comm_id));
2324 /* Check for a stale connection. */
2325 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
2326 if (timewait_info) {
2327 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
2328 &cm.remote_id_table);
2329 cm_id_priv->timewait_info->inserted_remote_id = 0;
2330 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
2331 timewait_info->work.remote_id);
2333 spin_unlock(&cm.lock);
2334 spin_unlock_irq(&cm_id_priv->lock);
2335 cm_issue_rej(work->port, work->mad_recv_wc,
2336 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
2339 pr_debug("%s: Stale connection. local_comm_id %d, remote_comm_id %d\n",
2340 __func__, be32_to_cpu(rep_msg->local_comm_id),
2341 be32_to_cpu(rep_msg->remote_comm_id));
2343 if (cur_cm_id_priv) {
2344 cm_id = &cur_cm_id_priv->id;
2345 ib_send_cm_dreq(cm_id, NULL, 0);
2346 cm_deref_id(cur_cm_id_priv);
2351 spin_unlock(&cm.lock);
2353 cm_id_priv->id.state = IB_CM_REP_RCVD;
2354 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
2355 cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
2356 cm_id_priv->initiator_depth = rep_msg->resp_resources;
2357 cm_id_priv->responder_resources = rep_msg->initiator_depth;
2358 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
2359 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
2360 cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
2361 cm_id_priv->av.timeout =
2362 cm_ack_timeout(cm_id_priv->target_ack_delay,
2363 cm_id_priv->av.timeout - 1);
2364 cm_id_priv->alt_av.timeout =
2365 cm_ack_timeout(cm_id_priv->target_ack_delay,
2366 cm_id_priv->alt_av.timeout - 1);
2368 /* todo: handle peer_to_peer */
2370 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2371 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2373 list_add_tail(&work->list, &cm_id_priv->work_list);
2374 spin_unlock_irq(&cm_id_priv->lock);
2377 cm_process_work(cm_id_priv, work);
2379 cm_deref_id(cm_id_priv);
2383 cm_deref_id(cm_id_priv);
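/*
 * Annotation (not in the original source): before accepting a REP,
 * cm_rep_handler() checks two things under cm.lock: a duplicate REP (the
 * remote comm ID is already in cm.remote_id_table) and a stale connection
 * (the remote QPN is already owned by another timewait entry). A stale
 * connection is answered with IB_CM_REJ_STALE_CONN and the old connection
 * is torn down with a DREQ.
 */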
2387 static int cm_establish_handler(struct cm_work *work)
2389 struct cm_id_private *cm_id_priv;
2392 /* See comment in cm_establish about lookup. */
2393 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
2397 spin_lock_irq(&cm_id_priv->lock);
2398 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
2399 spin_unlock_irq(&cm_id_priv->lock);
2403 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2404 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2406 list_add_tail(&work->list, &cm_id_priv->work_list);
2407 spin_unlock_irq(&cm_id_priv->lock);
2410 cm_process_work(cm_id_priv, work);
2412 cm_deref_id(cm_id_priv);
2415 cm_deref_id(cm_id_priv);
2419 static int cm_rtu_handler(struct cm_work *work)
2421 struct cm_id_private *cm_id_priv;
2422 struct cm_rtu_msg *rtu_msg;
2425 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
2426 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
2427 rtu_msg->local_comm_id);
2431 work->cm_event.private_data = &rtu_msg->private_data;
2433 spin_lock_irq(&cm_id_priv->lock);
2434 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
2435 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
2436 spin_unlock_irq(&cm_id_priv->lock);
2437 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2438 counter[CM_RTU_COUNTER]);
2441 cm_id_priv->id.state = IB_CM_ESTABLISHED;
2443 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2444 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2446 list_add_tail(&work->list, &cm_id_priv->work_list);
2447 spin_unlock_irq(&cm_id_priv->lock);
2450 cm_process_work(cm_id_priv, work);
2452 cm_deref_id(cm_id_priv);
2455 cm_deref_id(cm_id_priv);
2459 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
2460 struct cm_id_private *cm_id_priv,
2461 const void *private_data,
2462 u8 private_data_len)
2464 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
2465 cm_form_tid(cm_id_priv));
2466 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
2467 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
2468 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
2470 if (private_data && private_data_len)
2471 memcpy(dreq_msg->private_data, private_data, private_data_len);
2474 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
2475 const void *private_data,
2476 u8 private_data_len)
2478 struct cm_id_private *cm_id_priv;
2479 struct ib_mad_send_buf *msg;
2480 unsigned long flags;
2483 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
2486 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2487 spin_lock_irqsave(&cm_id_priv->lock, flags);
2488 if (cm_id->state != IB_CM_ESTABLISHED) {
2489 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2490 be32_to_cpu(cm_id->local_id), cm_id->state);
2495 if (cm_id->lap_state == IB_CM_LAP_SENT ||
2496 cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
2497 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2499 ret = cm_alloc_msg(cm_id_priv, &msg);
2501 cm_enter_timewait(cm_id_priv);
2505 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2506 private_data, private_data_len);
2507 msg->timeout_ms = cm_id_priv->timeout_ms;
2508 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2510 ret = ib_post_send_mad(msg, NULL);
2512 cm_enter_timewait(cm_id_priv);
2513 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2518 cm_id->state = IB_CM_DREQ_SENT;
2519 cm_id_priv->msg = msg;
2520 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2523 EXPORT_SYMBOL(ib_send_cm_dreq);
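/*
 * Usage sketch (illustrative only): graceful teardown starts from
 * IB_CM_ESTABLISHED with a DREQ; the peer answers with a DREP and both
 * sides pass through timewait, after which IB_CM_TIMEWAIT_EXIT is
 * delivered.
 */
#if 0
static void example_disconnect(struct ib_cm_id *cm_id)
{
	ib_send_cm_dreq(cm_id, NULL, 0);	/* no private data */
}
#endif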
2525 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2526 struct cm_id_private *cm_id_priv,
2527 const void *private_data,
2528 u8 private_data_len)
2530 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2531 drep_msg->local_comm_id = cm_id_priv->id.local_id;
2532 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2534 if (private_data && private_data_len)
2535 memcpy(drep_msg->private_data, private_data, private_data_len);
2538 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2539 const void *private_data,
2540 u8 private_data_len)
2542 struct cm_id_private *cm_id_priv;
2543 struct ib_mad_send_buf *msg;
2544 unsigned long flags;
2548 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2551 data = cm_copy_private_data(private_data, private_data_len);
2553 return PTR_ERR(data);
2555 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2556 spin_lock_irqsave(&cm_id_priv->lock, flags);
2557 if (cm_id->state != IB_CM_DREQ_RCVD) {
2558 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2560 pr_debug("%s: local_id %d, cm_id->state(%d) != IB_CM_DREQ_RCVD\n",
2561 __func__, be32_to_cpu(cm_id->local_id), cm_id->state);
2565 cm_set_private_data(cm_id_priv, data, private_data_len);
2566 cm_enter_timewait(cm_id_priv);
2568 ret = cm_alloc_msg(cm_id_priv, &msg);
2572 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2573 private_data, private_data_len);
2575 ret = ib_post_send_mad(msg, NULL);
2577 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2582 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2585 EXPORT_SYMBOL(ib_send_cm_drep);
2587 static int cm_issue_drep(struct cm_port *port,
2588 struct ib_mad_recv_wc *mad_recv_wc)
2590 struct ib_mad_send_buf *msg = NULL;
2591 struct cm_dreq_msg *dreq_msg;
2592 struct cm_drep_msg *drep_msg;
2595 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2599 dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2600 drep_msg = (struct cm_drep_msg *) msg->mad;
2602 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2603 drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2604 drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2606 ret = ib_post_send_mad(msg, NULL);
2613 static int cm_dreq_handler(struct cm_work *work)
2615 struct cm_id_private *cm_id_priv;
2616 struct cm_dreq_msg *dreq_msg;
2617 struct ib_mad_send_buf *msg = NULL;
2620 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2621 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2622 dreq_msg->local_comm_id);
2624 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2625 counter[CM_DREQ_COUNTER]);
2626 cm_issue_drep(work->port, work->mad_recv_wc);
2627 pr_debug("%s: no cm_id_priv, local_comm_id %d, remote_comm_id %d\n",
2628 __func__, be32_to_cpu(dreq_msg->local_comm_id),
2629 be32_to_cpu(dreq_msg->remote_comm_id));
2633 work->cm_event.private_data = &dreq_msg->private_data;
2635 spin_lock_irq(&cm_id_priv->lock);
2636 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2639 switch (cm_id_priv->id.state) {
2640 case IB_CM_REP_SENT:
2641 case IB_CM_DREQ_SENT:
2642 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2644 case IB_CM_ESTABLISHED:
2645 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2646 cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2647 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2649 case IB_CM_MRA_REP_RCVD:
2651 case IB_CM_TIMEWAIT:
2652 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2653 counter[CM_DREQ_COUNTER]);
2654 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
2658 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2659 cm_id_priv->private_data,
2660 cm_id_priv->private_data_len);
2661 spin_unlock_irq(&cm_id_priv->lock);
2663 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
2664 ib_post_send_mad(msg, NULL))
2667 case IB_CM_DREQ_RCVD:
2668 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2669 counter[CM_DREQ_COUNTER]);
2672 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2673 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2674 cm_id_priv->id.state);
2677 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2678 cm_id_priv->tid = dreq_msg->hdr.tid;
2679 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2681 list_add_tail(&work->list, &cm_id_priv->work_list);
2682 spin_unlock_irq(&cm_id_priv->lock);
2685 cm_process_work(cm_id_priv, work);
2687 cm_deref_id(cm_id_priv);
2690 unlock: spin_unlock_irq(&cm_id_priv->lock);
2691 deref: cm_deref_id(cm_id_priv);
2695 static int cm_drep_handler(struct cm_work *work)
2697 struct cm_id_private *cm_id_priv;
2698 struct cm_drep_msg *drep_msg;
2701 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2702 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2703 drep_msg->local_comm_id);
2707 work->cm_event.private_data = &drep_msg->private_data;
2709 spin_lock_irq(&cm_id_priv->lock);
2710 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2711 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2712 spin_unlock_irq(&cm_id_priv->lock);
2715 cm_enter_timewait(cm_id_priv);
2717 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2718 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2720 list_add_tail(&work->list, &cm_id_priv->work_list);
2721 spin_unlock_irq(&cm_id_priv->lock);
2724 cm_process_work(cm_id_priv, work);
2726 cm_deref_id(cm_id_priv);
2729 cm_deref_id(cm_id_priv);
2733 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2734 enum ib_cm_rej_reason reason,
2737 const void *private_data,
2738 u8 private_data_len)
2740 struct cm_id_private *cm_id_priv;
2741 struct ib_mad_send_buf *msg;
2742 unsigned long flags;
2745 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2746 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2749 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2751 spin_lock_irqsave(&cm_id_priv->lock, flags);
2752 switch (cm_id->state) {
2753 case IB_CM_REQ_SENT:
2754 case IB_CM_MRA_REQ_RCVD:
2755 case IB_CM_REQ_RCVD:
2756 case IB_CM_MRA_REQ_SENT:
2757 case IB_CM_REP_RCVD:
2758 case IB_CM_MRA_REP_SENT:
2759 ret = cm_alloc_msg(cm_id_priv, &msg);
2761 cm_format_rej((struct cm_rej_msg *) msg->mad,
2762 cm_id_priv, reason, ari, ari_length,
2763 private_data, private_data_len);
2765 cm_reset_to_idle(cm_id_priv);
2767 case IB_CM_REP_SENT:
2768 case IB_CM_MRA_REP_RCVD:
2769 ret = cm_alloc_msg(cm_id_priv, &msg);
2771 cm_format_rej((struct cm_rej_msg *) msg->mad,
2772 cm_id_priv, reason, ari, ari_length,
2773 private_data, private_data_len);
2775 cm_enter_timewait(cm_id_priv);
2778 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
2779 be32_to_cpu(cm_id_priv->id.local_id), cm_id->state);
2787 ret = ib_post_send_mad(msg, NULL);
2791 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2794 EXPORT_SYMBOL(ib_send_cm_rej);
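/*
 * Usage sketch (illustrative only): a connection request can be refused
 * with a consumer-defined reason plus private data explaining why; the ARI
 * buffer is only meaningful for reject reasons that define one.
 */
#if 0
static void example_refuse_req(struct ib_cm_id *cm_id)
{
	static const char why[] = "no free contexts";

	ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
		       why, sizeof(why));
}
#endif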
2796 static void cm_format_rej_event(struct cm_work *work)
2798 struct cm_rej_msg *rej_msg;
2799 struct ib_cm_rej_event_param *param;
2801 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2802 param = &work->cm_event.param.rej_rcvd;
2803 param->ari = rej_msg->ari;
2804 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2805 param->reason = __be16_to_cpu(rej_msg->reason);
2806 work->cm_event.private_data = &rej_msg->private_data;
2809 static struct cm_id_private *cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2811 struct cm_timewait_info *timewait_info;
2812 struct cm_id_private *cm_id_priv;
2815 remote_id = rej_msg->local_comm_id;
2817 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2818 spin_lock_irq(&cm.lock);
2819 timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
2821 if (!timewait_info) {
2822 spin_unlock_irq(&cm.lock);
2825 cm_id_priv = xa_load(&cm.local_id_table,
2826 cm_local_id(timewait_info->work.local_id));
2828 if (cm_id_priv->id.remote_id == remote_id)
2829 atomic_inc(&cm_id_priv->refcount);
2833 spin_unlock_irq(&cm.lock);
2834 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2835 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2837 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2842 static int cm_rej_handler(struct cm_work *work)
2844 struct cm_id_private *cm_id_priv;
2845 struct cm_rej_msg *rej_msg;
2848 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2849 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2853 cm_format_rej_event(work);
2855 spin_lock_irq(&cm_id_priv->lock);
2856 switch (cm_id_priv->id.state) {
2857 case IB_CM_REQ_SENT:
2858 case IB_CM_MRA_REQ_RCVD:
2859 case IB_CM_REP_SENT:
2860 case IB_CM_MRA_REP_RCVD:
2861 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2863 case IB_CM_REQ_RCVD:
2864 case IB_CM_MRA_REQ_SENT:
2865 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2866 cm_enter_timewait(cm_id_priv);
2868 cm_reset_to_idle(cm_id_priv);
2870 case IB_CM_DREQ_SENT:
2871 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2873 case IB_CM_REP_RCVD:
2874 case IB_CM_MRA_REP_SENT:
2875 cm_enter_timewait(cm_id_priv);
2877 case IB_CM_ESTABLISHED:
2878 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2879 cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2880 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2881 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2883 cm_enter_timewait(cm_id_priv);
2888 spin_unlock_irq(&cm_id_priv->lock);
2889 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2890 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2891 cm_id_priv->id.state);
2896 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2898 list_add_tail(&work->list, &cm_id_priv->work_list);
2899 spin_unlock_irq(&cm_id_priv->lock);
2902 cm_process_work(cm_id_priv, work);
2904 cm_deref_id(cm_id_priv);
2907 cm_deref_id(cm_id_priv);
2911 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2913 const void *private_data,
2914 u8 private_data_len)
2916 struct cm_id_private *cm_id_priv;
2917 struct ib_mad_send_buf *msg;
2918 enum ib_cm_state cm_state;
2919 enum ib_cm_lap_state lap_state;
2920 enum cm_msg_response msg_response;
2922 unsigned long flags;
2925 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2928 data = cm_copy_private_data(private_data, private_data_len);
2930 return PTR_ERR(data);
2932 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2934 spin_lock_irqsave(&cm_id_priv->lock, flags);
2935 switch (cm_id_priv->id.state) {
2936 case IB_CM_REQ_RCVD:
2937 cm_state = IB_CM_MRA_REQ_SENT;
2938 lap_state = cm_id->lap_state;
2939 msg_response = CM_MSG_RESPONSE_REQ;
2941 case IB_CM_REP_RCVD:
2942 cm_state = IB_CM_MRA_REP_SENT;
2943 lap_state = cm_id->lap_state;
2944 msg_response = CM_MSG_RESPONSE_REP;
2946 case IB_CM_ESTABLISHED:
2947 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2948 cm_state = cm_id->state;
2949 lap_state = IB_CM_MRA_LAP_SENT;
2950 msg_response = CM_MSG_RESPONSE_OTHER;
2955 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
2956 __func__, be32_to_cpu(cm_id_priv->id.local_id),
2957 cm_id_priv->id.state);
2962 if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2963 ret = cm_alloc_msg(cm_id_priv, &msg);
2967 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2968 msg_response, service_timeout,
2969 private_data, private_data_len);
2970 ret = ib_post_send_mad(msg, NULL);
2975 cm_id->state = cm_state;
2976 cm_id->lap_state = lap_state;
2977 cm_id_priv->service_timeout = service_timeout;
2978 cm_set_private_data(cm_id_priv, data, private_data_len);
2979 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2982 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2986 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2991 EXPORT_SYMBOL(ib_send_cm_mra);
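/*
 * Annotation + sketch (illustrative only): the MRA service timeout is a
 * 5-bit exponent interpreted as 4.096 usec * 2^timeout. OR-ing in
 * IB_CM_MRA_FLAG_DELAY records the timeout but defers the actual MRA until
 * a duplicate message arrives.
 */
#if 0
static int example_delay_peer(struct ib_cm_id *cm_id)
{
	return ib_send_cm_mra(cm_id, 18, NULL, 0); /* 2^18 * 4.096 usec, ~1 s */
}
#endif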
2993 static struct cm_id_private *cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2995 switch (cm_mra_get_msg_mraed(mra_msg)) {
2996 case CM_MSG_RESPONSE_REQ:
2997 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2998 case CM_MSG_RESPONSE_REP:
2999 case CM_MSG_RESPONSE_OTHER:
3000 return cm_acquire_id(mra_msg->remote_comm_id,
3001 mra_msg->local_comm_id);
3007 static int cm_mra_handler(struct cm_work *work)
3009 struct cm_id_private *cm_id_priv;
3010 struct cm_mra_msg *mra_msg;
3013 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
3014 cm_id_priv = cm_acquire_mraed_id(mra_msg);
3018 work->cm_event.private_data = &mra_msg->private_data;
3019 work->cm_event.param.mra_rcvd.service_timeout =
3020 cm_mra_get_service_timeout(mra_msg);
3021 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
3022 cm_convert_to_ms(cm_id_priv->av.timeout);
3024 spin_lock_irq(&cm_id_priv->lock);
3025 switch (cm_id_priv->id.state) {
3026 case IB_CM_REQ_SENT:
3027 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
3028 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3029 cm_id_priv->msg, timeout))
3031 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
3033 case IB_CM_REP_SENT:
3034 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
3035 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3036 cm_id_priv->msg, timeout))
3038 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
3040 case IB_CM_ESTABLISHED:
3041 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
3042 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
3043 ib_modify_mad(cm_id_priv->av.port->mad_agent,
3044 cm_id_priv->msg, timeout)) {
3045 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
3046 atomic_long_inc(&work->port->
3047 counter_group[CM_RECV_DUPLICATES].
3048 counter[CM_MRA_COUNTER]);
3051 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
3053 case IB_CM_MRA_REQ_RCVD:
3054 case IB_CM_MRA_REP_RCVD:
3055 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3056 counter[CM_MRA_COUNTER]);
3059 pr_debug("%s local_id %d, cm_id_priv->id.state: %d\n",
3060 __func__, be32_to_cpu(cm_id_priv->id.local_id),
3061 cm_id_priv->id.state);
3065 cm_id_priv->msg->context[1] = (void *) (unsigned long)
3066 cm_id_priv->id.state;
3067 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3069 list_add_tail(&work->list, &cm_id_priv->work_list);
3070 spin_unlock_irq(&cm_id_priv->lock);
3073 cm_process_work(cm_id_priv, work);
3075 cm_deref_id(cm_id_priv);
3078 spin_unlock_irq(&cm_id_priv->lock);
3079 cm_deref_id(cm_id_priv);
3083 static void cm_format_lap(struct cm_lap_msg *lap_msg,
3084 struct cm_id_private *cm_id_priv,
3085 struct sa_path_rec *alternate_path,
3086 const void *private_data,
3087 u8 private_data_len)
3089 bool alt_ext = false;
3091 if (alternate_path->rec_type == SA_PATH_REC_TYPE_OPA)
3092 alt_ext = opa_is_extended_lid(alternate_path->opa.dlid,
3093 alternate_path->opa.slid);
3094 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
3095 cm_form_tid(cm_id_priv));
3096 lap_msg->local_comm_id = cm_id_priv->id.local_id;
3097 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
3098 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
3099 /* todo: need remote CM response timeout */
3100 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
3101 lap_msg->alt_local_lid =
3102 htons(ntohl(sa_path_get_slid(alternate_path)));
3103 lap_msg->alt_remote_lid =
3104 htons(ntohl(sa_path_get_dlid(alternate_path)));
3105 lap_msg->alt_local_gid = alternate_path->sgid;
3106 lap_msg->alt_remote_gid = alternate_path->dgid;
3108 lap_msg->alt_local_gid.global.interface_id
3109 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.slid));
3110 lap_msg->alt_remote_gid.global.interface_id
3111 = OPA_MAKE_ID(be32_to_cpu(alternate_path->opa.dlid));
3113 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
3114 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
3115 lap_msg->alt_hop_limit = alternate_path->hop_limit;
3116 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
3117 cm_lap_set_sl(lap_msg, alternate_path->sl);
3118 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
3119 cm_lap_set_local_ack_timeout(lap_msg,
3120 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
3121 alternate_path->packet_life_time));
3123 if (private_data && private_data_len)
3124 memcpy(lap_msg->private_data, private_data, private_data_len);
3127 int ib_send_cm_lap(struct ib_cm_id *cm_id,
3128 struct sa_path_rec *alternate_path,
3129 const void *private_data,
3130 u8 private_data_len)
3132 struct cm_id_private *cm_id_priv;
3133 struct ib_mad_send_buf *msg;
3134 unsigned long flags;
3137 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
3140 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3141 spin_lock_irqsave(&cm_id_priv->lock, flags);
3142 if (cm_id->state != IB_CM_ESTABLISHED ||
3143 (cm_id->lap_state != IB_CM_LAP_UNINIT &&
3144 cm_id->lap_state != IB_CM_LAP_IDLE)) {
3149 ret = cm_init_av_by_path(alternate_path, NULL, &cm_id_priv->alt_av,
3153 cm_id_priv->alt_av.timeout =
3154 cm_ack_timeout(cm_id_priv->target_ack_delay,
3155 cm_id_priv->alt_av.timeout - 1);
3157 ret = cm_alloc_msg(cm_id_priv, &msg);
3161 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
3162 alternate_path, private_data, private_data_len);
3163 msg->timeout_ms = cm_id_priv->timeout_ms;
3164 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
3166 ret = ib_post_send_mad(msg, NULL);
3168 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3173 cm_id->lap_state = IB_CM_LAP_SENT;
3174 cm_id_priv->msg = msg;
3176 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3179 EXPORT_SYMBOL(ib_send_cm_lap);
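/*
 * Annotation (not in the original source): automatic path migration is
 * driven from IB_CM_ESTABLISHED. The active side loads an alternate path
 * with ib_send_cm_lap(), the passive side accepts it with ib_send_cm_apr(),
 * and once the HCA migrates (IB_EVENT_PATH_MIG on the QP) the ULP reports
 * it via ib_cm_notify() so cm_migrate() can swap the primary and alternate
 * address vectors.
 */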
3181 static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
3182 struct sa_path_rec *path)
3186 if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
3187 sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
3188 sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
3190 lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
3191 sa_path_set_dlid(path, lid);
3193 lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
3194 sa_path_set_slid(path, lid);
3198 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
3199 struct sa_path_rec *path,
3200 struct cm_lap_msg *lap_msg)
3202 path->dgid = lap_msg->alt_local_gid;
3203 path->sgid = lap_msg->alt_remote_gid;
3204 path->flow_label = cm_lap_get_flow_label(lap_msg);
3205 path->hop_limit = lap_msg->alt_hop_limit;
3206 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
3207 path->reversible = 1;
3208 path->pkey = cm_id_priv->pkey;
3209 path->sl = cm_lap_get_sl(lap_msg);
3210 path->mtu_selector = IB_SA_EQ;
3211 path->mtu = cm_id_priv->path_mtu;
3212 path->rate_selector = IB_SA_EQ;
3213 path->rate = cm_lap_get_packet_rate(lap_msg);
3214 path->packet_life_time_selector = IB_SA_EQ;
3215 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
3216 path->packet_life_time -= (path->packet_life_time > 0);
3217 cm_format_path_lid_from_lap(lap_msg, path);
3220 static int cm_lap_handler(struct cm_work *work)
3222 struct cm_id_private *cm_id_priv;
3223 struct cm_lap_msg *lap_msg;
3224 struct ib_cm_lap_event_param *param;
3225 struct ib_mad_send_buf *msg = NULL;
3228 /* Currently alternate path messages are not supported for the RoCE link layer. */
3231 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3232 work->port->port_num))
3235 /* todo: verify LAP request and send reject APR if invalid. */
3236 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
3237 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
3238 lap_msg->local_comm_id);
3242 param = &work->cm_event.param.lap_rcvd;
3243 memset(&work->path[0], 0, sizeof(work->path[0]));
3244 cm_path_set_rec_type(work->port->cm_dev->ib_device,
3245 work->port->port_num,
3247 &lap_msg->alt_local_gid);
3248 param->alternate_path = &work->path[0];
3249 cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
3250 work->cm_event.private_data = &lap_msg->private_data;
3252 spin_lock_irq(&cm_id_priv->lock);
3253 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
3256 switch (cm_id_priv->id.lap_state) {
3257 case IB_CM_LAP_UNINIT:
3258 case IB_CM_LAP_IDLE:
3260 case IB_CM_MRA_LAP_SENT:
3261 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3262 counter[CM_LAP_COUNTER]);
3263 msg = cm_alloc_response_msg_no_ah(work->port, work->mad_recv_wc);
3267 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
3268 CM_MSG_RESPONSE_OTHER,
3269 cm_id_priv->service_timeout,
3270 cm_id_priv->private_data,
3271 cm_id_priv->private_data_len);
3272 spin_unlock_irq(&cm_id_priv->lock);
3274 if (cm_create_response_msg_ah(work->port, work->mad_recv_wc, msg) ||
3275 ib_post_send_mad(msg, NULL))
3278 case IB_CM_LAP_RCVD:
3279 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3280 counter[CM_LAP_COUNTER]);
3286 ret = cm_init_av_for_lap(work->port, work->mad_recv_wc->wc,
3287 work->mad_recv_wc->recv_buf.grh,
3292 ret = cm_init_av_by_path(param->alternate_path, NULL,
3293 &cm_id_priv->alt_av, cm_id_priv);
3297 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
3298 cm_id_priv->tid = lap_msg->hdr.tid;
3299 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3301 list_add_tail(&work->list, &cm_id_priv->work_list);
3302 spin_unlock_irq(&cm_id_priv->lock);
3305 cm_process_work(cm_id_priv, work);
3307 cm_deref_id(cm_id_priv);
3310 unlock: spin_unlock_irq(&cm_id_priv->lock);
3311 deref: cm_deref_id(cm_id_priv);
3315 static void cm_format_apr(struct cm_apr_msg *apr_msg,
3316 struct cm_id_private *cm_id_priv,
3317 enum ib_cm_apr_status status,
3320 const void *private_data,
3321 u8 private_data_len)
3323 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
3324 apr_msg->local_comm_id = cm_id_priv->id.local_id;
3325 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
3326 apr_msg->ap_status = (u8) status;
3328 if (info && info_length) {
3329 apr_msg->info_length = info_length;
3330 memcpy(apr_msg->info, info, info_length);
3333 if (private_data && private_data_len)
3334 memcpy(apr_msg->private_data, private_data, private_data_len);
3337 int ib_send_cm_apr(struct ib_cm_id *cm_id,
3338 enum ib_cm_apr_status status,
3341 const void *private_data,
3342 u8 private_data_len)
3344 struct cm_id_private *cm_id_priv;
3345 struct ib_mad_send_buf *msg;
3346 unsigned long flags;
3349 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
3350 (info && info_length > IB_CM_APR_INFO_LENGTH))
3353 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3354 spin_lock_irqsave(&cm_id_priv->lock, flags);
3355 if (cm_id->state != IB_CM_ESTABLISHED ||
3356 (cm_id->lap_state != IB_CM_LAP_RCVD &&
3357 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
3362 ret = cm_alloc_msg(cm_id_priv, &msg);
3366 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
3367 info, info_length, private_data, private_data_len);
3368 ret = ib_post_send_mad(msg, NULL);
3370 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3375 cm_id->lap_state = IB_CM_LAP_IDLE;
3376 out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3379 EXPORT_SYMBOL(ib_send_cm_apr);
3381 static int cm_apr_handler(struct cm_work *work)
3383 struct cm_id_private *cm_id_priv;
3384 struct cm_apr_msg *apr_msg;
3387 /* Currently alternate path messages are not supported for the RoCE link layer. */
3390 if (rdma_protocol_roce(work->port->cm_dev->ib_device,
3391 work->port->port_num))
3394 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
3395 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
3396 apr_msg->local_comm_id);
3398 return -EINVAL; /* Unmatched reply. */
3400 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
3401 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
3402 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
3403 work->cm_event.private_data = &apr_msg->private_data;
3405 spin_lock_irq(&cm_id_priv->lock);
3406 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
3407 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
3408 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
3409 spin_unlock_irq(&cm_id_priv->lock);
3412 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
3413 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3414 cm_id_priv->msg = NULL;
3416 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3418 list_add_tail(&work->list, &cm_id_priv->work_list);
3419 spin_unlock_irq(&cm_id_priv->lock);
3422 cm_process_work(cm_id_priv, work);
3424 cm_deref_id(cm_id_priv);
3427 cm_deref_id(cm_id_priv);
3431 static int cm_timewait_handler(struct cm_work *work)
3433 struct cm_timewait_info *timewait_info;
3434 struct cm_id_private *cm_id_priv;
3437 timewait_info = (struct cm_timewait_info *)work;
3438 spin_lock_irq(&cm.lock);
3439 list_del(&timewait_info->list);
3440 spin_unlock_irq(&cm.lock);
3442 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
3443 timewait_info->work.remote_id);
3447 spin_lock_irq(&cm_id_priv->lock);
3448 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
3449 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
3450 spin_unlock_irq(&cm_id_priv->lock);
3453 cm_id_priv->id.state = IB_CM_IDLE;
3454 ret = atomic_inc_and_test(&cm_id_priv->work_count);
3456 list_add_tail(&work->list, &cm_id_priv->work_list);
3457 spin_unlock_irq(&cm_id_priv->lock);
3460 cm_process_work(cm_id_priv, work);
3462 cm_deref_id(cm_id_priv);
3465 cm_deref_id(cm_id_priv);
3469 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
3470 struct cm_id_private *cm_id_priv,
3471 struct ib_cm_sidr_req_param *param)
3473 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
3474 cm_form_tid(cm_id_priv));
3475 sidr_req_msg->request_id = cm_id_priv->id.local_id;
3476 sidr_req_msg->pkey = param->path->pkey;
3477 sidr_req_msg->service_id = param->service_id;
3479 if (param->private_data && param->private_data_len)
3480 memcpy(sidr_req_msg->private_data, param->private_data,
3481 param->private_data_len);
3484 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
3485 struct ib_cm_sidr_req_param *param)
3487 struct cm_id_private *cm_id_priv;
3488 struct ib_mad_send_buf *msg;
3489 unsigned long flags;
3492 if (!param->path || (param->private_data &&
3493 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
3496 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3497 ret = cm_init_av_by_path(param->path, param->sgid_attr,
3503 cm_id->service_id = param->service_id;
3504 cm_id->service_mask = ~cpu_to_be64(0);
3505 cm_id_priv->timeout_ms = param->timeout_ms;
3506 cm_id_priv->max_cm_retries = param->max_cm_retries;
3507 ret = cm_alloc_msg(cm_id_priv, &msg);
3511 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
3513 msg->timeout_ms = cm_id_priv->timeout_ms;
3514 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
3516 spin_lock_irqsave(&cm_id_priv->lock, flags);
3517 if (cm_id->state == IB_CM_IDLE)
3518 ret = ib_post_send_mad(msg, NULL);
3523 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3527 cm_id->state = IB_CM_SIDR_REQ_SENT;
3528 cm_id_priv->msg = msg;
3529 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3533 EXPORT_SYMBOL(ib_send_cm_sidr_req);
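/*
 * Usage sketch (illustrative only): SIDR resolves a service ID to a remote
 * UD QPN/Q_Key without establishing a connection. The path record would
 * normally come from an SA query; the timeout and retry values here are
 * placeholders.
 */
#if 0
static int example_sidr_query(struct ib_cm_id *cm_id,
			      struct sa_path_rec *path, __be64 service_id)
{
	struct ib_cm_sidr_req_param req = {
		.path		= path,
		.service_id	= service_id,
		.timeout_ms	= 2000,
		.max_cm_retries	= 3,
	};

	return ib_send_cm_sidr_req(cm_id, &req);
}
#endif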
3535 static void cm_format_sidr_req_event(struct cm_work *work,
3536 const struct cm_id_private *rx_cm_id,
3537 struct ib_cm_id *listen_id)
3539 struct cm_sidr_req_msg *sidr_req_msg;
3540 struct ib_cm_sidr_req_event_param *param;
3542 sidr_req_msg = (struct cm_sidr_req_msg *)
3543 work->mad_recv_wc->recv_buf.mad;
3544 param = &work->cm_event.param.sidr_req_rcvd;
3545 param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
3546 param->listen_id = listen_id;
3547 param->service_id = sidr_req_msg->service_id;
3548 param->bth_pkey = cm_get_bth_pkey(work);
3549 param->port = work->port->port_num;
3550 param->sgid_attr = rx_cm_id->av.ah_attr.grh.sgid_attr;
3551 work->cm_event.private_data = &sidr_req_msg->private_data;
3554 static int cm_sidr_req_handler(struct cm_work *work)
3556 struct ib_cm_id *cm_id;
3557 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
3558 struct cm_sidr_req_msg *sidr_req_msg;
3562 cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
3564 return PTR_ERR(cm_id);
3565 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3567 /* Record SGID/SLID and request ID for lookup. */
3568 sidr_req_msg = (struct cm_sidr_req_msg *)
3569 work->mad_recv_wc->recv_buf.mad;
3570 wc = work->mad_recv_wc->wc;
3571 cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
3572 cm_id_priv->av.dgid.global.interface_id = 0;
3573 ret = cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
3574 work->mad_recv_wc->recv_buf.grh,
3579 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
3580 cm_id_priv->tid = sidr_req_msg->hdr.tid;
3581 atomic_inc(&cm_id_priv->work_count);
3583 spin_lock_irq(&cm.lock);
3584 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
3585 if (cur_cm_id_priv) {
3586 spin_unlock_irq(&cm.lock);
3587 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
3588 counter[CM_SIDR_REQ_COUNTER]);
3589 goto out; /* Duplicate message. */
3591 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3592 cur_cm_id_priv = cm_find_listen(cm_id->device,
3593 sidr_req_msg->service_id);
3594 if (!cur_cm_id_priv) {
3595 spin_unlock_irq(&cm.lock);
3596 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3597 goto out; /* No match. */
3599 atomic_inc(&cur_cm_id_priv->refcount);
3600 atomic_inc(&cm_id_priv->refcount);
3601 spin_unlock_irq(&cm.lock);
3603 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3604 cm_id_priv->id.context = cur_cm_id_priv->id.context;
3605 cm_id_priv->id.service_id = sidr_req_msg->service_id;
3606 cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3608 cm_format_sidr_req_event(work, cm_id_priv, &cur_cm_id_priv->id);
3609 cm_process_work(cm_id_priv, work);
3610 cm_deref_id(cur_cm_id_priv);
3613 ib_destroy_cm_id(&cm_id_priv->id);
3617 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3618 struct cm_id_private *cm_id_priv,
3619 struct ib_cm_sidr_rep_param *param)
3621 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3623 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3624 sidr_rep_msg->status = param->status;
3625 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3626 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3627 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3629 if (param->info && param->info_length)
3630 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3632 if (param->private_data && param->private_data_len)
3633 memcpy(sidr_rep_msg->private_data, param->private_data,
3634 param->private_data_len);
3637 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3638 struct ib_cm_sidr_rep_param *param)
3640 struct cm_id_private *cm_id_priv;
3641 struct ib_mad_send_buf *msg;
3642 unsigned long flags;
3645 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3646 (param->private_data &&
3647 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3650 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3651 spin_lock_irqsave(&cm_id_priv->lock, flags);
3652 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3657 ret = cm_alloc_msg(cm_id_priv, &msg);
3661 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3663 ret = ib_post_send_mad(msg, NULL);
3665 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3669 cm_id->state = IB_CM_IDLE;
3670 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3672 spin_lock_irqsave(&cm.lock, flags);
3673 if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
3674 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3675 RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
3677 spin_unlock_irqrestore(&cm.lock, flags);
3680 error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3683 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
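/*
 * Usage sketch (illustrative only): the listener answers a SIDR REQ with
 * the UD QPN and Q_Key the requester should address.
 */
#if 0
static int example_sidr_answer(struct ib_cm_id *cm_id, struct ib_qp *qp,
			       u32 qkey)
{
	struct ib_cm_sidr_rep_param rep = {
		.status	= IB_SIDR_SUCCESS,
		.qp_num	= qp->qp_num,
		.qkey	= qkey,
	};

	return ib_send_cm_sidr_rep(cm_id, &rep);
}
#endif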
3685 static void cm_format_sidr_rep_event(struct cm_work *work,
3686 const struct cm_id_private *cm_id_priv)
3688 struct cm_sidr_rep_msg *sidr_rep_msg;
3689 struct ib_cm_sidr_rep_event_param *param;
3691 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3692 work->mad_recv_wc->recv_buf.mad;
3693 param = &work->cm_event.param.sidr_rep_rcvd;
3694 param->status = sidr_rep_msg->status;
3695 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3696 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3697 param->info = &sidr_rep_msg->info;
3698 param->info_len = sidr_rep_msg->info_length;
3699 param->sgid_attr = cm_id_priv->av.ah_attr.grh.sgid_attr;
3700 work->cm_event.private_data = &sidr_rep_msg->private_data;
3703 static int cm_sidr_rep_handler(struct cm_work *work)
3705 struct cm_sidr_rep_msg *sidr_rep_msg;
3706 struct cm_id_private *cm_id_priv;
3708 sidr_rep_msg = (struct cm_sidr_rep_msg *)
3709 work->mad_recv_wc->recv_buf.mad;
3710 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3712 return -EINVAL; /* Unmatched reply. */
3714 spin_lock_irq(&cm_id_priv->lock);
3715 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3716 spin_unlock_irq(&cm_id_priv->lock);
3719 cm_id_priv->id.state = IB_CM_IDLE;
3720 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3721 spin_unlock_irq(&cm_id_priv->lock);
3723 cm_format_sidr_rep_event(work, cm_id_priv);
3724 cm_process_work(cm_id_priv, work);
3727 cm_deref_id(cm_id_priv);
3731 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3732 enum ib_wc_status wc_status)
3734 struct cm_id_private *cm_id_priv;
3735 struct ib_cm_event cm_event;
3736 enum ib_cm_state state;
3739 memset(&cm_event, 0, sizeof cm_event);
3740 cm_id_priv = msg->context[0];
3742 /* Discard old sends or ones without a response. */
3743 spin_lock_irq(&cm_id_priv->lock);
3744 state = (enum ib_cm_state) (unsigned long) msg->context[1];
3745 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3748 pr_debug_ratelimited("CM: failed sending MAD in state %d. (%s)\n",
3749 state, ib_wc_status_msg(wc_status));
3751 case IB_CM_REQ_SENT:
3752 case IB_CM_MRA_REQ_RCVD:
3753 cm_reset_to_idle(cm_id_priv);
3754 cm_event.event = IB_CM_REQ_ERROR;
3756 case IB_CM_REP_SENT:
3757 case IB_CM_MRA_REP_RCVD:
3758 cm_reset_to_idle(cm_id_priv);
3759 cm_event.event = IB_CM_REP_ERROR;
3761 case IB_CM_DREQ_SENT:
3762 cm_enter_timewait(cm_id_priv);
3763 cm_event.event = IB_CM_DREQ_ERROR;
3765 case IB_CM_SIDR_REQ_SENT:
3766 cm_id_priv->id.state = IB_CM_IDLE;
3767 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3772 spin_unlock_irq(&cm_id_priv->lock);
3773 cm_event.param.send_status = wc_status;
3775 /* No other events can occur on the cm_id at this point. */
3776 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3779 ib_destroy_cm_id(&cm_id_priv->id);
3782 spin_unlock_irq(&cm_id_priv->lock);
3786 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3787 struct ib_mad_send_wc *mad_send_wc)
3789 struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3790 struct cm_port *port;
3793 port = mad_agent->context;
3794 attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3795 msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3798 * If the send was in response to a received message (context[0] is not
3799 * set to a cm_id), and is not a REJ, then it is a send the CM manufactured as a response to a received message. */
3802 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3805 atomic_long_add(1 + msg->retries,
3806 &port->counter_group[CM_XMIT].counter[attr_index]);
3808 atomic_long_add(msg->retries,
3809 &port->counter_group[CM_XMIT_RETRIES].
3810 counter[attr_index]);
3812 switch (mad_send_wc->status) {
3814 case IB_WC_WR_FLUSH_ERR:
3818 if (msg->context[0] && msg->context[1])
3819 cm_process_send_error(msg, mad_send_wc->status);
3826 static void cm_work_handler(struct work_struct *_work)
3828 struct cm_work *work = container_of(_work, struct cm_work, work.work);
3831 switch (work->cm_event.event) {
3832 case IB_CM_REQ_RECEIVED:
3833 ret = cm_req_handler(work);
3835 case IB_CM_MRA_RECEIVED:
3836 ret = cm_mra_handler(work);
3838 case IB_CM_REJ_RECEIVED:
3839 ret = cm_rej_handler(work);
3841 case IB_CM_REP_RECEIVED:
3842 ret = cm_rep_handler(work);
3844 case IB_CM_RTU_RECEIVED:
3845 ret = cm_rtu_handler(work);
3847 case IB_CM_USER_ESTABLISHED:
3848 ret = cm_establish_handler(work);
3850 case IB_CM_DREQ_RECEIVED:
3851 ret = cm_dreq_handler(work);
3853 case IB_CM_DREP_RECEIVED:
3854 ret = cm_drep_handler(work);
3856 case IB_CM_SIDR_REQ_RECEIVED:
3857 ret = cm_sidr_req_handler(work);
3859 case IB_CM_SIDR_REP_RECEIVED:
3860 ret = cm_sidr_rep_handler(work);
3862 case IB_CM_LAP_RECEIVED:
3863 ret = cm_lap_handler(work);
3865 case IB_CM_APR_RECEIVED:
3866 ret = cm_apr_handler(work);
3868 case IB_CM_TIMEWAIT_EXIT:
3869 ret = cm_timewait_handler(work);
3872 pr_debug("cm_event.event: 0x%x\n", work->cm_event.event);
3880 static int cm_establish(struct ib_cm_id *cm_id)
3882 struct cm_id_private *cm_id_priv;
3883 struct cm_work *work;
3884 unsigned long flags;
3886 struct cm_device *cm_dev;
3888 cm_dev = ib_get_client_data(cm_id->device, &cm_client);
3892 work = kmalloc(sizeof *work, GFP_ATOMIC);
3896 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3897 spin_lock_irqsave(&cm_id_priv->lock, flags);
3898 switch (cm_id->state)
3900 case IB_CM_REP_SENT:
3901 case IB_CM_MRA_REP_RCVD:
3902 cm_id->state = IB_CM_ESTABLISHED;
3904 case IB_CM_ESTABLISHED:
3908 pr_debug("%s: local_id %d, cm_id->state: %d\n", __func__,
3909 be32_to_cpu(cm_id->local_id), cm_id->state);
3913 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3921 * The CM worker thread may try to destroy the cm_id before it
3922 * can execute this work item. To prevent potential deadlock,
3923 * we need to find the cm_id once we're in the context of the
3924 * worker thread, rather than holding a reference on it.
3926 INIT_DELAYED_WORK(&work->work, cm_work_handler);
3927 work->local_id = cm_id->local_id;
3928 work->remote_id = cm_id->remote_id;
3929 work->mad_recv_wc = NULL;
3930 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3932 /* Check if the device started its remove_one */
3933 spin_lock_irqsave(&cm.lock, flags);
3934 if (!cm_dev->going_down) {
3935 queue_delayed_work(cm.wq, &work->work, 0);
3940 spin_unlock_irqrestore(&cm.lock, flags);
3946 static int cm_migrate(struct ib_cm_id *cm_id)
3948 struct cm_id_private *cm_id_priv;
3949 struct cm_av tmp_av;
3950 unsigned long flags;
3951 int tmp_send_port_not_ready;
3954 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3955 spin_lock_irqsave(&cm_id_priv->lock, flags);
3956 if (cm_id->state == IB_CM_ESTABLISHED &&
3957 (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3958 cm_id->lap_state == IB_CM_LAP_IDLE)) {
3959 cm_id->lap_state = IB_CM_LAP_IDLE;
3960 /* Swap address vector */
3961 tmp_av = cm_id_priv->av;
3962 cm_id_priv->av = cm_id_priv->alt_av;
3963 cm_id_priv->alt_av = tmp_av;
3964 /* Swap port send ready state */
3965 tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready;
3966 cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready;
3967 cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready;
3970 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3975 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3980 case IB_EVENT_COMM_EST:
3981 ret = cm_establish(cm_id);
3983 case IB_EVENT_PATH_MIG:
3984 ret = cm_migrate(cm_id);
3991 EXPORT_SYMBOL(ib_cm_notify);
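/*
 * Usage sketch (illustrative only): ib_cm_notify() feeds relevant QP async
 * events back into the CM, e.g. when data arrives on the QP before the RTU
 * does (IB_EVENT_COMM_EST) or after hardware path migration.
 */
#if 0
static void example_qp_event(struct ib_event *event, void *context)
{
	struct ib_cm_id *cm_id = context;

	if (event->event == IB_EVENT_COMM_EST ||
	    event->event == IB_EVENT_PATH_MIG)
		ib_cm_notify(cm_id, event->event);
}
#endif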
3993 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3994 struct ib_mad_send_buf *send_buf,
3995 struct ib_mad_recv_wc *mad_recv_wc)
3997 struct cm_port *port = mad_agent->context;
3998 struct cm_work *work;
3999 enum ib_cm_event_type event;
4000 bool alt_path = false;
4005 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
4006 case CM_REQ_ATTR_ID:
4007 alt_path = cm_req_has_alt_path((struct cm_req_msg *)
4008 mad_recv_wc->recv_buf.mad);
4009 paths = 1 + (alt_path != 0);
4010 event = IB_CM_REQ_RECEIVED;
4012 case CM_MRA_ATTR_ID:
4013 event = IB_CM_MRA_RECEIVED;
4015 case CM_REJ_ATTR_ID:
4016 event = IB_CM_REJ_RECEIVED;
4018 case CM_REP_ATTR_ID:
4019 event = IB_CM_REP_RECEIVED;
4021 case CM_RTU_ATTR_ID:
4022 event = IB_CM_RTU_RECEIVED;
4024 case CM_DREQ_ATTR_ID:
4025 event = IB_CM_DREQ_RECEIVED;
4027 case CM_DREP_ATTR_ID:
4028 event = IB_CM_DREP_RECEIVED;
4030 case CM_SIDR_REQ_ATTR_ID:
4031 event = IB_CM_SIDR_REQ_RECEIVED;
4033 case CM_SIDR_REP_ATTR_ID:
4034 event = IB_CM_SIDR_REP_RECEIVED;
4036 case CM_LAP_ATTR_ID:
4038 event = IB_CM_LAP_RECEIVED;
4040 case CM_APR_ATTR_ID:
4041 event = IB_CM_APR_RECEIVED;
4044 ib_free_recv_mad(mad_recv_wc);
4048 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
4049 atomic_long_inc(&port->counter_group[CM_RECV].
4050 counter[attr_id - CM_ATTR_ID_OFFSET]);
4052 work = kmalloc(struct_size(work, path, paths), GFP_KERNEL);
4054 ib_free_recv_mad(mad_recv_wc);
4058 INIT_DELAYED_WORK(&work->work, cm_work_handler);
4059 work->cm_event.event = event;
4060 work->mad_recv_wc = mad_recv_wc;
4063 /* Check if the device started its remove_one */
4064 spin_lock_irq(&cm.lock);
4065 if (!port->cm_dev->going_down)
4066 queue_delayed_work(cm.wq, &work->work, 0);
4069 spin_unlock_irq(&cm.lock);
4073 ib_free_recv_mad(mad_recv_wc);
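/*
 * Annotation (not in the original source): cm_recv_handler() is the MAD
 * agent's receive callback. It maps the MAD attribute ID to a CM event,
 * bumps the per-attribute receive counter, and defers all state-machine
 * work to cm_work_handler() on the CM workqueue so the MAD receive path
 * never blocks on cm_id locks or ULP callbacks.
 */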
4077 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
4078 struct ib_qp_attr *qp_attr,
4081 unsigned long flags;
4084 spin_lock_irqsave(&cm_id_priv->lock, flags);
4085 switch (cm_id_priv->id.state) {
4086 case IB_CM_REQ_SENT:
4087 case IB_CM_MRA_REQ_RCVD:
4088 case IB_CM_REQ_RCVD:
4089 case IB_CM_MRA_REQ_SENT:
4090 case IB_CM_REP_RCVD:
4091 case IB_CM_MRA_REP_SENT:
4092 case IB_CM_REP_SENT:
4093 case IB_CM_MRA_REP_RCVD:
4094 case IB_CM_ESTABLISHED:
4095 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
4096 IB_QP_PKEY_INDEX | IB_QP_PORT;
4097 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
4098 if (cm_id_priv->responder_resources)
4099 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
4100 IB_ACCESS_REMOTE_ATOMIC;
4101 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
4102 qp_attr->port_num = cm_id_priv->av.port->port_num;
4106 pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
4107 __func__, be32_to_cpu(cm_id_priv->id.local_id),
4108 cm_id_priv->id.state);
4112 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
		qp_attr->path_mtu = cm_id_priv->path_mtu;
		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
		if (cm_id_priv->qp_type == IB_QPT_RC ||
		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER;
			qp_attr->max_dest_rd_atomic =
					cm_id_priv->responder_resources;
			qp_attr->min_rnr_timer = 0;
		}
		if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
			*qp_attr_mask |= IB_QP_ALT_PATH;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
		}
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
			       struct ib_qp_attr *qp_attr,
			       int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	/* Allow transition to RTS before sending REP */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
			*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
			qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
			switch (cm_id_priv->qp_type) {
			case IB_QPT_RC:
			case IB_QPT_XRC_INI:
				*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
						 IB_QP_MAX_QP_RD_ATOMIC;
				qp_attr->retry_cnt = cm_id_priv->retry_count;
				qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
				qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
				/* fall through */
			case IB_QPT_XRC_TGT:
				*qp_attr_mask |= IB_QP_TIMEOUT;
				qp_attr->timeout = cm_id_priv->av.timeout;
				break;
			default:
				break;
			}
			if (rdma_ah_get_dlid(&cm_id_priv->alt_av.ah_attr)) {
				*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
				qp_attr->path_mig_state = IB_MIG_REARM;
			}
		} else {
			*qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
			qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
			qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
			qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
			qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
			qp_attr->path_mig_state = IB_MIG_REARM;
		}
		ret = 0;
		break;
	default:
		pr_debug("%s: local_id %d, cm_id_priv->id.state: %d\n",
			 __func__, be32_to_cpu(cm_id_priv->id.local_id),
			 cm_id_priv->id.state);
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
		ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTR:
		ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);

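/*
 * Establishing a connection does not itself transition the consumer's QP;
 * the caller still walks the QP through INIT, RTR and RTS with
 * ib_modify_qp(). A rough sketch of how a ULP typically drives this
 * (illustrative only; "qp" and "cm_id" come from the caller's context):
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask, ret;
 *
 *	qp_attr.qp_state = IB_QPS_INIT;
 *	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 *
 *	... then the same sequence with IB_QPS_RTR and IB_QPS_RTS ...
 */
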
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
			       char *buf)
{
	struct cm_counter_group *group;
	struct cm_counter_attribute *cm_attr;

	group = container_of(obj, struct cm_counter_group, obj);
	cm_attr = container_of(attr, struct cm_counter_attribute, attr);

	return sprintf(buf, "%ld\n",
		       atomic_long_read(&group->counter[cm_attr->index]));
}

static const struct sysfs_ops cm_counter_ops = {
	.show = cm_show_counter
};

static struct kobj_type cm_counter_obj_type = {
	.sysfs_ops = &cm_counter_ops,
	.default_attrs = cm_counter_default_attrs
};

static char *cm_devnode(struct device *dev, umode_t *mode)
{
	if (mode)
		*mode = 0666;
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

struct class cm_class = {
	.owner   = THIS_MODULE,
	.name    = "infiniband_cm",
	.devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);

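/*
 * Devices created against this class (e.g. by the userspace CM module)
 * show up under /dev/infiniband/ with mode 0666, per cm_devnode() above.
 */
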
static int cm_create_port_fs(struct cm_port *port)
{
	int i, ret;

	for (i = 0; i < CM_COUNTER_GROUPS; i++) {
		ret = ib_port_register_module_stat(port->cm_dev->ib_device,
						   port->port_num,
						   &port->counter_group[i].obj,
						   &cm_counter_obj_type,
						   counter_group_names[i]);
		if (ret)
			goto error;
	}

	return 0;

error:
	while (i--)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
	return ret;
}

static void cm_remove_port_fs(struct cm_port *port)
{
	int i;

	for (i = 0; i < CM_COUNTER_GROUPS; i++)
		ib_port_unregister_module_stat(&port->counter_group[i].obj);
}

static void cm_add_one(struct ib_device *ib_device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION,
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	int count = 0;
	u8 i;

	cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
			 GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->ib_device = ib_device;
	cm_dev->ack_delay = ib_device->attrs.local_ca_ack_delay;
	cm_dev->going_down = 0;

	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = kzalloc(sizeof *port, GFP_KERNEL);
		if (!port)
			goto error1;

		cm_dev->port[i-1] = port;
		port->cm_dev = cm_dev;
		port->port_num = i;

		INIT_LIST_HEAD(&port->cm_priv_prim_list);
		INIT_LIST_HEAD(&port->cm_priv_altr_list);

		ret = cm_create_port_fs(port);
		if (ret)
			goto error1;

		port->mad_agent = ib_register_mad_agent(ib_device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port,
							0);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(ib_device, i, 0, &port_modify);
		if (ret)
			goto error3;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(ib_device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

error3:
	ib_unregister_mad_agent(port->mad_agent);
error2:
	cm_remove_port_fs(port);
error1:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	kfree(port);
	while (--i) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
		cm_remove_port_fs(port);
		kfree(port);
	}
free:
	kfree(cm_dev);
}

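/*
 * The unwinding above relies on the loop counter: "while (--i)" walks back
 * over the ports that were fully initialized before the failure, undoing
 * the port capability bit, the MAD agent registration and the counter
 * sysfs entries in reverse order, before freeing the partially initialized
 * port and the cm_dev itself.
 */
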
static void cm_remove_one(struct ib_device *ib_device, void *client_data)
{
	struct cm_device *cm_dev = client_data;
	struct cm_port *port;
	struct cm_id_private *cm_id_priv;
	struct ib_mad_agent *cur_mad_agent;
	struct ib_port_modify port_modify = {
		.clr_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int i;

	if (!cm_dev)
		return;

	write_lock_irqsave(&cm.device_lock, flags);
	list_del(&cm_dev->list);
	write_unlock_irqrestore(&cm.device_lock, flags);

	spin_lock_irq(&cm.lock);
	cm_dev->going_down = 1;
	spin_unlock_irq(&cm.lock);

	for (i = 1; i <= ib_device->phys_port_cnt; i++) {
		if (!rdma_cap_ib_cm(ib_device, i))
			continue;

		port = cm_dev->port[i-1];
		ib_modify_port(ib_device, port->port_num, 0, &port_modify);
		/* Mark all the cm_id's as not valid */
		spin_lock_irq(&cm.lock);
		list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list)
			cm_id_priv->altr_send_port_not_ready = 1;
		list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list)
			cm_id_priv->prim_send_port_not_ready = 1;
		spin_unlock_irq(&cm.lock);
		/*
		 * Flush the workqueue only after going_down has been set;
		 * this guarantees that no new work is queued by the receive
		 * handler, after which it is safe to unregister the MAD
		 * agent.
		 */
		flush_workqueue(cm.wq);
		spin_lock_irq(&cm.state_lock);
		cur_mad_agent = port->mad_agent;
		port->mad_agent = NULL;
		spin_unlock_irq(&cm.state_lock);
		ib_unregister_mad_agent(cur_mad_agent);
		cm_remove_port_fs(port);
		kfree(port);
	}

	kfree(cm_dev);
}

static int __init ib_cm_init(void)
{
	int ret;

	INIT_LIST_HEAD(&cm.device_list);
	rwlock_init(&cm.device_lock);
	spin_lock_init(&cm.lock);
	spin_lock_init(&cm.state_lock);
	cm.listen_service_table = RB_ROOT;
	cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
	cm.remote_id_table = RB_ROOT;
	cm.remote_qp_table = RB_ROOT;
	cm.remote_sidr_table = RB_ROOT;
	xa_init_flags(&cm.local_id_table, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
	INIT_LIST_HEAD(&cm.timewait_list);

	ret = class_register(&cm_class);
	if (ret) {
		ret = -ENOMEM;
		goto error1;
	}

	cm.wq = alloc_workqueue("ib_cm", 0, 1);
	if (!cm.wq) {
		ret = -ENOMEM;
		goto error2;
	}

	ret = ib_register_client(&cm_client);
	if (ret)
		goto error3;

	return 0;
error3:
	destroy_workqueue(cm.wq);
error2:
	class_unregister(&cm_class);
error1:
	return ret;
}

static void __exit ib_cm_cleanup(void)
{
	struct cm_timewait_info *timewait_info, *tmp;

	spin_lock_irq(&cm.lock);
	list_for_each_entry(timewait_info, &cm.timewait_list, list)
		cancel_delayed_work(&timewait_info->work.work);
	spin_unlock_irq(&cm.lock);

	ib_unregister_client(&cm_client);
	destroy_workqueue(cm.wq);

	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
		list_del(&timewait_info->list);
		kfree(timewait_info);
	}

	class_unregister(&cm_class);
	WARN_ON(!xa_empty(&cm.local_id_table));
}

module_init(ib_cm_init);
module_exit(ib_cm_cleanup);