/*
 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014,2018 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/xarray.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ib_mad.h>

#ifdef CONFIG_TRACEPOINTS
static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
				 struct ib_mad_qp_info *qp_info,
				 struct trace_event_raw_ib_mad_send_template *entry)
{
	u16 pkey;
	struct ib_device *dev = qp_info->port_priv->device;
	u8 pnum = qp_info->port_priv->port_num;
	struct ib_ud_wr *wr = &mad_send_wr->send_wr;
	struct rdma_ah_attr attr = {};

	rdma_query_ah(wr->ah, &attr);

	/* These are common */
	entry->sl = attr.sl;
	ib_query_pkey(dev, pnum, wr->pkey_index, &pkey);
	entry->pkey = pkey;
	entry->rqpn = wr->remote_qpn;
	entry->rqkey = wr->remote_qkey;
	entry->dlid = rdma_ah_get_dlid(&attr);
}
#endif

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

/* Client ID 0 is used for snoop-only clients */
static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
static u32 ib_mad_client_next;
static struct list_head ib_mad_port_list;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns an ib_mad_port_private structure or NULL for a device/port.
 * Assumes ib_mad_port_list_lock is being held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function to return an ib_mad_port_private structure or NULL
 * for a device/port.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

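/*
 * Illustrative only (not part of the original file): a client's
 * recv_handler might use ib_response_mad() to separate solicited
 * responses from unsolicited requests.  A minimal sketch; the handler
 * name is a hypothetical placeholder:
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_buf *send_buf,
 *				    struct ib_mad_recv_wc *mad_recv_wc)
 *	{
 *		if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr))
 *			;	// matched reply; send_buf is the request
 *		else
 *			;	// unsolicited MAD
 *		ib_free_recv_mad(mad_recv_wc);
 *	}
 */
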
/*
 * ib_register_mad_agent - Register to send/receive MADs
 *
 * Context: Process context.
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	u8 mgmt_class, vclass;

	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
		return ERR_PTR(-EPROTONOSUPPORT);

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
				    __func__, qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_dbg_ratelimited(&device->dev,
				    "%s: invalid RMPP Version %u\n",
				    __func__, rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: invalid Class Version %u\n",
					    __func__,
					    mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: no recv_handler\n", __func__);
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid Mgmt Class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_dbg_ratelimited(&device->dev,
					    "%s: Invalid Mgmt Class 0\n",
					    __func__);
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: No OUI specified for class 0x%x\n",
					__func__,
					mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_dbg_ratelimited(&device->dev,
					"%s: RMPP version for non-RMPP class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid SM QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid GS QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
				    __func__, port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
				    __func__, qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
	if (ret2) {
		ret = ERR_PTR(ret2);
		goto error4;
	}

	/*
	 * The mlx4 driver uses the top byte to distinguish which virtual
	 * function generated the MAD, so we must avoid using it.
	 */
	ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
			       mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
			       &ib_mad_client_next, GFP_KERNEL);
	if (ret2 < 0) {
		ret = ERR_PTR(ret2);
		goto error5;
	}

	/*
	 * Make sure MAD registration (if supplied)
	 * is non-overlapping with any existing ones
	 */
	spin_lock_irq(&port_priv->reg_lock);
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							  mad_reg_req))
						goto error6;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error6;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error6;
		}
	}
	spin_unlock_irq(&port_priv->reg_lock);

	trace_ib_mad_create_agent(mad_agent_priv);
	return &mad_agent_priv->agent;

error6:
	spin_unlock_irq(&port_priv->reg_lock);
	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
error5:
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

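/*
 * Usage sketch (illustrative, not part of the original file): a kernel
 * client registers a GSI agent roughly like this.  The handler names
 * and context are hypothetical placeholders; passing a NULL reg_req
 * means the agent only sends and receives matched responses.
 *
 *	struct ib_mad_agent *agent;
 *
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      NULL, 0,
 *				      my_send_handler, my_recv_handler,
 *				      my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 *	...
 *	ib_unregister_mad_agent(agent);
 */
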
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		 IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}

static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}

struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;
	int err;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);

	err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
	if (err) {
		ret = ERR_PTR(err);
		goto error2;
	}

	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error3;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error3:
	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);

static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;

	/* Note that we could still be handling received MADs */
	trace_ib_mad_unregister_agent(mad_agent_priv);

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irq(&port_priv->reg_lock);
	remove_mad_reg_req(mad_agent_priv);
	spin_unlock_irq(&port_priv->reg_lock);
	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

	kfree(mad_agent_priv->reg_req);
	kfree_rcu(mad_agent_priv, rcu);
}

static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);

	kfree(mad_snoop_priv);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 *
 * Context: Process context.
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
			 u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		trace_ib_mad_handle_out_opa_smi(opa_smp);

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		trace_ib_mad_handle_out_ib_smi(smp);

		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
				      (const struct ib_mad_hdr *)smp, mad_size,
				      (struct ib_mad_hdr *)mad_priv->mad,
				      &mad_size, &out_mad_pkey_index);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
				(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return 0;
}

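/*
 * Worked example (illustrative): for an IB SA MAD, hdr_len is
 * IB_MGMT_SA_HDR (56) and mad_size is sizeof(struct ib_mad) (256),
 * so seg_size = 200.  With data_len = 500:
 *
 *	pad = 200 - (500 % 200) = 100
 *
 * making data_len + pad an exact multiple of the per-segment payload.
 * If data_len were already a multiple of 200, pad would compute to
 * seg_size and is folded back to 0.
 */
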
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

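/*
 * Note (illustrative): an agent registered with a non-zero rmpp_version
 * but with IB_MAD_USER_RMPP in its registration flags handles RMPP
 * segmentation and reassembly itself (e.g. the userspace MAD
 * interface).  Only agents for which this helper returns true have
 * RMPP performed by this layer on their behalf.
 */
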
struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

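/*
 * Usage sketch (illustrative, not part of the original file): building
 * a single-segment request to the GSI (remote QP 1).  The agent and ah
 * variables are assumed to exist in the caller.
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, 1, 0, 0, IB_MGMT_SA_HDR,
 *				 sizeof(struct ib_mad) - IB_MGMT_SA_HDR,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 1000;
 *	msg->retries = 3;
 *	// fill in msg->mad (MAD header + data), then post it
 */
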
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

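/*
 * Illustrative only: on an RMPP-active send buffer the payload lives in
 * per-segment allocations rather than one flat buffer, so callers fill
 * or read it segment by segment (seg_num is 1-based).  fill_segment()
 * below is a hypothetical caller helper:
 *
 *	for (i = 1; i <= send_buf->seg_count; i++)
 *		fill_segment(ib_get_rmpp_segment(send_buf, i),
 *			     send_buf->seg_size);
 */
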
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   NULL);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}

/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {
		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		ret = ib_mad_enforce_security(mad_agent_priv,
					      mad_send_wr->send_wr.pkey_index);
		if (ret)
			goto error;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);

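/*
 * Usage sketch (illustrative, not part of the original file): posting a
 * single prepared send buffer.  On success the buffer is owned by the
 * MAD layer until the send_handler runs; on failure the caller still
 * owns it (for a one-buffer post, *bad points back at msg).
 *
 *	struct ib_mad_send_buf *bad;
 *	int ret;
 *
 *	ret = ib_post_send_mad(msg, &bad);
 *	if (ret) {
 *		ib_free_send_mad(msg);
 *		return ret;
 *	}
 */
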
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

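/*
 * Note (illustrative): receive handlers own the ib_mad_recv_wc they are
 * given and must hand it back with ib_free_recv_mad() once done; for
 * RMPP transfers this releases every buffer chained on rmpp_list, not
 * just the first one.
 */
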
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	unsigned long i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", (int)i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	return (*method) ? 0 : (-ENOMEM);
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor)
			goto error1;

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class)
			goto error2;

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			if (!*method)
				goto error3;
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if (!*method) {
				ret = allocate_method_table(method);
				if (ret)
					goto error3;
			}
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req)
		goto out;

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		rcu_read_lock();
		mad_agent = xa_load(&ib_mad_clients, hi_tid);
		if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
			mad_agent = NULL;
		rcu_read_unlock();
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		spin_lock_irqsave(&port_priv->reg_lock, flags);
		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    ARRAY_SIZE(class->method_table))
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		}
		if (mad_agent)
			atomic_inc(&mad_agent->refcount);
out:
		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	}

	if (mad_agent && !mad_agent->agent.recv_handler) {
		dev_notice(&port_priv->device->dev,
			   "No receive handler for client %p on port %d\n",
			   &mad_agent->agent, port_priv->port_num);
		deref_mad_agent(mad_agent);
		mad_agent = NULL;
	}

	return mad_agent;
}

static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct rdma_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;
	bool has_grh;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (rdma_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
	if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!has_grh) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			const struct ib_global_route *grh =
					rdma_ah_read_grh(&attr);

			if (rdma_query_gid(device, port_num,
					   grh->sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!has_grh)
		return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
	else
		return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
			       rwc->recv_buf.grh->sgid.raw,
			       16);
}

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

struct ib_mad_send_wr_private *
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	ret = ib_mad_enforce_security(mad_agent_priv,
				      mad_recv_wc->wc->pkey_index);
	if (ret) {
		ib_free_recv_mad(mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
		return;
	}

	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent, NULL,
						mad_recv_wc);
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad
				 */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_agent_priv->agent.recv_handler(
					&mad_agent_priv->agent,
					&mad_send_wr->send_buf,
					mad_recv_wc);
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}

static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	trace_ib_mad_handle_ib_smi(smp);

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}

2146 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2147 struct ib_mad_private *response,
2148 size_t *resp_len, bool opa)
2150 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2151 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2153 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2154 recv_hdr->method == IB_MGMT_METHOD_SET) {
2155 memcpy(response, recv, mad_priv_size(response));
2156 response->header.recv_wc.wc = &response->header.wc;
2157 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2158 response->header.recv_wc.recv_buf.grh = &response->grh;
2159 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2160 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2161 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2162 resp_hdr->status |= IB_SMP_DIRECTION;
2164 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2165 if (recv_hdr->mgmt_class ==
2166 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2167 recv_hdr->mgmt_class ==
2168 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2169 *resp_len = opa_get_smp_header_size(
2170 (struct opa_smp *)recv->mad);
2171 else
2172 *resp_len = sizeof(struct ib_mad_hdr);
2173 }
2175 return true;
2176 } else {
2177 return false;
2178 }
2179 }
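/*
 * Note: the unmatched response built above simply echoes the request with the
 * method flipped to GetResp and the status set to "unsupported method or
 * attribute"; for directed-route SMPs the direction bit is also toggled so
 * the reply retraces the request's path.
 */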
2181 static enum smi_action
2182 handle_opa_smi(struct ib_mad_port_private *port_priv,
2183 struct ib_mad_qp_info *qp_info,
2184 struct ib_wc *wc,
2185 int port_num,
2186 struct ib_mad_private *recv,
2187 struct ib_mad_private *response)
2188 {
2189 enum smi_forward_action retsmi;
2190 struct opa_smp *smp = (struct opa_smp *)recv->mad;
2192 trace_ib_mad_handle_opa_smi(smp);
2194 if (opa_smi_handle_dr_smp_recv(smp,
2195 rdma_cap_ib_switch(port_priv->device),
2196 port_num,
2197 port_priv->device->phys_port_cnt) ==
2198 IB_SMI_DISCARD)
2199 return IB_SMI_DISCARD;
2201 retsmi = opa_smi_check_forward_dr_smp(smp);
2202 if (retsmi == IB_SMI_LOCAL)
2203 return IB_SMI_HANDLE;
2205 if (retsmi == IB_SMI_SEND) { /* don't forward */
2206 if (opa_smi_handle_dr_smp_send(smp,
2207 rdma_cap_ib_switch(port_priv->device),
2208 port_num) == IB_SMI_DISCARD)
2209 return IB_SMI_DISCARD;
2211 if (opa_smi_check_local_smp(smp, port_priv->device) ==
2212 IB_SMI_DISCARD)
2213 return IB_SMI_DISCARD;
2215 } else if (rdma_cap_ib_switch(port_priv->device)) {
2216 /* forward case for switches */
2217 memcpy(response, recv, mad_priv_size(response));
2218 response->header.recv_wc.wc = &response->header.wc;
2219 response->header.recv_wc.recv_buf.opa_mad =
2220 (struct opa_mad *)response->mad;
2221 response->header.recv_wc.recv_buf.grh = &response->grh;
2223 agent_send_response((const struct ib_mad_hdr *)response->mad,
2224 &response->grh, wc,
2225 port_priv->device,
2226 opa_smi_get_fwd_port(smp),
2227 qp_info->qp->qp_num,
2228 recv->header.wc.byte_len,
2229 true);
2231 return IB_SMI_DISCARD;
2232 }
2234 return IB_SMI_HANDLE;
2235 }
2237 static enum smi_action
2238 handle_smi(struct ib_mad_port_private *port_priv,
2239 struct ib_mad_qp_info *qp_info,
2240 struct ib_wc *wc,
2241 int port_num,
2242 struct ib_mad_private *recv,
2243 struct ib_mad_private *response,
2244 bool opa)
2245 {
2246 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;
2248 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
2249 mad_hdr->class_version == OPA_SM_CLASS_VERSION)
2250 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
2251 response);
2253 return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
2254 }
2256 static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
2257 {
2258 struct ib_mad_port_private *port_priv = cq->cq_context;
2259 struct ib_mad_list_head *mad_list =
2260 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2261 struct ib_mad_qp_info *qp_info;
2262 struct ib_mad_private_header *mad_priv_hdr;
2263 struct ib_mad_private *recv, *response = NULL;
2264 struct ib_mad_agent_private *mad_agent;
2265 int port_num;
2266 int ret = IB_MAD_RESULT_SUCCESS;
2267 size_t mad_size;
2268 u16 resp_mad_pkey_index = 0;
2269 bool opa;
2271 if (list_empty_careful(&port_priv->port_list))
2272 return;
2274 if (wc->status != IB_WC_SUCCESS) {
2275 /*
2276 * Receive errors indicate that the QP has entered the error
2277 * state - error handling/shutdown code will cleanup
2278 */
2279 return;
2280 }
2282 qp_info = mad_list->mad_queue->qp_info;
2283 dequeue_mad(mad_list);
2285 opa = rdma_cap_opa_mad(qp_info->port_priv->device,
2286 qp_info->port_priv->port_num);
2288 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
2289 mad_list);
2290 recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
2291 ib_dma_unmap_single(port_priv->device,
2292 recv->header.mapping,
2293 mad_priv_dma_size(recv),
2294 DMA_FROM_DEVICE);
2296 /* Setup MAD receive work completion from "normal" work completion */
2297 recv->header.wc = *wc;
2298 recv->header.recv_wc.wc = &recv->header.wc;
2300 if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
2301 recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
2302 recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2303 } else {
2304 recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2305 recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2306 }
2308 recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
2309 recv->header.recv_wc.recv_buf.grh = &recv->grh;
2311 if (atomic_read(&qp_info->snoop_count))
2312 snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);
2315 if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
2316 goto out;
2318 trace_ib_mad_recv_done_handler(qp_info, wc,
2319 (struct ib_mad_hdr *)recv->mad);
2321 mad_size = recv->mad_size;
2322 response = alloc_mad_private(mad_size, GFP_KERNEL);
2323 if (!response)
2324 goto out;
2326 if (rdma_cap_ib_switch(port_priv->device))
2327 port_num = wc->port_num;
2328 else
2329 port_num = port_priv->port_num;
2331 if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
2332 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
2333 if (handle_smi(port_priv, qp_info, wc, port_num, recv,
2334 response, opa)
2335 == IB_SMI_DISCARD)
2336 goto out;
2337 }
2339 /* Give driver "right of first refusal" on incoming MAD */
2340 if (port_priv->device->ops.process_mad) {
2341 ret = port_priv->device->ops.process_mad(
2342 port_priv->device, 0, port_priv->port_num, wc,
2343 &recv->grh, (const struct ib_mad_hdr *)recv->mad,
2344 recv->mad_size, (struct ib_mad_hdr *)response->mad,
2345 &mad_size, &resp_mad_pkey_index);
2347 if (opa)
2348 wc->pkey_index = resp_mad_pkey_index;
2350 if (ret & IB_MAD_RESULT_SUCCESS) {
2351 if (ret & IB_MAD_RESULT_CONSUMED)
2352 goto out;
2353 if (ret & IB_MAD_RESULT_REPLY) {
2354 agent_send_response((const struct ib_mad_hdr *)response->mad,
2355 &recv->grh, wc,
2356 port_priv->device,
2357 port_num,
2358 qp_info->qp->qp_num,
2359 mad_size, opa);
2360 goto out;
2361 }
2362 }
2363 }
2365 mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
2366 if (mad_agent) {
2367 trace_ib_mad_recv_done_agent(mad_agent);
2368 ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
2369 /*
2370 * recv is freed up in error cases in ib_mad_complete_recv
2371 * or via recv_handler in ib_mad_complete_recv()
2372 */
2373 recv = NULL;
2374 } else if ((ret & IB_MAD_RESULT_SUCCESS) &&
2375 generate_unmatched_resp(recv, response, &mad_size, opa)) {
2376 agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
2377 port_priv->device, port_num,
2378 qp_info->qp->qp_num, mad_size, opa);
2379 }
2381 out:
2382 /* Post another receive request for this QP */
2383 if (response) {
2384 ib_mad_post_receive_mads(qp_info, response);
2385 kfree(recv);
2386 } else
2387 ib_mad_post_receive_mads(qp_info, recv);
2388 }
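/*
 * Receive path summary: every completion consumes one posted receive, so a
 * replacement buffer is always reposted above - either the preallocated
 * "response" buffer (when "recv" was handed off to an agent) or the original
 * "recv" buffer itself.
 */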
2390 static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
2391 {
2392 struct ib_mad_send_wr_private *mad_send_wr;
2393 unsigned long delay;
2395 if (list_empty(&mad_agent_priv->wait_list)) {
2396 cancel_delayed_work(&mad_agent_priv->timed_work);
2397 } else {
2398 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2399 struct ib_mad_send_wr_private,
2400 agent_list);
2402 if (time_after(mad_agent_priv->timeout,
2403 mad_send_wr->timeout)) {
2404 mad_agent_priv->timeout = mad_send_wr->timeout;
2405 delay = mad_send_wr->timeout - jiffies;
2406 if ((long)delay <= 0)
2407 delay = 1;
2408 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2409 &mad_agent_priv->timed_work, delay);
2410 }
2411 }
2412 }
2414 static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
2415 {
2416 struct ib_mad_agent_private *mad_agent_priv;
2417 struct ib_mad_send_wr_private *temp_mad_send_wr;
2418 struct list_head *list_item;
2419 unsigned long delay;
2421 mad_agent_priv = mad_send_wr->mad_agent_priv;
2422 list_del(&mad_send_wr->agent_list);
2424 delay = mad_send_wr->timeout;
2425 mad_send_wr->timeout += jiffies;
2427 if (delay) {
2428 list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
2429 temp_mad_send_wr = list_entry(list_item,
2430 struct ib_mad_send_wr_private,
2431 agent_list);
2432 if (time_after(mad_send_wr->timeout,
2433 temp_mad_send_wr->timeout))
2434 break;
2435 }
2436 }
2437 else
2438 list_item = &mad_agent_priv->wait_list;
2439 list_add(&mad_send_wr->agent_list, list_item);
2441 /* Reschedule a work item if we have a shorter timeout */
2442 if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
2443 mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
2444 &mad_agent_priv->timed_work, delay);
2445 }
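/*
 * The wait list is kept sorted by absolute timeout, earliest at the head, so
 * the delayed work only needs rearming when the new entry becomes the head -
 * exactly the condition checked above.
 */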
2447 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
2448 unsigned long timeout_ms)
2449 {
2450 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2451 wait_for_response(mad_send_wr);
2452 }
2454 /*
2455 * Process a send work completion
2456 */
2457 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
2458 struct ib_mad_send_wc *mad_send_wc)
2459 {
2460 struct ib_mad_agent_private *mad_agent_priv;
2461 unsigned long flags;
2462 int ret;
2464 mad_agent_priv = mad_send_wr->mad_agent_priv;
2465 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2466 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2467 ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
2468 if (ret == IB_RMPP_RESULT_CONSUMED)
2469 goto done;
2470 } else
2471 ret = IB_RMPP_RESULT_UNHANDLED;
2473 if (mad_send_wc->status != IB_WC_SUCCESS &&
2474 mad_send_wr->status == IB_WC_SUCCESS) {
2475 mad_send_wr->status = mad_send_wc->status;
2476 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2477 }
2479 if (--mad_send_wr->refcount > 0) {
2480 if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
2481 mad_send_wr->status == IB_WC_SUCCESS) {
2482 wait_for_response(mad_send_wr);
2483 }
2484 goto done;
2485 }
2487 /* Remove send from MAD agent and notify client of completion */
2488 list_del(&mad_send_wr->agent_list);
2489 adjust_timeout(mad_agent_priv);
2490 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2492 if (mad_send_wr->status != IB_WC_SUCCESS)
2493 mad_send_wc->status = mad_send_wr->status;
2494 if (ret == IB_RMPP_RESULT_INTERNAL)
2495 ib_rmpp_send_handler(mad_send_wc);
2496 else
2497 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2498 mad_send_wc);
2500 /* Release reference on agent taken when sending */
2501 deref_mad_agent(mad_agent_priv);
2502 return;
2503 done:
2504 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2505 }
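/*
 * Refcount sketch: a request expecting a response starts with refcount 2
 * (send completion plus response/timeout). Dropping to 1 here with a timeout
 * still armed parks the request on the wait list via wait_for_response();
 * dropping to 0 removes it and reports the completion to the client.
 */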
2507 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
2508 {
2509 struct ib_mad_port_private *port_priv = cq->cq_context;
2510 struct ib_mad_list_head *mad_list =
2511 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2512 struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
2513 struct ib_mad_qp_info *qp_info;
2514 struct ib_mad_queue *send_queue;
2515 struct ib_mad_send_wc mad_send_wc;
2516 unsigned long flags;
2517 int ret;
2519 if (list_empty_careful(&port_priv->port_list))
2520 return;
2522 if (wc->status != IB_WC_SUCCESS) {
2523 if (!ib_mad_send_error(port_priv, wc))
2524 return;
2525 }
2527 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2528 mad_list);
2529 send_queue = mad_list->mad_queue;
2530 qp_info = send_queue->qp_info;
2532 trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
2533 trace_ib_mad_send_done_handler(mad_send_wr, wc);
2535 retry:
2536 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2537 mad_send_wr->header_mapping,
2538 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
2539 ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
2540 mad_send_wr->payload_mapping,
2541 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
2542 queued_send_wr = NULL;
2543 spin_lock_irqsave(&send_queue->lock, flags);
2544 list_del(&mad_list->list);
2546 /* Move queued send to the send queue */
2547 if (send_queue->count-- > send_queue->max_active) {
2548 mad_list = container_of(qp_info->overflow_list.next,
2549 struct ib_mad_list_head, list);
2550 queued_send_wr = container_of(mad_list,
2551 struct ib_mad_send_wr_private,
2552 mad_list);
2553 list_move_tail(&mad_list->list, &send_queue->list);
2554 }
2555 spin_unlock_irqrestore(&send_queue->lock, flags);
2557 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2558 mad_send_wc.status = wc->status;
2559 mad_send_wc.vendor_err = wc->vendor_err;
2560 if (atomic_read(&qp_info->snoop_count))
2561 snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
2562 IB_MAD_SNOOP_SEND_COMPLETIONS);
2563 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2565 if (queued_send_wr) {
2566 trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
2567 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
2568 NULL);
2569 if (ret) {
2570 dev_err(&port_priv->device->dev,
2571 "ib_post_send failed: %d\n", ret);
2572 mad_send_wr = queued_send_wr;
2573 wc->status = IB_WC_LOC_QP_OP_ERR;
2574 goto retry;
2575 }
2576 }
2577 }
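/*
 * Flow control sketch: when the send queue is at max_active, new work
 * requests wait on qp_info->overflow_list; each completion above frees one
 * slot and posts the oldest overflow entry, resending it through the same
 * completion path on failure.
 */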
2579 static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
2580 {
2581 struct ib_mad_send_wr_private *mad_send_wr;
2582 struct ib_mad_list_head *mad_list;
2583 unsigned long flags;
2585 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
2586 list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
2587 mad_send_wr = container_of(mad_list,
2588 struct ib_mad_send_wr_private,
2589 mad_list);
2590 mad_send_wr->retry = 1;
2591 }
2592 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
2593 }
2595 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
2596 struct ib_wc *wc)
2597 {
2598 struct ib_mad_list_head *mad_list =
2599 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
2600 struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
2601 struct ib_mad_send_wr_private *mad_send_wr;
2602 int ret;
2604 /*
2605 * Send errors will transition the QP to SQE - move
2606 * QP to RTS and repost flushed work requests
2607 */
2608 mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
2609 mad_list);
2610 if (wc->status == IB_WC_WR_FLUSH_ERR) {
2611 if (mad_send_wr->retry) {
2612 /* Repost send */
2613 mad_send_wr->retry = 0;
2614 trace_ib_mad_error_handler(mad_send_wr, qp_info);
2615 ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
2616 NULL);
2617 if (!ret)
2618 return false;
2619 }
2620 } else {
2621 struct ib_qp_attr *attr;
2623 /* Transition QP to RTS and fail offending send */
2624 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2625 if (attr) {
2626 attr->qp_state = IB_QPS_RTS;
2627 attr->cur_qp_state = IB_QPS_SQE;
2628 ret = ib_modify_qp(qp_info->qp, attr,
2629 IB_QP_STATE | IB_QP_CUR_STATE);
2630 kfree(attr);
2631 if (ret)
2632 dev_err(&port_priv->device->dev,
2633 "%s - ib_modify_qp to RTS: %d\n",
2634 __func__, ret);
2635 else
2636 mark_sends_for_retry(qp_info);
2637 }
2638 }
2640 return true;
2641 }
2643 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
2644 {
2645 unsigned long flags;
2646 struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
2647 struct ib_mad_send_wc mad_send_wc;
2648 struct list_head cancel_list;
2650 INIT_LIST_HEAD(&cancel_list);
2652 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2653 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2654 &mad_agent_priv->send_list, agent_list) {
2655 if (mad_send_wr->status == IB_WC_SUCCESS) {
2656 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2657 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2658 }
2659 }
2661 /* Empty wait list to prevent receives from finding a request */
2662 list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
2663 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2665 /* Report all cancelled requests */
2666 mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
2667 mad_send_wc.vendor_err = 0;
2669 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
2670 &cancel_list, agent_list) {
2671 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2672 list_del(&mad_send_wr->agent_list);
2673 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2674 &mad_send_wc);
2675 atomic_dec(&mad_agent_priv->refcount);
2676 }
2677 }
2679 static struct ib_mad_send_wr_private*
2680 find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
2681 struct ib_mad_send_buf *send_buf)
2682 {
2683 struct ib_mad_send_wr_private *mad_send_wr;
2685 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
2686 agent_list) {
2687 if (&mad_send_wr->send_buf == send_buf)
2688 return mad_send_wr;
2689 }
2691 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
2692 agent_list) {
2693 if (is_rmpp_data_mad(mad_agent_priv,
2694 mad_send_wr->send_buf.mad) &&
2695 &mad_send_wr->send_buf == send_buf)
2696 return mad_send_wr;
2697 }
2699 return NULL;
2700 }
2701 int ib_modify_mad(struct ib_mad_agent *mad_agent,
2702 struct ib_mad_send_buf *send_buf, u32 timeout_ms)
2703 {
2704 struct ib_mad_agent_private *mad_agent_priv;
2705 struct ib_mad_send_wr_private *mad_send_wr;
2706 unsigned long flags;
2707 int active;
2709 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
2710 agent);
2711 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2712 mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
2713 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
2714 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2715 return -EINVAL;
2716 }
2718 active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
2719 if (!timeout_ms) {
2720 mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
2721 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
2722 }
2724 mad_send_wr->send_buf.timeout_ms = timeout_ms;
2725 if (active)
2726 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
2727 else
2728 ib_reset_mad_timeout(mad_send_wr, timeout_ms);
2730 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2731 return 0;
2732 }
2733 EXPORT_SYMBOL(ib_modify_mad);
2735 void ib_cancel_mad(struct ib_mad_agent *mad_agent,
2736 struct ib_mad_send_buf *send_buf)
2737 {
2738 ib_modify_mad(mad_agent, send_buf, 0);
2739 }
2740 EXPORT_SYMBOL(ib_cancel_mad);
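/*
 * Usage sketch (hypothetical client code): a client may rearm or abort an
 * outstanding request; cancelling reports the send to the client's
 * send_handler with IB_WC_WR_FLUSH_ERR:
 *
 *	ib_modify_mad(agent, msg, 50);	// rearm timeout to ~50 ms
 *	ib_cancel_mad(agent, msg);	// equivalent to a timeout of 0
 */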
2742 static void local_completions(struct work_struct *work)
2743 {
2744 struct ib_mad_agent_private *mad_agent_priv;
2745 struct ib_mad_local_private *local;
2746 struct ib_mad_agent_private *recv_mad_agent;
2747 unsigned long flags;
2748 int free_mad;
2749 struct ib_wc wc;
2750 struct ib_mad_send_wc mad_send_wc;
2751 bool opa;
2753 mad_agent_priv =
2754 container_of(work, struct ib_mad_agent_private, local_work);
2756 opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
2757 mad_agent_priv->qp_info->port_priv->port_num);
2759 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2760 while (!list_empty(&mad_agent_priv->local_list)) {
2761 local = list_entry(mad_agent_priv->local_list.next,
2762 struct ib_mad_local_private,
2763 completion_list);
2764 list_del(&local->completion_list);
2765 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2766 free_mad = 0;
2767 if (local->mad_priv) {
2768 u8 base_version;
2769 recv_mad_agent = local->recv_mad_agent;
2770 if (!recv_mad_agent) {
2771 dev_err(&mad_agent_priv->agent.device->dev,
2772 "No receive MAD agent for local completion\n");
2774 goto local_send_completion;
2778 * Defined behavior is to complete response
2781 build_smp_wc(recv_mad_agent->agent.qp,
2782 local->mad_send_wr->send_wr.wr.wr_cqe,
2783 be16_to_cpu(IB_LID_PERMISSIVE),
2784 local->mad_send_wr->send_wr.pkey_index,
2785 recv_mad_agent->agent.port_num, &wc);
2787 local->mad_priv->header.recv_wc.wc = &wc;
2789 base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
2790 if (opa && base_version == OPA_MGMT_BASE_VERSION) {
2791 local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
2792 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
2793 } else {
2794 local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
2795 local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
2796 }
2798 INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
2799 list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
2800 &local->mad_priv->header.recv_wc.rmpp_list);
2801 local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
2802 local->mad_priv->header.recv_wc.recv_buf.mad =
2803 (struct ib_mad *)local->mad_priv->mad;
2804 if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
2805 snoop_recv(recv_mad_agent->qp_info,
2806 &local->mad_priv->header.recv_wc,
2807 IB_MAD_SNOOP_RECVS);
2808 recv_mad_agent->agent.recv_handler(
2809 &recv_mad_agent->agent,
2810 &local->mad_send_wr->send_buf,
2811 &local->mad_priv->header.recv_wc);
2812 spin_lock_irqsave(&recv_mad_agent->lock, flags);
2813 atomic_dec(&recv_mad_agent->refcount);
2814 spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
2816 }
2817 local_send_completion:
2818 /* Complete send */
2819 mad_send_wc.status = IB_WC_SUCCESS;
2820 mad_send_wc.vendor_err = 0;
2821 mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
2822 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
2823 snoop_send(mad_agent_priv->qp_info,
2824 &local->mad_send_wr->send_buf,
2825 &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
2826 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2827 &mad_send_wc);
2829 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2830 atomic_dec(&mad_agent_priv->refcount);
2831 if (free_mad)
2832 kfree(local->mad_priv);
2833 kfree(local);
2834 }
2835 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2836 }
2838 static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
2839 {
2840 int ret;
2842 if (!mad_send_wr->retries_left)
2843 return -ETIMEDOUT;
2845 mad_send_wr->retries_left--;
2846 mad_send_wr->send_buf.retries++;
2848 mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
2850 if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
2851 ret = ib_retry_rmpp(mad_send_wr);
2852 switch (ret) {
2853 case IB_RMPP_RESULT_UNHANDLED:
2854 ret = ib_send_mad(mad_send_wr);
2855 break;
2856 case IB_RMPP_RESULT_CONSUMED:
2857 ret = 0;
2858 break;
2859 default:
2860 ret = -ECOMM;
2861 break;
2862 }
2863 } else
2864 ret = ib_send_mad(mad_send_wr);
2866 if (!ret) {
2867 mad_send_wr->refcount++;
2868 list_add_tail(&mad_send_wr->agent_list,
2869 &mad_send_wr->mad_agent_priv->send_list);
2870 }
2871 return ret;
2872 }
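/*
 * retry_send() returns 0 when the retry was posted (or consumed by RMPP), in
 * which case the request moves back to the send list above; a non-zero
 * return tells timeout_sends() to report the timeout to the client instead.
 */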
2874 static void timeout_sends(struct work_struct *work)
2875 {
2876 struct ib_mad_agent_private *mad_agent_priv;
2877 struct ib_mad_send_wr_private *mad_send_wr;
2878 struct ib_mad_send_wc mad_send_wc;
2879 unsigned long flags, delay;
2881 mad_agent_priv = container_of(work, struct ib_mad_agent_private,
2882 timed_work.work);
2883 mad_send_wc.vendor_err = 0;
2885 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2886 while (!list_empty(&mad_agent_priv->wait_list)) {
2887 mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
2888 struct ib_mad_send_wr_private,
2889 agent_list);
2891 if (time_after(mad_send_wr->timeout, jiffies)) {
2892 delay = mad_send_wr->timeout - jiffies;
2893 if ((long)delay <= 0)
2894 delay = 1;
2895 queue_delayed_work(mad_agent_priv->qp_info->
2896 port_priv->wq,
2897 &mad_agent_priv->timed_work, delay);
2898 break;
2899 }
2901 list_del(&mad_send_wr->agent_list);
2902 if (mad_send_wr->status == IB_WC_SUCCESS &&
2903 !retry_send(mad_send_wr))
2904 continue;
2906 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2908 if (mad_send_wr->status == IB_WC_SUCCESS)
2909 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
2910 else
2911 mad_send_wc.status = mad_send_wr->status;
2912 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2913 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
2914 &mad_send_wc);
2916 atomic_dec(&mad_agent_priv->refcount);
2917 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2919 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2920 }
2922 /*
2923 * Allocate receive MADs and post receive WRs for them
2924 */
2925 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
2926 struct ib_mad_private *mad)
2927 {
2928 unsigned long flags;
2929 int post, ret;
2930 struct ib_mad_private *mad_priv;
2931 struct ib_sge sg_list;
2932 struct ib_recv_wr recv_wr;
2933 struct ib_mad_queue *recv_queue = &qp_info->recv_queue;
2935 /* Initialize common scatter list fields */
2936 sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;
2938 /* Initialize common receive WR fields */
2939 recv_wr.next = NULL;
2940 recv_wr.sg_list = &sg_list;
2941 recv_wr.num_sge = 1;
2943 do {
2944 /* Allocate and map receive buffer */
2945 if (mad) {
2946 mad_priv = mad;
2947 mad = NULL;
2948 } else {
2949 mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
2950 GFP_ATOMIC);
2951 if (!mad_priv) {
2952 ret = -ENOMEM;
2953 break;
2954 }
2955 }
2956 sg_list.length = mad_priv_dma_size(mad_priv);
2957 sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
2958 &mad_priv->grh,
2959 mad_priv_dma_size(mad_priv),
2960 DMA_FROM_DEVICE);
2961 if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
2962 sg_list.addr))) {
2963 ret = -ENOMEM;
2964 break;
2965 }
2966 mad_priv->header.mapping = sg_list.addr;
2967 mad_priv->header.mad_list.mad_queue = recv_queue;
2968 mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
2969 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
2971 /* Post receive WR */
2972 spin_lock_irqsave(&recv_queue->lock, flags);
2973 post = (++recv_queue->count < recv_queue->max_active);
2974 list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
2975 spin_unlock_irqrestore(&recv_queue->lock, flags);
2976 ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
2977 if (ret) {
2978 spin_lock_irqsave(&recv_queue->lock, flags);
2979 list_del(&mad_priv->header.mad_list.list);
2980 recv_queue->count--;
2981 spin_unlock_irqrestore(&recv_queue->lock, flags);
2982 ib_dma_unmap_single(qp_info->port_priv->device,
2983 mad_priv->header.mapping,
2984 mad_priv_dma_size(mad_priv),
2985 DMA_FROM_DEVICE);
2986 kfree(mad_priv);
2987 dev_err(&qp_info->port_priv->device->dev,
2988 "ib_post_recv failed: %d\n", ret);
2989 break;
2990 }
2991 } while (post);
2993 return ret;
2994 }
2996 /*
2997 * Return all the posted receive MADs
2998 */
2999 static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
3000 {
3001 struct ib_mad_private_header *mad_priv_hdr;
3002 struct ib_mad_private *recv;
3003 struct ib_mad_list_head *mad_list;
3005 if (!qp_info->qp)
3006 return;
3008 while (!list_empty(&qp_info->recv_queue.list)) {
3010 mad_list = list_entry(qp_info->recv_queue.list.next,
3011 struct ib_mad_list_head, list);
3012 mad_priv_hdr = container_of(mad_list,
3013 struct ib_mad_private_header,
3014 mad_list);
3015 recv = container_of(mad_priv_hdr, struct ib_mad_private,
3016 header);
3018 /* Remove from posted receive MAD list */
3019 list_del(&mad_list->list);
3021 ib_dma_unmap_single(qp_info->port_priv->device,
3022 recv->header.mapping,
3023 mad_priv_dma_size(recv),
3024 DMA_FROM_DEVICE);
3025 kfree(recv);
3026 }
3028 qp_info->recv_queue.count = 0;
3029 }
3031 /*
3032 * Start the port
3033 */
3034 static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
3035 {
3036 int ret, i;
3037 struct ib_qp_attr *attr;
3038 struct ib_qp *qp;
3039 u16 pkey_index;
3041 attr = kmalloc(sizeof *attr, GFP_KERNEL);
3042 if (!attr)
3043 return -ENOMEM;
3045 ret = ib_find_pkey(port_priv->device, port_priv->port_num,
3046 IB_DEFAULT_PKEY_FULL, &pkey_index);
3047 if (ret)
3048 pkey_index = 0;
3050 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3051 qp = port_priv->qp_info[i].qp;
3052 if (!qp)
3053 continue;
3055 /*
3056 * PKey index for QP1 is irrelevant but
3057 * one is needed for the Reset to Init transition
3058 */
3059 attr->qp_state = IB_QPS_INIT;
3060 attr->pkey_index = pkey_index;
3061 attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
3062 ret = ib_modify_qp(qp, attr, IB_QP_STATE |
3063 IB_QP_PKEY_INDEX | IB_QP_QKEY);
3064 if (ret) {
3065 dev_err(&port_priv->device->dev,
3066 "Couldn't change QP%d state to INIT: %d\n",
3067 i, ret);
3068 goto out;
3069 }
3071 attr->qp_state = IB_QPS_RTR;
3072 ret = ib_modify_qp(qp, attr, IB_QP_STATE);
3073 if (ret) {
3074 dev_err(&port_priv->device->dev,
3075 "Couldn't change QP%d state to RTR: %d\n",
3076 i, ret);
3077 goto out;
3078 }
3080 attr->qp_state = IB_QPS_RTS;
3081 attr->sq_psn = IB_MAD_SEND_Q_PSN;
3082 ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
3083 if (ret) {
3084 dev_err(&port_priv->device->dev,
3085 "Couldn't change QP%d state to RTS: %d\n",
3086 i, ret);
3087 goto out;
3088 }
3089 }
3091 ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
3092 if (ret) {
3093 dev_err(&port_priv->device->dev,
3094 "Failed to request completion notification: %d\n",
3095 ret);
3096 goto out;
3097 }
3099 for (i = 0; i < IB_MAD_QPS_CORE; i++) {
3100 if (!port_priv->qp_info[i].qp)
3101 continue;
3103 ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
3104 if (ret) {
3105 dev_err(&port_priv->device->dev,
3106 "Couldn't post receive WRs\n");
3107 goto out;
3108 }
3109 }
3110 out:
3111 kfree(attr);
3112 return ret;
3113 }
3115 static void qp_event_handler(struct ib_event *event, void *qp_context)
3116 {
3117 struct ib_mad_qp_info *qp_info = qp_context;
3119 /* It's worse than that! He's dead, Jim! */
3120 dev_err(&qp_info->port_priv->device->dev,
3121 "Fatal error (%d) on MAD QP (%d)\n",
3122 event->event, qp_info->qp->qp_num);
3123 }
3125 static void init_mad_queue(struct ib_mad_qp_info *qp_info,
3126 struct ib_mad_queue *mad_queue)
3127 {
3128 mad_queue->qp_info = qp_info;
3129 mad_queue->count = 0;
3130 spin_lock_init(&mad_queue->lock);
3131 INIT_LIST_HEAD(&mad_queue->list);
3132 }
3134 static void init_mad_qp(struct ib_mad_port_private *port_priv,
3135 struct ib_mad_qp_info *qp_info)
3136 {
3137 qp_info->port_priv = port_priv;
3138 init_mad_queue(qp_info, &qp_info->send_queue);
3139 init_mad_queue(qp_info, &qp_info->recv_queue);
3140 INIT_LIST_HEAD(&qp_info->overflow_list);
3141 spin_lock_init(&qp_info->snoop_lock);
3142 qp_info->snoop_table = NULL;
3143 qp_info->snoop_table_size = 0;
3144 atomic_set(&qp_info->snoop_count, 0);
3145 }
3147 static int create_mad_qp(struct ib_mad_qp_info *qp_info,
3148 enum ib_qp_type qp_type)
3149 {
3150 struct ib_qp_init_attr qp_init_attr;
3151 int ret;
3153 memset(&qp_init_attr, 0, sizeof qp_init_attr);
3154 qp_init_attr.send_cq = qp_info->port_priv->cq;
3155 qp_init_attr.recv_cq = qp_info->port_priv->cq;
3156 qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
3157 qp_init_attr.cap.max_send_wr = mad_sendq_size;
3158 qp_init_attr.cap.max_recv_wr = mad_recvq_size;
3159 qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
3160 qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
3161 qp_init_attr.qp_type = qp_type;
3162 qp_init_attr.port_num = qp_info->port_priv->port_num;
3163 qp_init_attr.qp_context = qp_info;
3164 qp_init_attr.event_handler = qp_event_handler;
3165 qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
3166 if (IS_ERR(qp_info->qp)) {
3167 dev_err(&qp_info->port_priv->device->dev,
3168 "Couldn't create ib_mad QP%d\n",
3169 get_spl_qp_index(qp_type));
3170 ret = PTR_ERR(qp_info->qp);
3171 goto error;
3172 }
3173 /* Use minimum queue sizes unless the CQ is resized */
3174 qp_info->send_queue.max_active = mad_sendq_size;
3175 qp_info->recv_queue.max_active = mad_recvq_size;
3176 return 0;
3178 error:
3179 return ret;
3180 }
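/*
 * Sizing note: max_active mirrors the send_queue_size/recv_queue_size module
 * parameters (clamped in ib_mad_init()). For example, assuming ib_mad is
 * built into the kernel, on the command line:
 *
 *	ib_mad.send_queue_size=256 ib_mad.recv_queue_size=1024
 */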
3182 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
3183 {
3184 if (!qp_info->qp)
3185 return;
3187 ib_destroy_qp(qp_info->qp);
3188 kfree(qp_info->snoop_table);
3189 }
3191 /*
3192 * Open the port
3193 * Create the QP, PD, MR, and CQ if needed
3194 */
3195 static int ib_mad_port_open(struct ib_device *device,
3196 int port_num)
3197 {
3198 int ret, cq_size;
3199 struct ib_mad_port_private *port_priv;
3200 unsigned long flags;
3201 char name[sizeof "ib_mad123"];
3204 if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
3207 if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
3208 rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
3209 return -EFAULT;
3211 /* Create new device info */
3212 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
3213 if (!port_priv)
3214 return -ENOMEM;
3216 port_priv->device = device;
3217 port_priv->port_num = port_num;
3218 spin_lock_init(&port_priv->reg_lock);
3219 init_mad_qp(port_priv, &port_priv->qp_info[0]);
3220 init_mad_qp(port_priv, &port_priv->qp_info[1]);
3222 cq_size = mad_sendq_size + mad_recvq_size;
3223 has_smi = rdma_cap_ib_smi(device, port_num);
3224 if (has_smi)
3225 cq_size *= 2;
3227 port_priv->pd = ib_alloc_pd(device, 0);
3228 if (IS_ERR(port_priv->pd)) {
3229 dev_err(&device->dev, "Couldn't create ib_mad PD\n");
3230 ret = PTR_ERR(port_priv->pd);
3231 goto error3;
3232 }
3234 port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
3235 IB_POLL_UNBOUND_WORKQUEUE);
3236 if (IS_ERR(port_priv->cq)) {
3237 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
3238 ret = PTR_ERR(port_priv->cq);
3239 goto error4;
3240 }
3242 if (has_smi) {
3243 ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
3244 if (ret)
3245 goto error6;
3246 }
3247 ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
3248 if (ret)
3249 goto error7;
3251 snprintf(name, sizeof name, "ib_mad%d", port_num);
3252 port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
3253 if (!port_priv->wq) {
3254 ret = -ENOMEM;
3255 goto error8;
3256 }
3258 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3259 list_add_tail(&port_priv->port_list, &ib_mad_port_list);
3260 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3262 ret = ib_mad_port_start(port_priv);
3263 if (ret) {
3264 dev_err(&device->dev, "Couldn't start port\n");
3265 goto error9;
3266 }
3268 return 0;
3270 error9:
3271 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3272 list_del_init(&port_priv->port_list);
3273 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3275 destroy_workqueue(port_priv->wq);
3276 error8:
3277 destroy_mad_qp(&port_priv->qp_info[1]);
3278 error7:
3279 destroy_mad_qp(&port_priv->qp_info[0]);
3280 error6:
3281 ib_free_cq(port_priv->cq);
3282 cleanup_recv_queue(&port_priv->qp_info[1]);
3283 cleanup_recv_queue(&port_priv->qp_info[0]);
3284 error4:
3285 ib_dealloc_pd(port_priv->pd);
3286 error3:
3287 kfree(port_priv);
3289 return ret;
3290 }
3293 /*
3294 * If there are no classes using the port, free the port
3295 * resources (CQ, MR, PD, QP) and remove the port's info structure
3296 */
3297 static int ib_mad_port_close(struct ib_device *device, int port_num)
3298 {
3299 struct ib_mad_port_private *port_priv;
3300 unsigned long flags;
3302 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
3303 port_priv = __ib_get_mad_port(device, port_num);
3304 if (port_priv == NULL) {
3305 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3306 dev_err(&device->dev, "Port %d not found\n", port_num);
3309 list_del_init(&port_priv->port_list);
3310 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
3312 destroy_workqueue(port_priv->wq);
3313 destroy_mad_qp(&port_priv->qp_info[1]);
3314 destroy_mad_qp(&port_priv->qp_info[0]);
3315 ib_free_cq(port_priv->cq);
3316 ib_dealloc_pd(port_priv->pd);
3317 cleanup_recv_queue(&port_priv->qp_info[1]);
3318 cleanup_recv_queue(&port_priv->qp_info[0]);
3319 /* XXX: Handle deallocation of MAD registration tables */
3321 kfree(port_priv);
3323 return 0;
3324 }
3326 static void ib_mad_init_device(struct ib_device *device)
3327 {
3328 int start, i;
3330 start = rdma_start_port(device);
3332 for (i = start; i <= rdma_end_port(device); i++) {
3333 if (!rdma_cap_ib_mad(device, i))
3334 continue;
3336 if (ib_mad_port_open(device, i)) {
3337 dev_err(&device->dev, "Couldn't open port %d\n", i);
3340 if (ib_agent_port_open(device, i)) {
3341 dev_err(&device->dev,
3342 "Couldn't open port %d for agents\n", i);
3349 if (ib_mad_port_close(device, i))
3350 dev_err(&device->dev, "Couldn't close port %d\n", i);
3353 while (--i >= start) {
3354 if (!rdma_cap_ib_mad(device, i))
3355 continue;
3357 if (ib_agent_port_close(device, i))
3358 dev_err(&device->dev,
3359 "Couldn't close port %d for agents\n", i);
3360 if (ib_mad_port_close(device, i))
3361 dev_err(&device->dev, "Couldn't close port %d\n", i);
3365 static void ib_mad_remove_device(struct ib_device *device, void *client_data)
3366 {
3367 unsigned int i;
3369 rdma_for_each_port (device, i) {
3370 if (!rdma_cap_ib_mad(device, i))
3371 continue;
3373 if (ib_agent_port_close(device, i))
3374 dev_err(&device->dev,
3375 "Couldn't close port %d for agents\n", i);
3376 if (ib_mad_port_close(device, i))
3377 dev_err(&device->dev, "Couldn't close port %d\n", i);
3381 static struct ib_client mad_client = {
3382 .name = "mad",
3383 .add = ib_mad_init_device,
3384 .remove = ib_mad_remove_device
3385 };
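/*
 * Registering this ib_client below makes the core invoke
 * ib_mad_init_device() for every existing and future RDMA device and
 * ib_mad_remove_device() on removal, so MAD services track device hotplug
 * automatically.
 */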
3387 int ib_mad_init(void)
3388 {
3389 mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
3390 mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
3392 mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
3393 mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
3395 INIT_LIST_HEAD(&ib_mad_port_list);
3397 if (ib_register_client(&mad_client)) {
3398 pr_err("Couldn't register ib_mad client\n");
3405 void ib_mad_cleanup(void)
3406 {
3407 ib_unregister_client(&mad_client);
3408 }