/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/cmd.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
#include "cmd.h"
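
/*
 * Mellanox vendor-specific MAD classes, accepted alongside the standard
 * PERF_MGMT and CONG_MGMT classes in process_mad() below.
 */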
enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};
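
/*
 * SMPs (the SUBN management classes) may only be passed to firmware when
 * the port actually has an SMI; all other classes are always allowed.
 */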
static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
			   struct ib_mad *in_mad)
{
	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return true;
	return dev->mdev->port_caps[port_num - 1].has_smi;
}
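
/*
 * Execute a MAD in firmware via the MAD_IFC command.  Bit 0 of the opcode
 * modifier disables the M_Key check and bit 1 disables the B_Key check;
 * both checks are also skipped when no work completion is available,
 * since a key-violation trap would have nowhere to go.
 */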
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
			int ignore_bkey, u8 port, const struct ib_wc *in_wc,
			const struct ib_grh *in_grh, const void *in_mad,
			void *response_mad)
{
	u8 op_modifier = 0;

	if (!can_do_mad_ifc(dev, port, (struct ib_mad *)in_mad))
		return -EPERM;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_cmd_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier,
				port);
}
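
/*
 * Filter incoming MADs and forward the interesting ones to firmware.
 * Only Get/Set (plus TrapRepress for SMPs) of the subnet, performance,
 * congestion and Mellanox vendor classes are processed; everything else
 * is reported as success so the core drops it without a reply.
 */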
static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		       const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		       const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid;
	int err;

	slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else {
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
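
/*
 * Translate a firmware query_vport_counter response into the PMA
 * PortCountersExt layout.  PortXmitData/PortRcvData are specified in
 * units of four octets, hence the ">> 2" on the byte counts.
 */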
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
			       void *out)
{
#define MLX5_SUM_CNT(p, cntr1, cntr2)	\
	(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
	MLX5_GET64(query_vport_counter_out, p, cntr2))

	pma_cnt_ext->port_xmit_data =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
					 transmitted_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_rcv_data =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
					 received_ib_multicast.octets) >> 2);
	pma_cnt_ext->port_xmit_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
					 transmitted_ib_multicast.packets));
	pma_cnt_ext->port_rcv_packets =
		cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
					 received_ib_multicast.packets));
	pma_cnt_ext->port_unicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_unicast.packets);
	pma_cnt_ext->port_unicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_unicast.packets);
	pma_cnt_ext->port_multicast_xmit_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, transmitted_ib_multicast.packets);
	pma_cnt_ext->port_multicast_rcv_packets =
		MLX5_GET64_BE(query_vport_counter_out,
			      out, received_ib_multicast.packets);
}
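
/*
 * Fill the legacy 32-bit PMA PortCounters from the IB port counters
 * group of the PPCNT access register.
 */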
static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
			   void *out)
{
	/* Traffic counters will be reported in
	 * their 64bit form via ib_pma_portcounters_ext by default.
	 */
	void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
				     counter_set);

#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) {		\
	counter_var = MLX5_GET_BE(typeof(counter_var),			\
				  ib_port_cntrs_grp_data_layout,	\
				  out_pma, counter_name);		\
	}

	MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
			     symbol_error_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
			     link_error_recovery_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
			     link_downed_counter);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
			     port_rcv_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
			     port_rcv_remote_physical_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
			     port_rcv_switch_relay_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
			     port_xmit_discards);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
			     port_xmit_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_wait,
			     port_xmit_wait);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
			     port_rcv_constraint_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
			     link_overrun_errors);
	MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
			     vl_15_dropped);
}
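
/*
 * Answer PMA Get queries from hardware counters instead of the firmware
 * agent.  The PMA attribute payload begins at byte 64 of the MAD and
 * data[] starts right after the 24-byte MAD header, hence the
 * "out_mad->data + 40" offsets.  64-bit PortCountersExt are served from
 * the vport counters; 32-bit PortCounters come from the PPCNT register.
 */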
static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
			   const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	void *out_cnt;
	int err;

	/* Declaring support of extended counters */
	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
		struct ib_class_port_info cpi = {};

		cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
		memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
	}

	if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
		struct ib_pma_portcounters_ext *pma_cnt_ext =
			(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);

		out_cnt = kvzalloc(sz, GFP_KERNEL);
		if (!out_cnt)
			return IB_MAD_RESULT_FAILURE;

		err = mlx5_core_query_vport_counter(mdev, 0, 0,
						    port_num, out_cnt, sz);
		if (!err)
			pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
	} else {
		struct ib_pma_portcounters *pma_cnt =
			(struct ib_pma_portcounters *)(out_mad->data + 40);
		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

		out_cnt = kvzalloc(sz, GFP_KERNEL);
		if (!out_cnt)
			return IB_MAD_RESULT_FAILURE;

		err = mlx5_core_query_ib_ppcnt(mdev, port_num,
					       out_cnt, sz);
		if (!err)
			pma_cnt_assign(pma_cnt, out_cnt);
	}

	kvfree(out_cnt);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
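
/*
 * Main MAD entry point, registered as the device's ->process_mad()
 * handler.  Performance MADs are answered from hardware counters when
 * the device supports them; everything else goes through the firmware
 * MAD_IFC path in process_mad().
 */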
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;
	struct mlx5_core_dev *mdev;
	u8 mdev_port_num;
	int ret;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	memset(out_mad->data, 0, sizeof(out_mad->data));

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev)
		return IB_MAD_RESULT_FAILURE;

	if (MLX5_CAP_GEN(mdev, vport_counters) &&
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
		ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad);
	} else {
		ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
				  in_mad, out_mad);
	}
	mlx5_ib_put_native_port_mdev(dev, port_num);
	return ret;
}
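
/*
 * Probe the Mellanox ExtendedPortInfo vendor attribute; the result is
 * cached in port_caps and later used to detect FDR-10 links in
 * mlx5_query_mad_ifc_port().
 */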
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
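
/*
 * Common helper for the attribute queries below: issue a NodeInfo SMP
 * and hand the raw response back to the caller for decoding.
 */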
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad)
{
	struct ib_smp *in_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
			   out_mad);

	kfree(in_mad);
	return err;
}
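
/*
 * The next three queries decode fixed offsets of the NodeInfo attribute:
 * SystemImageGUID at byte 4, PartitionCap at byte 28, and the 24-bit
 * VendorID in the dword at byte 36 (the mask keeps its low 16 bits);
 * mlx5_query_mad_ifc_node_guid() below reads NodeGUID at byte 12 from
 * the same attribute.
 */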
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
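
/*
 * PKeyTable SMPs return one 32-entry block per query; attr_mod selects
 * the block ("index / 32") and "index % 32" picks the entry within it.
 */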
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
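
/*
 * A GID is assembled from two SMPs: the upper 8 bytes are the subnet
 * prefix from PortInfo, the lower 8 bytes come from the GUIDInfo table,
 * which holds eight GUIDs per block (hence "index / 8" and "index % 8").
 */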
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
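
/*
 * Query port attributes through a PortInfo SMP and decode the fixed
 * field offsets defined by the IBA specification.
 */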
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	/* props being zeroed by the caller, avoid zeroing it here */

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}
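
	/* Offsets below index into the PortInfo attribute payload */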
	props->lid		= be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len	= out_mad->data[50];
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mdev->port_caps[port - 1].pkey_table_len;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) {
		props->port_cap_flags2 =
			be16_to_cpup((__be16 *)(out_mad->data + 60));

		if (props->port_cap_flags2 & IB_PORT_LINK_WIDTH_2X_SUP)
			props->active_width = out_mad->data[31] & 0x1f;
	}

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		case 4:
			if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
			    props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
				props->active_speed = IB_SPEED_HDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if it is FDR-10 */
	if (props->active_speed == 4) {
		if (mdev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;