/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/addrconf.h>
#include <net/devlink.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>
#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u8 port_num);

static struct workqueue_struct *wq;
static void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
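
/*
 * Usage note (illustrative, mirroring the pattern used throughout this
 * file): every SMP query below stamps the common header with
 * init_query_mad(), picks an attribute, then calls into firmware:
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 *	err = mlx4_MAD_IFC(dev, MLX4_MAD_IFC_IGNORE_KEYS, 1, NULL, NULL,
 *			   in_mad, out_mad);
 */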
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;

		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(eth_num_ports &&
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}
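
/*
 * In short: device-managed flow steering stays enabled only when the
 * device is in MLX4_STEERING_MODE_DEVICE_MANAGED, at least one port is
 * Ethernet with FS_EN, any IB ports also support DMFS_IPOIB, and the
 * device is not a multifunction one with IB ports.
 */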
static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_num_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	return ib_num_ports;
}
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev;

	rcu_read_lock();
	dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

	if (dev) {
		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper = NULL;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}
	}
	if (dev)
		dev_hold(dev);

	rcu_read_unlock();
	return dev;
}
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
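
/*
 * When the ports are bonded, the same GID table is deliberately
 * programmed into both physical ports: the first SET_PORT targets
 * port_num and the second explicitly targets port 2, since userspace
 * only ever sees a single logical port.
 */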
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u8 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
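
/*
 * RoCE v1 and v2 GIDs share one hardware table here: entries of type
 * IB_GID_TYPE_ROCE_UDP_ENCAP (RoCE v2) are tagged with version 2, and
 * non-IPv4-mapped addresses additionally set the type field - our
 * reading is that this tells firmware whether to build IPv4 or IPv6
 * UDP-encapsulated headers for that entry.
 */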
static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u8 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}
static void free_gid_entry(struct gid_entry *entry)
{
	memset(&entry->gid, 0, sizeof(entry->gid));
	kfree(entry->ctx);
	entry->ctx = NULL;
}
static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids = NULL;
	u16 vlan_id = 0xffff;
	u8 mac[ETH_ALEN];

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;
	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;
	if (!context)
		return -EINVAL;

	ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
	if (ret)
		return ret;
	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    port_gid_table->gids[i].gid_type == attr->gid_type &&
		    port_gid_table->gids[i].vlan_id == vlan_id) {
			found = i;
			break;
		}
		if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				memcpy(&port_gid_table->gids[free].gid,
				       &attr->gid, sizeof(attr->gid));
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].vlan_id = vlan_id;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;

		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		if (ret) {
			spin_lock_bh(&iboe->lock);
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
			spin_unlock_bh(&iboe->lock);
		}
		kfree(gids);
	}
	return ret;
}
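
/*
 * Cache algorithm in brief: one pass over the port GID table looks for
 * an exact (gid, gid_type, vlan_id) match and, simultaneously, the
 * first free slot. A hit only bumps the entry's refcount; a miss
 * claims the free slot and pushes the whole shadow table to firmware
 * via mlx4_ib_update_gids() after the spinlock is dropped.
 */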
static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;
	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			free_gid_entry(&port_gid_table->gids[real_index]);
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid,
				       &port_gid_table->gids[i].gid,
				       sizeof(union ib_gid));
				gids[i].gid_type =
				    port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		kfree(gids);
	}
	return ret;
}
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    const struct ib_gid_attr *attr)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	unsigned long flags;
	u8 port_num = attr->port_num;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return attr->index;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    attr->gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}
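
/*
 * This translates an rdma-core GID cache index into the slot actually
 * programmed in hardware; the two can differ on RoCE because entries
 * are deduplicated by (gid, gid_type, vlan) in mlx4_ib_add_gid().
 */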
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
	if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
		props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = dev->dev->persist->pdev->device;
	props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->dev->caps.page_size_cap;
	props->max_qp = dev->dev->quotas.qp;
	props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_send_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_recv_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq = dev->dev->quotas.cq;
	props->max_cqe = dev->dev->caps.max_cqes;
	props->max_mr = dev->dev->quotas.mpt;
	props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = dev->dev->quotas.srq;
	props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_pkeys = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
	    mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
		if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
			props->rss_caps.max_rwq_indirection_tables =
				props->max_qp;
			props->rss_caps.max_rwq_indirection_table_size =
				dev->dev->caps.max_rss_tbl_sz;
			props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
			props->max_wq_type_rq = props->max_qp;
		}

		if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

	if (!mlx4_is_slave(dev->dev))
		err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!err && !mlx4_is_slave(dev->dev)) {
			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen >= resp.response_length +
	    sizeof(resp.max_inl_recv_sz)) {
		resp.response_length += sizeof(resp.max_inl_recv_sz);
		resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
	}

	if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
		if (props->rss_caps.supported_qpts) {
			resp.rss_caps.rx_hash_function =
				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;

			resp.rss_caps.rx_hash_fields_mask =
				MLX4_IB_RX_HASH_SRC_IPV4 |
				MLX4_IB_RX_HASH_DST_IPV4 |
				MLX4_IB_RX_HASH_SRC_IPV6 |
				MLX4_IB_RX_HASH_DST_IPV6 |
				MLX4_IB_RX_HASH_SRC_PORT_TCP |
				MLX4_IB_RX_HASH_DST_PORT_TCP |
				MLX4_IB_RX_HASH_SRC_PORT_UDP |
				MLX4_IB_RX_HASH_DST_PORT_UDP;

			if (dev->dev->caps.tunnel_offload_mode ==
			    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX4_IB_RX_HASH_INNER;
		}
		resp.response_length = offsetof(typeof(resp), rss_caps) +
				       sizeof(resp.rss_caps);
	}

	if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
		if (dev->dev->caps.max_gso_sz &&
		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
		    IB_LINK_LAYER_ETHERNET) ||
		    (mlx4_ib_port_link_layer(ibdev, 2) ==
		    IB_LINK_LAYER_ETHERNET))) {
			resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
			resp.tso_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
		}
		resp.response_length = offsetof(typeof(resp), tso_caps) +
				       sizeof(resp.tso_caps);
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
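
/*
 * Note on the resp.response_length accounting above: extended uverbs
 * responses are versioned by size. Each optional block is included
 * only if the caller's buffer (uhw->outlen) is large enough for it,
 * and response_length grows to match, which keeps old userspace
 * working against newer kernels and vice versa.
 */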
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
static int ib_link_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
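
/*
 * The raw offsets above index into the PortInfo attribute payload in
 * out_mad->data (e.g. bytes 16-17 hold the LID, the low nibble of
 * byte 32 the port state), following the IBTA PortInfo layout.
 */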
static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
			      (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
				      IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
				      IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags = IB_PORT_CM_SUP;
	props->ip_gids = true;
	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
	if (mdev->dev->caps.pkey_table_len[port])
		props->pkey_tbl_len = 1;
	props->max_mtu = IB_MTU_4096;
	props->max_vl_num = 2;
	props->state = IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
	props->active_mtu = IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}
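
/*
 * For RoCE the IB port state is synthesized from the backing netdev:
 * the port is reported ACTIVE only while the net_device is running
 * with carrier, and active_mtu tracks the netdev MTU clamped to the
 * 4096-byte IB maximum.
 */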
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	/* props being zeroed by the caller, avoid zeroing it here */
	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
		eth_link_query_port(ibdev, port, props);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}
int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
	return -ENODEV;
}
static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}
static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}
static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
			    u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}
static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}
static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
				  struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context = to_mucontext(uctx);
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return -EAGAIN;

	if (ibdev->ops.uverbs_abi_ver ==
	    MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps = dev->dev->caps.userspace_caps;
		resp.qp_tab_size = dev->dev->caps.num_qps;
		resp.bf_reg_size = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size = dev->dev->caps.cqe_size;
	}

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err)
		return err;

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	INIT_LIST_HEAD(&context->wqn_ranges_list);
	mutex_init(&context->wqn_ranges_mutex);

	if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		return -EFAULT;
	}

	return err;
}

static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
}
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	switch (vma->vm_pgoff) {
	case 0:
		return rdma_user_mmap_io(context, vma,
					 to_mucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);
	case 1:
		if (dev->dev->caps.bf_reg_size == 0)
			return -EINVAL;
		return rdma_user_mmap_io(
			context, vma,
			to_mucontext(context)->uar.pfn +
				dev->dev->caps.num_uars,
			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
			NULL);
	case 3: {
		struct mlx4_clock_params params;
		int ret;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);
		if (ret)
			return ret;
		return rdma_user_mmap_io(
			context, vma,
			(pci_resource_start(dev->dev->persist->pdev,
					    params.bar) +
			 params.offset) >> PAGE_SHIFT,
			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
			NULL);
	}
	default:
		return -EINVAL;
	}
}
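
/*
 * vm_pgoff acts as a selector here: page offset 0 maps the doorbell
 * UAR (non-cached), offset 1 the BlueFlame register (write-combining,
 * only if the device has BlueFlame), and offset 3 the internal HCA
 * core-clock page used for raw timestamp reads.
 */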
static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd = to_mpd(ibpd);
	struct ib_device *ibdev = ibpd->device;
	int err;

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err)
		return err;

	if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
		return -EFAULT;
	}
	return 0;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	return 0;
}
static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
	struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return -EOPNOTSUPP;

	err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
	if (err)
		return err;

	xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return 0;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
	return err;
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	return 0;
}
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}
static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	if (ndev)
		ret = 1;
	spin_unlock_bh(&mdev->iboe.lock);

	return ret;
}
struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};
#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD dst_ip
#define LAST_TCP_UDP_FIELD src_port

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))
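
/*
 * How FIELDS_NOT_SUPPORTED() works: it scans the bytes of the mask
 * that lie *after* the last field this driver can program and returns
 * non-NULL if any are set. E.g. FIELDS_NOT_SUPPORTED(ib_spec->eth.mask,
 * LAST_ETH_FIELD) starts right past eth.mask.vlan_tag and runs to the
 * end of the mask structure, rejecting specs that ask to match on
 * anything newer than we understand.
 */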
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;

	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}
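
/*
 * Unit convention (as we read it): mlx4_hw_rule_sz() returns the rule
 * size in bytes, the hardware descriptor's size field is expressed in
 * 4-byte units (hence the >> 2), and the byte count is returned so the
 * caller can advance through the mailbox buffer.
 */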
struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};
static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};
static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
					 struct ib_flow_attr *flow_attr)
{
	int i, j, k;
	void *ib_flow;
	const struct default_rules *pdefault_rules = default_table;
	u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);

	for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
		__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
		memset(&field_types, 0, sizeof(field_types));

		if (link_layer != pdefault_rules->link_layer)
			continue;

		ib_flow = flow_attr + 1;
		/* we assume the specs are sorted */
		for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
		     j < flow_attr->num_of_specs; k++) {
			union ib_flow_spec *current_flow =
				(union ib_flow_spec *)ib_flow;

			/* same layer but different type */
			if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
			     (pdefault_rules->mandatory_fields[k] &
			      IB_FLOW_SPEC_LAYER_MASK)) &&
			    (current_flow->type !=
			     pdefault_rules->mandatory_fields[k]))
				goto out;

			/* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
		}

		ib_flow = flow_attr + 1;
		for (j = 0; j < flow_attr->num_of_specs;
		     j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
			for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
				/* same layer and same type */
				if (((union ib_flow_spec *)ib_flow)->type ==
				    pdefault_rules->mandatory_not_fields[k])
					goto out;

		return i;
	}
out:
	return -1;
}
static int __mlx4_ib_create_default_rules(
		struct mlx4_ib_dev *mdev,
		struct ib_qp *qp,
		const struct default_rules *pdefault_rules,
		struct _rule_hw *mlx4_spec) {
	int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
		union ib_flow_spec ib_spec = {};
		int ret;

		switch (pdefault_rules->rules_create_list[i]) {
		case 0:
			/* no rule */
			continue;
		case IB_FLOW_SPEC_IB:
			ib_spec.type = IB_FLOW_SPEC_IB;
			ib_spec.size = sizeof(struct ib_flow_spec_ib);

			break;
		default:
			/* invalid rule */
			return -EINVAL;
		}
		/* We must put empty rule, qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
		if (ret < 0) {
			pr_info("invalid parsing\n");
			return -EINVAL;
		}

		mlx4_spec = (void *)mlx4_spec + ret;
		size += ret;
	}
	return size;
}
static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				 int domain,
				 enum mlx4_net_trans_promisc_mode flow_type,
				 u64 *reg_id)
{
	int ret, i;
	int size = 0;
	void *ib_flow;
	struct mlx4_ib_dev *mdev = to_mdev(qp->device);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	int default_flow;

	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
		pr_err("Invalid priority value %d\n", flow_attr->priority);
		return -EINVAL;
	}

	if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	ctrl = mailbox->buf;

	ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
	ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
	ctrl->port = flow_attr->port;
	ctrl->qpn = cpu_to_be32(qp->qp_num);

	ib_flow = flow_attr + 1;
	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
	/* Add default flows */
	default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
	if (default_flow >= 0) {
		ret = __mlx4_ib_create_default_rules(
				mdev, qp, default_table + default_flow,
				mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}
	for (i = 0; i < flow_attr->num_of_specs; i++) {
		ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
				      mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(mdev->dev, mailbox);
			return -EINVAL;
		}
		ib_flow += ((union ib_flow_spec *) ib_flow)->size;
		size += ret;
	}

	if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
	    flow_attr->num_of_specs == 1) {
		struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
		enum ib_flow_spec_type header_spec =
			((union ib_flow_spec *)(flow_attr + 1))->type;

		if (header_spec == IB_FLOW_SPEC_ETH)
			mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
	}

	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
		pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
	else if (ret)
		pr_err("Invalid argument. Fail to register network rule.\n");

	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return ret;
}
static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
	return err;
}
static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
				    u64 *reg_id)
{
	void *ib_flow;
	union ib_flow_spec *ib_spec;
	struct mlx4_dev	*dev = to_mdev(qp->device)->dev;
	int err = 0;

	if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
	    dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
		return 0; /* do nothing */

	ib_flow = flow_attr + 1;
	ib_spec = (union ib_flow_spec *)ib_flow;

	if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
	return err;
}
static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
				      struct ib_flow_attr *flow_attr,
				      enum mlx4_net_trans_promisc_mode *type)
{
	int err = 0;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
	    (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
	    (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
		return -EOPNOTSUPP;
	}

	if (flow_attr->num_of_specs == 0) {
		type[0] = MLX4_FS_MC_SNIFFER;
		type[1] = MLX4_FS_UC_SNIFFER;
	} else {
		union ib_flow_spec *ib_spec;

		ib_spec = (union ib_flow_spec *)(flow_attr + 1);
		if (ib_spec->type != IB_FLOW_SPEC_ETH)
			return -EINVAL;

		/* if all is zero then MC and UC */
		if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
			type[0] = MLX4_FS_MC_SNIFFER;
			type[1] = MLX4_FS_UC_SNIFFER;
		} else {
			u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
					    ib_spec->eth.mask.dst_mac[1],
					    ib_spec->eth.mask.dst_mac[2],
					    ib_spec->eth.mask.dst_mac[3],
					    ib_spec->eth.mask.dst_mac[4],
					    ib_spec->eth.mask.dst_mac[5]};

			/* Above xor was only on MC bit, non empty mask is valid
			 * only if this bit is set and rest are zero.
			 */
			if (!is_zero_ether_addr(&mac[0]))
				return -EINVAL;

			if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
				type[0] = MLX4_FS_MC_SNIFFER;
			else
				type[0] = MLX4_FS_UC_SNIFFER;
		}
	}

	return err;
}
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   struct ib_udata *udata)
{
	int err = 0, i = 0, j = 0;
	struct mlx4_ib_flow *mflow;
	enum mlx4_net_trans_promisc_mode type[2];
	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
	int is_bonded = mlx4_is_bonded(dev);

	if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
		return ERR_PTR(-EINVAL);

	if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
		return ERR_PTR(-EOPNOTSUPP);

	if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
	    (flow_attr->type != IB_FLOW_ATTR_NORMAL))
		return ERR_PTR(-EOPNOTSUPP);

	if (udata &&
	    udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
		return ERR_PTR(-EOPNOTSUPP);

	memset(type, 0, sizeof(type));

	mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
	if (!mflow) {
		err = -ENOMEM;
		goto err_free;
	}

	switch (flow_attr->type) {
	case IB_FLOW_ATTR_NORMAL:
		/* If the don't-trap flag (continue match) is set, under
		 * specific conditions traffic is replicated to the given qp
		 * without being stolen from its original destination.
		 */
		if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
			err = mlx4_ib_add_dont_trap_rule(dev,
							 flow_attr,
							 type);
			if (err)
				goto err_free;
		} else {
			type[0] = MLX4_FS_REGULAR;
		}
		break;

	case IB_FLOW_ATTR_ALL_DEFAULT:
		type[0] = MLX4_FS_ALL_DEFAULT;
		break;

	case IB_FLOW_ATTR_MC_DEFAULT:
		type[0] = MLX4_FS_MC_DEFAULT;
		break;

	case IB_FLOW_ATTR_SNIFFER:
		type[0] = MLX4_FS_MIRROR_RX_PORT;
		type[1] = MLX4_FS_MIRROR_SX_PORT;
		break;

	default:
		err = -EINVAL;
		goto err_free;
	}

	while (i < ARRAY_SIZE(type) && type[i]) {
		err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
					    type[i], &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;
		if (is_bonded) {
			/* Application always sees one port so the mirror rule
			 * must be on port #2
			 */
			flow_attr->port = 2;
			err = __mlx4_ib_create_flow(qp, flow_attr,
						    MLX4_DOMAIN_UVERBS, type[j],
						    &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}

		i++;
	}

	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
					       &mflow->reg_id[i].id);
		if (err)
			goto err_create_flow;

		if (is_bonded) {
			flow_attr->port = 2;
			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
						       &mflow->reg_id[j].mirror);
			flow_attr->port = 1;
			if (err)
				goto err_create_flow;
			j++;
		}
		/* function to create mirror rule */
		i++;
	}

	return &mflow->ibflow;

err_create_flow:
	while (i) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[i].id);
		i--;
	}

	while (j) {
		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
					     mflow->reg_id[j].mirror);
		j--;
	}
err_free:
	kfree(mflow);
	return ERR_PTR(err);
}
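
/*
 * Bonding detail: while bonded, the application only ever sees port 1,
 * so each rule created on port 1 gets a shadow "mirror" rule on
 * physical port 2 (tracked in reg_id[].mirror) so traffic survives a
 * failover to the other port.
 */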
static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
{
	int err, ret = 0;
	int i = 0;
	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
	struct mlx4_ib_flow *mflow = to_mflow(flow_id);

	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
		if (err)
			ret = err;
		if (mflow->reg_id[i].mirror) {
			err = __mlx4_ib_destroy_flow(mdev->dev,
						     mflow->reg_id[i].mirror);
			if (err)
				ret = err;
		}
		i++;
	}

	kfree(mflow);
	return ret;
}
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev	*dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_steering *ib_steering = NULL;
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
	struct mlx4_flow_reg_id	reg_id;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
		if (!ib_steering)
			return -ENOMEM;
	}

	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id.id);
	if (err) {
		pr_err("multicast attach op failed, err %d\n", err);
		goto err_malloc;
	}

	reg_id.mirror = 0;
	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
					    (mqp->port == 1) ? 2 : 1,
					    !!(mqp->flags &
					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
					    prot, &reg_id.mirror);
		if (err)
			goto err_add;
	}

	err = add_gid_entry(ibqp, gid);
	if (err)
		goto err_add;

	if (ib_steering) {
		memcpy(ib_steering->gid.raw, gid->raw, 16);
		ib_steering->reg_id = reg_id;
		mutex_lock(&mqp->mutex);
		list_add(&ib_steering->list, &mqp->steering_rules);
		mutex_unlock(&mqp->mutex);
	}
	return 0;

err_add:
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id.id);
	if (reg_id.mirror)
		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				      prot, reg_id.mirror);
err_malloc:
	kfree(ib_steering);

	return err;
}
static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_ib_gid_entry *tmp;
	struct mlx4_ib_gid_entry *ret = NULL;

	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
		if (!memcmp(raw, ge->gid.raw, 16)) {
			ret = ge;
			break;
		}
	}

	return ret;
}
static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	int err;
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct net_device *ndev;
	struct mlx4_ib_gid_entry *ge;
	struct mlx4_flow_reg_id reg_id = {0, 0};
	enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		struct mlx4_ib_steering *ib_steering;

		mutex_lock(&mqp->mutex);
		list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
			if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
				list_del(&ib_steering->list);
				break;
			}
		}
		mutex_unlock(&mqp->mutex);
		if (&ib_steering->list == &mqp->steering_rules) {
			pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
			return -EINVAL;
		}
		reg_id = ib_steering->reg_id;
		kfree(ib_steering);
	}

	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    prot, reg_id.id);
	if (err)
		return err;

	if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
					    prot, reg_id.mirror);
		if (err)
			return err;
	}

	mutex_lock(&mqp->mutex);
	ge = find_gid_entry(mqp, gid->raw);
	if (ge) {
		spin_lock_bh(&mdev->iboe.lock);
		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
		spin_unlock_bh(&mdev->iboe.lock);
		(void)ndev;
		list_del(&ge->list);
		kfree(ge);
	} else
		pr_warn("could not find mgid entry\n");

	mutex_unlock(&mqp->mutex);

	return 0;
}
static int init_node_data(struct mlx4_ib_dev *dev)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
	if (mlx4_is_master(dev->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}
static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx4_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);

	return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct mlx4_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);

	return sysfs_emit(buf, "%x\n", dev->dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx4_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);

	return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *mlx4_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group mlx4_attr_group = {
	.attrs = mlx4_class_attributes,
};
struct diag_counter {
	const char *name;
	u32 offset;
};

#define DIAG_COUNTER(_name, _offset)			\
	{ .name = #_name, .offset = _offset }

static const struct diag_counter diag_basic[] = {
	DIAG_COUNTER(rq_num_lle, 0x00),
	DIAG_COUNTER(sq_num_lle, 0x04),
	DIAG_COUNTER(rq_num_lqpoe, 0x08),
	DIAG_COUNTER(sq_num_lqpoe, 0x0C),
	DIAG_COUNTER(rq_num_lpe, 0x18),
	DIAG_COUNTER(sq_num_lpe, 0x1C),
	DIAG_COUNTER(rq_num_wrfe, 0x20),
	DIAG_COUNTER(sq_num_wrfe, 0x24),
	DIAG_COUNTER(sq_num_mwbe, 0x2C),
	DIAG_COUNTER(sq_num_bre, 0x34),
	DIAG_COUNTER(sq_num_rire, 0x44),
	DIAG_COUNTER(rq_num_rire, 0x48),
	DIAG_COUNTER(sq_num_rae, 0x4C),
	DIAG_COUNTER(rq_num_rae, 0x50),
	DIAG_COUNTER(sq_num_roe, 0x54),
	DIAG_COUNTER(sq_num_tree, 0x5C),
	DIAG_COUNTER(sq_num_rree, 0x64),
	DIAG_COUNTER(rq_num_rnr, 0x68),
	DIAG_COUNTER(sq_num_rnr, 0x6C),
	DIAG_COUNTER(rq_num_oos, 0x100),
	DIAG_COUNTER(sq_num_oos, 0x104),
};

static const struct diag_counter diag_ext[] = {
	DIAG_COUNTER(rq_num_dup, 0x130),
	DIAG_COUNTER(sq_num_to, 0x134),
};

static const struct diag_counter diag_device_only[] = {
	DIAG_COUNTER(num_cqovf, 0x1A0),
	DIAG_COUNTER(rq_num_udsdprd, 0x118),
};
static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_diag_counters *diag = dev->diag_counters;

	if (!diag[!!port_num].name)
		return NULL;

	return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
					  diag[!!port_num].num_counters,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port, int index)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_diag_counters *diag = dev->diag_counters;
	u32 hw_value[ARRAY_SIZE(diag_device_only) +
		     ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
	int ret;
	int i;

	ret = mlx4_query_diag_counters(dev->dev,
				       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
				       diag[!!port].offset, hw_value,
				       diag[!!port].num_counters, port);

	if (ret)
		return ret;

	for (i = 0; i < diag[!!port].num_counters; i++)
		stats->value[i] = hw_value[i];

	return diag[!!port].num_counters;
}
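
/*
 * The !!port indexing folds everything into two counter sets: index 0
 * holds the device-wide counters (port == 0) and index 1 the per-port
 * set used for every real port number.
 */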
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
					 const char ***name,
					 u32 **offset,
					 u32 *num,
					 bool port)
{
	u32 num_counters;

	num_counters = ARRAY_SIZE(diag_basic);

	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
		num_counters += ARRAY_SIZE(diag_ext);

	if (!port)
		num_counters += ARRAY_SIZE(diag_device_only);

	*name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
	if (!*name)
		return -ENOMEM;

	*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
	if (!*offset)
		goto err_name;

	*num = num_counters;

	return 0;

err_name:
	kfree(*name);
	return -ENOMEM;
}
static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
				       const char **name,
				       u32 *offset,
				       bool port)
{
	int i;
	int j;

	for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
		name[i] = diag_basic[i].name;
		offset[i] = diag_basic[i].offset;
	}

	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
		for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
			name[j] = diag_ext[i].name;
			offset[j] = diag_ext[i].offset;
		}
	}

	if (!port) {
		for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
			name[j] = diag_device_only[i].name;
			offset[j] = diag_device_only[i].offset;
		}
	}
}
static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
	.alloc_hw_stats = mlx4_ib_alloc_hw_stats,
	.get_hw_stats = mlx4_ib_get_hw_stats,
};
static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
	int i;
	int ret;
	bool per_port = !!(ibdev->dev->caps.flags2 &
			   MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);

	if (mlx4_is_slave(ibdev->dev))
		return 0;

	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
		/* i == 1 means we are building port counters */
		if (i && !per_port)
			continue;

		ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
						    &diag[i].offset,
						    &diag[i].num_counters, i);
		if (ret)
			goto err_alloc;

		mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
					   diag[i].offset, i);
	}

	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);

	return 0;

err_alloc:
	if (i) {
		kfree(diag[i - 1].name);
		kfree(diag[i - 1].offset);
	}

	return ret;
}
static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
{
	int i;

	for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
		kfree(ibdev->diag_counters[i].offset);
		kfree(ibdev->diag_counters[i].name);
	}
}
#define MLX4_IB_INVALID_MAC	((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
			       struct net_device *dev,
			       int port)
{
	u64 new_smac = 0;
	u64 release_mac = MLX4_IB_INVALID_MAC;
	struct mlx4_ib_qp *qp;

	read_lock(&dev_base_lock);
	new_smac = mlx4_mac_to_u64(dev->dev_addr);
	read_unlock(&dev_base_lock);

	atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);

	/* no need for update QP1 and mac registration in non-SRIOV */
	if (!mlx4_is_mfunc(ibdev->dev))
		return;

	mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
	qp = ibdev->qp1_proxy[port - 1];
	if (qp) {
		int new_smac_index;
		u64 old_smac;
		struct mlx4_update_qp_params update_params;

		mutex_lock(&qp->mutex);
		old_smac = qp->pri.smac;
		if (new_smac == old_smac)
			goto unlock;

		new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);

		if (new_smac_index < 0)
			goto unlock;

		update_params.smac_index = new_smac_index;
		if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
				   &update_params)) {
			release_mac = new_smac;
			goto unlock;
		}
		/* if old port was zero, no mac was yet registered for this QP */
		if (qp->pri.smac_port)
			release_mac = old_smac;
		qp->pri.smac = new_smac;
		qp->pri.smac_port = port;
		qp->pri.smac_index = new_smac_index;
	}

unlock:
	if (release_mac != MLX4_IB_INVALID_MAC)
		mlx4_unregister_mac(ibdev->dev, port, release_mac);
	if (qp)
		mutex_unlock(&qp->mutex);
	mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}
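
/*
 * The ordering above keeps the proxy QP1 usable at every step: the new
 * source MAC is registered first, the QP is repointed to it, and only
 * then is the stale MAC (or, on any failure, the new one) released, so
 * the QP never transmits with an unregistered SMAC.
 */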
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
				 struct net_device *dev,
				 unsigned long event)
{
	struct mlx4_ib_iboe *iboe;
	int update_qps_port = -1;
	int port;

	ASSERT_RTNL();

	iboe = &ibdev->iboe;

	spin_lock_bh(&iboe->lock);
	mlx4_foreach_ib_transport_port(port, ibdev->dev) {

		iboe->netdevs[port - 1] =
			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);

		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
		     event == NETDEV_UP || event == NETDEV_CHANGE))
			update_qps_port = port;

		if (dev == iboe->netdevs[port - 1] &&
		    (event == NETDEV_UP || event == NETDEV_DOWN)) {
			enum ib_port_state port_state;
			struct ib_event ibev = { };

			if (ib_get_cached_port_state(&ibdev->ib_dev, port,
						     &port_state))
				continue;

			if (event == NETDEV_UP &&
			    (port_state != IB_PORT_ACTIVE ||
			     iboe->last_port_state[port - 1] != IB_PORT_DOWN))
				continue;
			if (event == NETDEV_DOWN &&
			    (port_state != IB_PORT_DOWN ||
			     iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
				continue;
			iboe->last_port_state[port - 1] = port_state;

			ibev.device = &ibdev->ib_dev;
			ibev.element.port_num = port;
			ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
							  IB_EVENT_PORT_ERR;
			ib_dispatch_event(&ibev);
		}

	}
	spin_unlock_bh(&iboe->lock);

	if (update_qps_port > 0)
		mlx4_ib_update_qps(ibdev, dev, update_qps_port);
}
static int mlx4_ib_netdev_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlx4_ib_dev *ibdev;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
	mlx4_ib_scan_netdevs(ibdev, dev, event);

	return NOTIFY_DONE;
}
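/*
 * Netdevice notifier semantics, for reference: the callback runs under
 * RTNL for most events, and returning NOTIFY_DONE means "seen, nothing
 * to veto". A minimal registration sketch (mirroring what mlx4_ib_add()
 * does later with iboe->nb):
 *
 *	nb.notifier_call = mlx4_ib_netdev_event;
 *	err = register_netdevice_notifier(&nb);
 *
 * The init_net check above restricts the driver to the initial network
 * namespace; netdevs that live in other namespaces are ignored.
 */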
static void init_pkeys(struct mlx4_ib_dev *ibdev)
{
	int port;
	int slave;
	int i;

	if (mlx4_is_master(ibdev->dev)) {
		for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
		     ++slave) {
			for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
				for (i = 0;
				     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
				     ++i) {
					ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
					/* master has the identity virt2phys pkey mapping */
						(slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
							ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
					mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
							     ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
				}
			}
		}
		/* initialize pkey cache */
		for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
			for (i = 0;
			     i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
			     ++i)
				ibdev->pkeys.phys_pkey_cache[port - 1][i] =
					/* initializer elided in this excerpt */
		}
	}
}
static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i, j, eq = 0, total_eqs = 0;

	ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
				  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
	if (!ibdev->eq_table)
		return;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
		     j++, total_eqs++) {
			if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
				continue;
			ibdev->eq_table[eq] = total_eqs;
			if (!mlx4_assign_eq(dev, i,
					    &ibdev->eq_table[eq]))
				eq++;
			else
				ibdev->eq_table[eq] = -1;
		}
	}

	for (i = eq; i < dev->caps.num_comp_vectors;
	     ibdev->eq_table[i++] = -1)
		;

	/* Advertise the new number of EQs to clients */
	ibdev->ib_dev.num_comp_vectors = eq;
}
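/*
 * The EQ table built here backs the completion vectors that ULPs select
 * when creating a CQ. An illustrative (hypothetical) consumer, using
 * only standard verbs:
 *
 *	struct ib_cq_init_attr attr = {
 *		.cqe = 256,
 *		.comp_vector = n % device->num_comp_vectors,
 *	};
 *	cq = ib_create_cq(device, comp_handler, NULL, ctx, &attr);
 *
 * Spreading comp_vector values across CQs spreads completion-interrupt
 * load over the EQs assigned above.
 */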
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
	int i;
	int total_eqs = ibdev->ib_dev.num_comp_vectors;

	/* no eqs were allocated */
	if (!ibdev->eq_table)
		return;

	/* Reset the advertised EQ number */
	ibdev->ib_dev.num_comp_vectors = 0;

	for (i = 0; i < total_eqs; i++)
		mlx4_release_eq(dev, ibdev->eq_table[i]);

	kfree(ibdev->eq_table);
	ibdev->eq_table = NULL;
}
static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	int err;

	if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
		immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	} else {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
			immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
				RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
		immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
		if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
		    RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
			immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	}

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
static void get_fw_ver_str(struct ib_device *device, char *str)
{
	struct mlx4_ib_dev *dev =
		container_of(device, struct mlx4_ib_dev, ib_dev);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
		 (int) (dev->dev->caps.fw_ver >> 32),
		 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dev->caps.fw_ver & 0xffff);
}
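/*
 * The firmware version is kept as one 64-bit word: major in bits 63:32,
 * minor in bits 31:16, subminor in bits 15:0. Worked example: a fw_ver
 * of 0x0000000200280138 prints as "2.40.312" (0x28 == 40, 0x138 == 312).
 */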
static const struct ib_device_ops mlx4_ib_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_MLX4,
	.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,

	.add_gid = mlx4_ib_add_gid,
	.alloc_mr = mlx4_ib_alloc_mr,
	.alloc_pd = mlx4_ib_alloc_pd,
	.alloc_ucontext = mlx4_ib_alloc_ucontext,
	.attach_mcast = mlx4_ib_mcg_attach,
	.create_ah = mlx4_ib_create_ah,
	.create_cq = mlx4_ib_create_cq,
	.create_qp = mlx4_ib_create_qp,
	.create_srq = mlx4_ib_create_srq,
	.dealloc_pd = mlx4_ib_dealloc_pd,
	.dealloc_ucontext = mlx4_ib_dealloc_ucontext,
	.del_gid = mlx4_ib_del_gid,
	.dereg_mr = mlx4_ib_dereg_mr,
	.destroy_ah = mlx4_ib_destroy_ah,
	.destroy_cq = mlx4_ib_destroy_cq,
	.destroy_qp = mlx4_ib_destroy_qp,
	.destroy_srq = mlx4_ib_destroy_srq,
	.detach_mcast = mlx4_ib_mcg_detach,
	.disassociate_ucontext = mlx4_ib_disassociate_ucontext,
	.drain_rq = mlx4_ib_drain_rq,
	.drain_sq = mlx4_ib_drain_sq,
	.get_dev_fw_str = get_fw_ver_str,
	.get_dma_mr = mlx4_ib_get_dma_mr,
	.get_link_layer = mlx4_ib_port_link_layer,
	.get_netdev = mlx4_ib_get_netdev,
	.get_port_immutable = mlx4_port_immutable,
	.map_mr_sg = mlx4_ib_map_mr_sg,
	.mmap = mlx4_ib_mmap,
	.modify_cq = mlx4_ib_modify_cq,
	.modify_device = mlx4_ib_modify_device,
	.modify_port = mlx4_ib_modify_port,
	.modify_qp = mlx4_ib_modify_qp,
	.modify_srq = mlx4_ib_modify_srq,
	.poll_cq = mlx4_ib_poll_cq,
	.post_recv = mlx4_ib_post_recv,
	.post_send = mlx4_ib_post_send,
	.post_srq_recv = mlx4_ib_post_srq_recv,
	.process_mad = mlx4_ib_process_mad,
	.query_ah = mlx4_ib_query_ah,
	.query_device = mlx4_ib_query_device,
	.query_gid = mlx4_ib_query_gid,
	.query_pkey = mlx4_ib_query_pkey,
	.query_port = mlx4_ib_query_port,
	.query_qp = mlx4_ib_query_qp,
	.query_srq = mlx4_ib_query_srq,
	.reg_user_mr = mlx4_ib_reg_user_mr,
	.req_notify_cq = mlx4_ib_arm_cq,
	.rereg_user_mr = mlx4_ib_rereg_user_mr,
	.resize_cq = mlx4_ib_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
};
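/*
 * ib_set_device_ops() fills in each callback that is set in the given
 * table and not already set on the device, so repeated calls layer the
 * feature-specific tables below (WQ, MW, XRC, flow steering) on top of
 * this base table without clobbering unrelated entries. Sketch of the
 * pattern used in mlx4_ib_add():
 *
 *	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
 *	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
 *		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
 */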
static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
	.create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
	.create_wq = mlx4_ib_create_wq,
	.destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
	.destroy_wq = mlx4_ib_destroy_wq,
	.modify_wq = mlx4_ib_modify_wq,

	INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
			   ib_rwq_ind_tbl),
};
static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
	.alloc_mw = mlx4_ib_alloc_mw,
	.dealloc_mw = mlx4_ib_dealloc_mw,

	INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
};

static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
	.alloc_xrcd = mlx4_ib_alloc_xrcd,
	.dealloc_xrcd = mlx4_ib_dealloc_xrcd,

	INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
};

static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
	.create_flow = mlx4_ib_create_flow,
	.destroy_flow = mlx4_ib_destroy_flow,
};
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i, j;
	int err;
	struct mlx4_ib_iboe *iboe;
	int ib_num_ports = 0;
	int num_req_counters;
	int allocated;
	u32 counter_index;
	struct counter_index *new_counter_index = NULL;

	pr_info_once("%s", mlx4_ib_version);

	num_ports = 0;
	mlx4_foreach_ib_transport_port(i, dev)
		num_ports++;

	/* No point in registering a device with no ports... */
	if (num_ports == 0)
		return NULL;

	ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
	if (!ibdev) {
		dev_err(&dev->persist->pdev->dev,
			"Device struct alloc failed\n");
		return NULL;
	}

	iboe = &ibdev->iboe;

	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
		goto err_dealloc;

	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
		goto err_pd;

	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
				 PAGE_SIZE);
	if (!ibdev->uar_map)
		goto err_uar;
	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

	ibdev->dev = dev;
	ibdev->bond_next_port	= 0;

	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
	ibdev->num_ports		= num_ports;
	ibdev->ib_dev.phys_port_cnt	= mlx4_is_bonded(dev) ?
						1 : ibdev->num_ports;
	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
	ibdev->ib_dev.dev.parent	= &dev->persist->pdev->dev;

	ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);

	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
	    ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
	    IB_LINK_LAYER_ETHERNET) ||
	     (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
	    IB_LINK_LAYER_ETHERNET)))
		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
	}

	if (check_flow_steering_support(dev)) {
		ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
		ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
	}

	if (!dev->caps.userspace_caps)
		ibdev->ib_dev.ops.uverbs_abi_ver =
			MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;

	mlx4_ib_alloc_eqs(dev, ibdev);

	spin_lock_init(&iboe->lock);

	if (init_node_data(ibdev))
		goto err_map;
	mlx4_init_sl2vl_tbl(ibdev);

	for (i = 0; i < ibdev->num_ports; ++i) {
		mutex_init(&ibdev->counters_table[i].mutex);
		INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
		iboe->last_port_state[i] = IB_PORT_DOWN;
	}

	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
	for (i = 0; i < num_req_counters; ++i) {
		mutex_init(&ibdev->qp1_proxy_lock[i]);
		allocated = 0;
		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
			err = mlx4_counter_alloc(ibdev->dev, &counter_index,
						 MLX4_RES_USAGE_DRIVER);
			/* if failed to allocate a new counter, use default */
			if (err)
				counter_index =
					mlx4_get_default_counter_index(dev,
								       i + 1);
			else
				allocated = 1;
		} else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
			counter_index = mlx4_get_default_counter_index(dev,
								       i + 1);
		}
		new_counter_index = kmalloc(sizeof(*new_counter_index),
					    GFP_KERNEL);
		if (!new_counter_index) {
			if (allocated)
				mlx4_counter_free(ibdev->dev, counter_index);
			goto err_counter;
		}
		new_counter_index->index = counter_index;
		new_counter_index->allocated = allocated;
		list_add_tail(&new_counter_index->list,
			      &ibdev->counters_table[i].counters_list);
		ibdev->counters_table[i].default_counter = counter_index;
		pr_info("counter index %d for port %d allocated %d\n",
			counter_index, i + 1, allocated);
	}
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports ; ++i) {
			new_counter_index =
					kmalloc(sizeof(struct counter_index),
						GFP_KERNEL);
			if (!new_counter_index)
				goto err_counter;
			new_counter_index->index = counter_index;
			new_counter_index->allocated = 0;
			list_add_tail(&new_counter_index->list,
				      &ibdev->counters_table[i].counters_list);
			ibdev->counters_table[i].default_counter =
								counter_index;
		}

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	INIT_LIST_HEAD(&ibdev->qp_list);
	spin_lock_init(&ibdev->reset_flow_resource_lock);

	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0,
					    MLX4_RES_USAGE_DRIVER);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
				      sizeof(long),
				      GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap)
			goto err_steer_qp_release;

		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
				    ibdev->steer_qpn_count);
			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
					dev, ibdev->steer_qpn_base,
					ibdev->steer_qpn_base +
					ibdev->steer_qpn_count - 1);
			if (err)
				goto err_steer_free_bitmap;
		} else {
			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
				    ibdev->steer_qpn_count);
		}
	}

	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

	if (mlx4_ib_alloc_diag_counters(ibdev))
		goto err_steer_free_bitmap;

	rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
	if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
			       &dev->persist->pdev->dev))
		goto err_diag_counters;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (!iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err) {
			iboe->nb.notifier_call = NULL;
			goto err_notif;
		}
	}
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
		err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
		if (err)
			goto err_notif;
	}

	ibdev->ib_active = true;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
					 &ibdev->ib_dev);

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs which are active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;

err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_diag_counters:
	mlx4_ib_diag_cleanup(ibdev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
			      ibdev->steer_qpn_count);
err_counter:
	for (i = 0; i < ibdev->num_ports; ++i)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);

err_map:
	mlx4_ib_free_eqs(dev, ibdev);
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
		 qpn, dev->steer_qpn_base))
		/* not supposed to be here */
		return;

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}
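/*
 * The steering QPN allocator is a plain bitmap of contiguous regions.
 * bitmap_find_free_region()/bitmap_release_region() work on power-of-2
 * sized, naturally aligned blocks, which is why both paths round the
 * requested count up via get_count_order(). Worked example: count = 3
 * gives get_count_order(3) == 2, i.e. a 4-QPN region aligned to a
 * multiple of 4 within the range starting at steer_qpn_base.
 */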
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
					    MLX4_FS_REGULAR, &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}
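/*
 * The rule built above is intentionally empty: an IB_FLOW_SPEC_IB spec
 * with an all-zero mask matches any IB L2 header, so attaching it
 * steers all IB traffic on mqp->port to this QP. A narrower,
 * hypothetical variant would set mask and value before the create call,
 * for instance matching on the destination LID:
 *
 *	ib_spec->mask.dlid = cpu_to_be16(0xffff);
 *	ib_spec->val.dlid  = cpu_to_be16(some_lid);
 *
 * The reg_id returned by __mlx4_ib_create_flow() is the handle the
 * detach path hands back to __mlx4_ib_destroy_flow().
 */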
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
	ibdev->ib_active = false;
	flush_workqueue(wq);

	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	mlx4_ib_diag_cleanup(ibdev);

	if (ibdev->steer_qpn_count)
		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
				      ibdev->steer_qpn_count);
	kfree(ibdev->ib_uc_qpns_bitmap);

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm)
		return;

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof(struct mlx4_ib_demux_work), GFP_ATOMIC);
		if (!dm[i]) {
			while (--i >= 0)
				kfree(dm[i]);
			goto out;
		}
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
	}
	/* initialize or tear down tunnel QPs for the slave */
	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
	if (!ibdev->sriov.is_going_down) {
		for (i = 0; i < ports; i++)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	} else {
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
		for (i = 0; i < ports; i++)
			kfree(dm[i]);
	}
out:
	kfree(dm);
}
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *mqp;
	unsigned long flags_qp;
	unsigned long flags_cq;
	struct mlx4_ib_cq *send_mcq, *recv_mcq;
	struct list_head cq_notify_list;
	struct mlx4_cq *mcq;
	unsigned long flags;

	pr_warn("mlx4_ib_handle_catas_error was started\n");
	INIT_LIST_HEAD(&cq_notify_list);

	/* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_notify_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		/* Now, handle the QP's receive queue */
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed for SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_notify_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}

	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
	pr_warn("mlx4_ib_handle_catas_error ended\n");
}
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}
void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
{
	u64 sl2vl;
	int err;

	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
	if (err) {
		pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
		       port, err);
		sl2vl = 0;
	}
	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
}

static void ib_sl2vl_update_work(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *mdev = ew->ib_dev;
	int port = ew->port;

	mlx4_ib_sl2vl_update(mdev, port);
	kfree(ew);
}
void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
				     int port)
{
	struct ib_event_work *ew;

	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
	if (ew) {
		INIT_WORK(&ew->work, ib_sl2vl_update_work);
		ew->port = port;
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
	}
}
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (!mlx4_is_slave(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			if (mlx4_is_master(dev))
				mlx4_ib_invalidate_all_guid_record(ibdev, p);
			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
		if (!ew)
			break;

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device	      = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};
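/*
 * The mlx4 core driver invokes these callbacks as HCAs come and go:
 * .add runs once per probed device and returns the context that is
 * later passed to .remove and .event. MLX4_PROT_IB_IPV6 identifies this
 * consumer as the IB protocol driver, and MLX4_INTFF_BONDING opts the
 * interface into port bonding, letting the core tear down and re-create
 * the per-device context (as a single bonded device) when the two
 * Ethernet ports join or leave the same bond.
 */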
static int __init mlx4_ib_init(void)
{
	int err;

	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}
static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);