/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
34 #include <linux/module.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/netdevice.h>
39 #include <linux/inetdevice.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/if_vlan.h>
42 #include <linux/sched/mm.h>
43 #include <linux/sched/task.h>
46 #include <net/addrconf.h>
47 #include <net/devlink.h>
49 #include <rdma/ib_smi.h>
50 #include <rdma/ib_user_verbs.h>
51 #include <rdma/ib_addr.h>
52 #include <rdma/ib_cache.h>
54 #include <net/bonding.h>
56 #include <linux/mlx4/driver.h>
57 #include <linux/mlx4/cmd.h>
58 #include <linux/mlx4/qp.h>
61 #include <rdma/mlx4-abi.h>
63 #define DRV_NAME MLX4_IB_DRV_NAME
64 #define DRV_VERSION "4.0-0"
66 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
67 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
68 #define MLX4_IB_CARD_REV_A0 0xA0
70 MODULE_AUTHOR("Roland Dreier");
71 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
72 MODULE_LICENSE("Dual BSD/GPL");
74 int mlx4_ib_sm_guid_assign = 0;
75 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
76 MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");
static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";
82 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u32 port_num);
86 static struct workqueue_struct *wq;
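/* Prepare an SMP MAD for a SubnGet query: base/class version 1,
 * LID-routed subnet management class, method GET. Callers fill in
 * attr_id/attr_mod before handing it to mlx4_MAD_IFC().
 */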
88 static void init_query_mad(struct ib_smp *mad)
90 mad->base_version = 1;
91 mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
92 mad->class_version = 1;
93 mad->method = IB_MGMT_METHOD_GET;
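/* Device-managed flow steering (DMFS) is usable only when firmware
 * enables it (FS_EN) on Ethernet ports; with IB ports it additionally
 * requires DMFS_IPOIB and is not available in multi-function mode.
 */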
96 static int check_flow_steering_support(struct mlx4_dev *dev)
98 int eth_num_ports = 0;
101 int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
105 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
107 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
	dmfs &= (!ib_num_ports ||
		 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
		(eth_num_ports &&
		 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
113 if (ib_num_ports && mlx4_is_mfunc(dev)) {
114 pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
121 static int num_ib_ports(struct mlx4_dev *dev)
126 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
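/* Resolve the netdev backing an IB port. When the two ports are bonded,
 * report the bond's active slave rather than the raw port netdev.
 */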
132 static struct net_device *mlx4_ib_get_netdev(struct ib_device *device,
135 struct mlx4_ib_dev *ibdev = to_mdev(device);
136 struct net_device *dev;
139 dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
142 if (mlx4_is_bonded(ibdev->dev)) {
143 struct net_device *upper = NULL;
145 upper = netdev_master_upper_dev_get_rcu(dev);
147 struct net_device *active;
149 active = bond_option_active_slave_get_rcu(netdev_priv(upper));
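/* Push the cached GID table to firmware via SET_PORT. The _v1 variant
 * programs plain RoCE v1 GIDs; the _v1_v2 variant below also encodes the
 * RoCE version per entry. When bonded, port 2 is programmed as well.
 */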
162 static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
163 struct mlx4_ib_dev *ibdev,
166 struct mlx4_cmd_mailbox *mailbox;
168 struct mlx4_dev *dev = ibdev->dev;
170 union ib_gid *gid_tbl;
172 mailbox = mlx4_alloc_cmd_mailbox(dev);
176 gid_tbl = mailbox->buf;
178 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
179 memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));
	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);
191 mlx4_free_cmd_mailbox(dev, mailbox);
195 static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
196 struct mlx4_ib_dev *ibdev,
199 struct mlx4_cmd_mailbox *mailbox;
201 struct mlx4_dev *dev = ibdev->dev;
212 mailbox = mlx4_alloc_cmd_mailbox(dev);
216 gid_tbl = mailbox->buf;
217 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
218 memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
219 if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
220 gid_tbl[i].version = 2;
221 if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);
236 mlx4_free_cmd_mailbox(dev, mailbox);
240 static int mlx4_ib_update_gids(struct gid_entry *gids,
241 struct mlx4_ib_dev *ibdev,
244 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
245 return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
247 return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
250 static void free_gid_entry(struct gid_entry *entry)
252 memset(&entry->gid, 0, sizeof(entry->gid));
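/* GID table add callback: find a matching or free slot in the per-port
 * software cache under iboe->lock, create or take a reference on its
 * context, and, if a new entry was written, mirror the whole table to
 * hardware outside the lock via mlx4_ib_update_gids().
 */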
257 static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
259 struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
260 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
261 struct mlx4_port_gid_table *port_gid_table;
262 int free = -1, found = -1;
266 struct gid_entry *gids = NULL;
267 u16 vlan_id = 0xffff;
270 if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
273 if (attr->port_num > MLX4_MAX_PORTS)
279 ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
282 port_gid_table = &iboe->gids[attr->port_num - 1];
283 spin_lock_bh(&iboe->lock);
284 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
285 if (!memcmp(&port_gid_table->gids[i].gid,
286 &attr->gid, sizeof(attr->gid)) &&
287 port_gid_table->gids[i].gid_type == attr->gid_type &&
288 port_gid_table->gids[i].vlan_id == vlan_id) {
292 if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
293 free = i; /* HW has space */
300 port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
301 if (!port_gid_table->gids[free].ctx) {
304 *context = port_gid_table->gids[free].ctx;
305 memcpy(&port_gid_table->gids[free].gid,
306 &attr->gid, sizeof(attr->gid));
307 port_gid_table->gids[free].gid_type = attr->gid_type;
308 port_gid_table->gids[free].vlan_id = vlan_id;
309 port_gid_table->gids[free].ctx->real_index = free;
310 port_gid_table->gids[free].ctx->refcount = 1;
315 struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
319 if (!ret && hw_update) {
320 gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
325 free_gid_entry(&port_gid_table->gids[free]);
327 for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
328 memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
329 gids[i].gid_type = port_gid_table->gids[i].gid_type;
333 spin_unlock_bh(&iboe->lock);
335 if (!ret && hw_update) {
336 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
338 spin_lock_bh(&iboe->lock);
340 free_gid_entry(&port_gid_table->gids[free]);
341 spin_unlock_bh(&iboe->lock);
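/* GID table del callback: drop the cache entry's refcount and, once it
 * reaches zero, clear the slot and rewrite the hardware GID table.
 */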
349 static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
351 struct gid_cache_context *ctx = *context;
352 struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
353 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
354 struct mlx4_port_gid_table *port_gid_table;
357 struct gid_entry *gids = NULL;
359 if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
362 if (attr->port_num > MLX4_MAX_PORTS)
365 port_gid_table = &iboe->gids[attr->port_num - 1];
366 spin_lock_bh(&iboe->lock);
369 if (!ctx->refcount) {
370 unsigned int real_index = ctx->real_index;
372 free_gid_entry(&port_gid_table->gids[real_index]);
376 if (!ret && hw_update) {
379 gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
		for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
			memcpy(&gids[i].gid,
			       &port_gid_table->gids[i].gid,
			       sizeof(union ib_gid));
			gids[i].gid_type =
				port_gid_table->gids[i].gid_type;
		}
393 spin_unlock_bh(&iboe->lock);
395 if (!ret && hw_update) {
396 ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
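/* Translate a core GID-cache index into the slot actually programmed in
 * hardware for that port (stored as real_index in the entry's context).
 */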
402 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
403 const struct ib_gid_attr *attr)
405 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
406 struct gid_cache_context *ctx = NULL;
407 struct mlx4_port_gid_table *port_gid_table;
408 int real_index = -EINVAL;
411 u32 port_num = attr->port_num;
413 if (port_num > MLX4_MAX_PORTS)
416 if (mlx4_is_bonded(ibdev->dev))
419 if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
422 spin_lock_irqsave(&iboe->lock, flags);
423 port_gid_table = &iboe->gids[port_num - 1];
425 for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
426 if (!memcmp(&port_gid_table->gids[i].gid,
427 &attr->gid, sizeof(attr->gid)) &&
428 attr->gid_type == port_gid_table->gids[i].gid_type) {
429 ctx = port_gid_table->gids[i].ctx;
433 real_index = ctx->real_index;
434 spin_unlock_irqrestore(&iboe->lock, flags);
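/* ib_device_ops::query_device - report device attributes and, for newer
 * userspace, extended response fields (clock offset, inline receive size,
 * RSS and TSO caps) gated on uhw->outlen.
 */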
438 static int mlx4_ib_query_device(struct ib_device *ibdev,
439 struct ib_device_attr *props,
440 struct ib_udata *uhw)
442 struct mlx4_ib_dev *dev = to_mdev(ibdev);
443 struct ib_smp *in_mad = NULL;
444 struct ib_smp *out_mad = NULL;
447 struct mlx4_uverbs_ex_query_device cmd;
448 struct mlx4_uverbs_ex_query_device_resp resp = {};
449 struct mlx4_clock_params clock_params;
452 if (uhw->inlen < sizeof(cmd))
455 err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
466 resp.response_length = offsetof(typeof(resp), response_length) +
467 sizeof(resp.response_length);
468 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
469 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
471 if (!in_mad || !out_mad)
474 init_query_mad(in_mad);
475 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
477 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
478 1, NULL, NULL, in_mad, out_mad);
482 memset(props, 0, sizeof *props);
484 have_ib_ports = num_ib_ports(dev->dev);
486 props->fw_ver = dev->dev->caps.fw_ver;
487 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
488 IB_DEVICE_PORT_ACTIVE_EVENT |
489 IB_DEVICE_SYS_IMAGE_GUID |
490 IB_DEVICE_RC_RNR_NAK_GEN |
491 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
492 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
493 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
494 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
495 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
496 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
497 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
498 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
499 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
500 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
501 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
502 if (dev->dev->caps.max_gso_sz &&
503 (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
504 (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
505 props->device_cap_flags |= IB_DEVICE_UD_TSO;
506 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
507 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
508 if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
509 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
510 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
511 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
512 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
513 props->device_cap_flags |= IB_DEVICE_XRC;
514 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
515 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
516 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
517 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
518 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
520 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
522 if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
523 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
525 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
529 props->vendor_part_id = dev->dev->persist->pdev->device;
530 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
531 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
533 props->max_mr_size = ~0ull;
534 props->page_size_cap = dev->dev->caps.page_size_cap;
535 props->max_qp = dev->dev->quotas.qp;
536 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
537 props->max_send_sge =
538 min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
539 props->max_recv_sge =
540 min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
541 props->max_sge_rd = MLX4_MAX_SGE_RD;
542 props->max_cq = dev->dev->quotas.cq;
543 props->max_cqe = dev->dev->caps.max_cqes;
544 props->max_mr = dev->dev->quotas.mpt;
545 props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
546 props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
547 props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
548 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
549 props->max_srq = dev->dev->quotas.srq;
550 props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
551 props->max_srq_sge = dev->dev->caps.max_srq_sge;
552 props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
553 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
554 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
555 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
556 props->masked_atomic_cap = props->atomic_cap;
557 props->max_pkeys = dev->dev->caps.pkey_table_len[1];
558 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
559 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
560 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
561 props->max_mcast_grp;
562 props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
563 props->timestamp_mask = 0xFFFFFFFFFFFFULL;
564 props->max_ah = INT_MAX;
566 if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
567 mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
568 if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
			props->rss_caps.max_rwq_indirection_tables =
				props->max_qp;
			props->rss_caps.max_rwq_indirection_table_size =
				dev->dev->caps.max_rss_tbl_sz;
573 props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
574 props->max_wq_type_rq = props->max_qp;
577 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
578 props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
581 props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
582 props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;
584 if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
585 resp.response_length += sizeof(resp.hca_core_clock_offset);
586 if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
587 resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
588 resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
592 if (uhw->outlen >= resp.response_length +
593 sizeof(resp.max_inl_recv_sz)) {
594 resp.response_length += sizeof(resp.max_inl_recv_sz);
595 resp.max_inl_recv_sz = dev->dev->caps.max_rq_sg *
596 sizeof(struct mlx4_wqe_data_seg);
599 if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
600 if (props->rss_caps.supported_qpts) {
601 resp.rss_caps.rx_hash_function =
602 MLX4_IB_RX_HASH_FUNC_TOEPLITZ;
604 resp.rss_caps.rx_hash_fields_mask =
605 MLX4_IB_RX_HASH_SRC_IPV4 |
606 MLX4_IB_RX_HASH_DST_IPV4 |
607 MLX4_IB_RX_HASH_SRC_IPV6 |
608 MLX4_IB_RX_HASH_DST_IPV6 |
609 MLX4_IB_RX_HASH_SRC_PORT_TCP |
610 MLX4_IB_RX_HASH_DST_PORT_TCP |
611 MLX4_IB_RX_HASH_SRC_PORT_UDP |
612 MLX4_IB_RX_HASH_DST_PORT_UDP;
614 if (dev->dev->caps.tunnel_offload_mode ==
615 MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
616 resp.rss_caps.rx_hash_fields_mask |=
617 MLX4_IB_RX_HASH_INNER;
619 resp.response_length = offsetof(typeof(resp), rss_caps) +
620 sizeof(resp.rss_caps);
623 if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
624 if (dev->dev->caps.max_gso_sz &&
625 ((mlx4_ib_port_link_layer(ibdev, 1) ==
626 IB_LINK_LAYER_ETHERNET) ||
627 (mlx4_ib_port_link_layer(ibdev, 2) ==
628 IB_LINK_LAYER_ETHERNET))) {
629 resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
630 resp.tso_caps.supported_qpts |=
631 1 << IB_QPT_RAW_PACKET;
633 resp.response_length = offsetof(typeof(resp), tso_caps) +
634 sizeof(resp.tso_caps);
638 err = ib_copy_to_udata(uhw, &resp, resp.response_length);
649 static enum rdma_link_layer
650 mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num)
652 struct mlx4_dev *dev = to_mdev(device)->dev;
654 return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
655 IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
658 static int ib_link_query_port(struct ib_device *ibdev, u32 port,
659 struct ib_port_attr *props, int netw_view)
661 struct ib_smp *in_mad = NULL;
662 struct ib_smp *out_mad = NULL;
663 int ext_active_speed;
664 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
667 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
668 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
669 if (!in_mad || !out_mad)
672 init_query_mad(in_mad);
673 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
674 in_mad->attr_mod = cpu_to_be32(port);
676 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
677 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
685 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
686 props->lmc = out_mad->data[34] & 0x7;
687 props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
688 props->sm_sl = out_mad->data[36] & 0xf;
689 props->state = out_mad->data[32] & 0xf;
690 props->phys_state = out_mad->data[33] >> 4;
691 props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
696 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
697 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
698 props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
699 props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
700 props->active_width = out_mad->data[31] & 0xf;
701 props->active_speed = out_mad->data[35] >> 4;
702 props->max_mtu = out_mad->data[41] & 0xf;
703 props->active_mtu = out_mad->data[36] >> 4;
704 props->subnet_timeout = out_mad->data[51] & 0x1f;
705 props->max_vl_num = out_mad->data[37] >> 4;
706 props->init_type_reply = out_mad->data[41] >> 4;
708 /* Check if extended speeds (EDR/FDR/...) are supported */
709 if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
710 ext_active_speed = out_mad->data[62] >> 4;
		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
722 /* If reported active speed is QDR, check if is FDR-10 */
723 if (props->active_speed == IB_SPEED_QDR) {
724 init_query_mad(in_mad);
725 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
726 in_mad->attr_mod = cpu_to_be32(port);
728 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
729 NULL, NULL, in_mad, out_mad);
733 /* Checking LinkSpeedActive for FDR-10 */
734 if (out_mad->data[15] & 0x1)
735 props->active_speed = IB_SPEED_FDR10;
738 /* Avoid wrong speed value returned by FW if the IB link is down. */
739 if (props->state == IB_PORT_DOWN)
740 props->active_speed = IB_SPEED_SDR;
748 static u8 state_to_phys_state(enum ib_port_state state)
750 return state == IB_PORT_ACTIVE ?
751 IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
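/* Port attributes for RoCE ports come from QUERY_PORT plus the state of
 * the underlying netdev (or the bond's active slave), not from SMP MADs.
 */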
754 static int eth_link_query_port(struct ib_device *ibdev, u32 port,
755 struct ib_port_attr *props)
758 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
759 struct mlx4_ib_iboe *iboe = &mdev->iboe;
760 struct net_device *ndev;
762 struct mlx4_cmd_mailbox *mailbox;
764 int is_bonded = mlx4_is_bonded(mdev->dev);
766 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
768 return PTR_ERR(mailbox);
	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
776 props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
777 (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
778 IB_WIDTH_4X : IB_WIDTH_1X;
779 props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
780 IB_SPEED_FDR : IB_SPEED_QDR;
781 props->port_cap_flags = IB_PORT_CM_SUP;
782 props->ip_gids = true;
783 props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
784 props->max_msg_sz = mdev->dev->caps.max_msg_sz;
785 if (mdev->dev->caps.pkey_table_len[port])
786 props->pkey_tbl_len = 1;
787 props->max_mtu = IB_MTU_4096;
788 props->max_vl_num = 2;
789 props->state = IB_PORT_DOWN;
790 props->phys_state = state_to_phys_state(props->state);
791 props->active_mtu = IB_MTU_256;
792 spin_lock_bh(&iboe->lock);
793 ndev = iboe->netdevs[port - 1];
794 if (ndev && is_bonded) {
795 rcu_read_lock(); /* required to get upper dev */
796 ndev = netdev_master_upper_dev_get_rcu(ndev);
802 tmp = iboe_get_mtu(ndev->mtu);
803 props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
805 props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
806 IB_PORT_ACTIVE : IB_PORT_DOWN;
807 props->phys_state = state_to_phys_state(props->state);
809 spin_unlock_bh(&iboe->lock);
811 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
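/* Dispatch the port query to the IB (MAD-based) or Ethernet (netdev-based)
 * helper according to the port's link layer.
 */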
815 int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
816 struct ib_port_attr *props, int netw_view)
820 /* props being zeroed by the caller, avoid zeroing it here */
822 err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
823 ib_link_query_port(ibdev, port, props, netw_view) :
824 eth_link_query_port(ibdev, port, props);
829 static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
830 struct ib_port_attr *props)
832 /* returns host view */
833 return __mlx4_ib_query_port(ibdev, port, props, 0);
836 int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
837 union ib_gid *gid, int netw_view)
839 struct ib_smp *in_mad = NULL;
840 struct ib_smp *out_mad = NULL;
842 struct mlx4_ib_dev *dev = to_mdev(ibdev);
844 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
846 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
847 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
848 if (!in_mad || !out_mad)
851 init_query_mad(in_mad);
852 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
853 in_mad->attr_mod = cpu_to_be32(port);
855 if (mlx4_is_mfunc(dev->dev) && netw_view)
856 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
858 err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
862 memcpy(gid->raw, out_mad->data + 8, 8);
864 if (mlx4_is_mfunc(dev->dev) && !netw_view) {
866 /* For any index > 0, return the null guid */
873 init_query_mad(in_mad);
874 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
875 in_mad->attr_mod = cpu_to_be32(index / 8);
877 err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
878 NULL, NULL, in_mad, out_mad);
882 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
886 memset(gid->raw + 8, 0, 8);
892 static int mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
895 if (rdma_protocol_ib(ibdev, port))
896 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
900 static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
903 union sl2vl_tbl_to_u64 sl2vl64;
904 struct ib_smp *in_mad = NULL;
905 struct ib_smp *out_mad = NULL;
906 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
910 if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
915 in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
916 out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
917 if (!in_mad || !out_mad)
920 init_query_mad(in_mad);
921 in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
922 in_mad->attr_mod = 0;
924 if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
925 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
932 for (jj = 0; jj < 8; jj++)
933 sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
934 *sl2vl_tbl = sl2vl64.sl64;
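/* Seed the cached SL-to-VL tables for all IB ports at load time; Ethernet
 * ports are skipped and a query failure falls back to an all-zero mapping.
 */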
942 static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
948 for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
949 if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
951 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
953 pr_err("Unable to get default sl to vl mapping for port %d. Using all zeroes (%d)\n",
957 atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
961 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
962 u16 *pkey, int netw_view)
964 struct ib_smp *in_mad = NULL;
965 struct ib_smp *out_mad = NULL;
966 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
969 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
970 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
971 if (!in_mad || !out_mad)
974 init_query_mad(in_mad);
975 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
976 in_mad->attr_mod = cpu_to_be32(index / 32);
978 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
979 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
986 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
994 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
997 return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
1000 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
1001 struct ib_device_modify *props)
1003 struct mlx4_cmd_mailbox *mailbox;
1004 unsigned long flags;
1006 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1009 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1012 if (mlx4_is_slave(to_mdev(ibdev)->dev))
1015 spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
1016 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1017 spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
1020 * If possible, pass node desc to FW, so it can generate
1021 * a 144 trap. If cmd fails, just ignore.
1023 mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
1024 if (IS_ERR(mailbox))
1027 memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1028 mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
1029 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1031 mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
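/* Wrapper around the SET_PORT command used by mlx4_ib_modify_port() to
 * update the IB port capability mask and optionally reset the qkey
 * violation counter; the mailbox layout differs for old port commands.
 */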
1036 static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u32 port,
1037 int reset_qkey_viols, u32 cap_mask)
1039 struct mlx4_cmd_mailbox *mailbox;
1042 mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
1043 if (IS_ERR(mailbox))
1044 return PTR_ERR(mailbox);
	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}
1054 err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
1055 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1058 mlx4_free_cmd_mailbox(dev->dev, mailbox);
1062 static int mlx4_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
1063 struct ib_port_modify *props)
1065 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
1066 u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
1067 struct ib_port_attr attr;
1071 /* return OK if this is RoCE. CM calls ib_modify_port() regardless
1072 * of whether port link layer is ETH or IB. For ETH ports, qkey
1073 * violations and port capabilities are not meaningful.
1078 mutex_lock(&mdev->cap_mask_mutex);
1080 err = ib_query_port(ibdev, port, &attr);
1084 cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
1085 ~props->clr_port_cap_mask;
1087 err = mlx4_ib_SET_PORT(mdev, port,
1088 !!(mask & IB_PORT_RESET_QKEY_CNTR),
1092 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
1096 static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
1097 struct ib_udata *udata)
1099 struct ib_device *ibdev = uctx->device;
1100 struct mlx4_ib_dev *dev = to_mdev(ibdev);
1101 struct mlx4_ib_ucontext *context = to_mucontext(uctx);
1102 struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
1103 struct mlx4_ib_alloc_ucontext_resp resp;
1106 if (!dev->ib_active)
1109 if (ibdev->ops.uverbs_abi_ver ==
1110 MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
1111 resp_v3.qp_tab_size = dev->dev->caps.num_qps;
1112 resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
1113 resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
1115 resp.dev_caps = dev->dev->caps.userspace_caps;
1116 resp.qp_tab_size = dev->dev->caps.num_qps;
1117 resp.bf_reg_size = dev->dev->caps.bf_reg_size;
1118 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
1119 resp.cqe_size = dev->dev->caps.cqe_size;
1122 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
1126 INIT_LIST_HEAD(&context->db_page_list);
1127 mutex_init(&context->db_page_mutex);
1129 INIT_LIST_HEAD(&context->wqn_ranges_list);
1130 mutex_init(&context->wqn_ranges_mutex);
1132 if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
1133 err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
1135 err = ib_copy_to_udata(udata, &resp, sizeof(resp));
1138 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
1145 static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1147 struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
1149 mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
1152 static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
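/* Map the UAR doorbell page, the blue-flame page, or the internal clock
 * page into userspace, keyed by vma->vm_pgoff.
 */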
1156 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
1158 struct mlx4_ib_dev *dev = to_mdev(context->device);
1160 switch (vma->vm_pgoff) {
1162 return rdma_user_mmap_io(context, vma,
1163 to_mucontext(context)->uar.pfn,
1165 pgprot_noncached(vma->vm_page_prot),
1169 if (dev->dev->caps.bf_reg_size == 0)
1171 return rdma_user_mmap_io(
1173 to_mucontext(context)->uar.pfn +
1174 dev->dev->caps.num_uars,
1175 PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
1179 struct mlx4_clock_params params;
		ret = mlx4_get_internal_clock_params(dev->dev, &params);
1186 return rdma_user_mmap_io(
1188 (pci_resource_start(dev->dev->persist->pdev,
1192 PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
1201 static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
1203 struct mlx4_ib_pd *pd = to_mpd(ibpd);
1204 struct ib_device *ibdev = ibpd->device;
1207 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
1211 if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
1212 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
1218 static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
1220 mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
1224 static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
1226 struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
1227 struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
1228 struct ib_cq_init_attr cq_attr = {};
1231 if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1234 err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
1238 xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
1239 if (IS_ERR(xrcd->pd)) {
1240 err = PTR_ERR(xrcd->pd);
1245 xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
1246 if (IS_ERR(xrcd->cq)) {
1247 err = PTR_ERR(xrcd->cq);
1254 ib_dealloc_pd(xrcd->pd);
1256 mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
1260 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
1262 ib_destroy_cq(to_mxrcd(xrcd)->cq);
1263 ib_dealloc_pd(to_mxrcd(xrcd)->pd);
1264 mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
1268 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
1270 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1271 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1272 struct mlx4_ib_gid_entry *ge;
1274 ge = kzalloc(sizeof *ge, GFP_KERNEL);
1279 if (mlx4_ib_add_mc(mdev, mqp, gid)) {
1280 ge->port = mqp->port;
1284 mutex_lock(&mqp->mutex);
1285 list_add_tail(&ge->list, &mqp->gid_list);
1286 mutex_unlock(&mqp->mutex);
1291 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1292 struct mlx4_ib_counters *ctr_table)
1294 struct counter_index *counter, *tmp_count;
1296 mutex_lock(&ctr_table->mutex);
1297 list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
1299 if (counter->allocated)
1300 mlx4_counter_free(ibdev->dev, counter->index);
1301 list_del(&counter->list);
1304 mutex_unlock(&ctr_table->mutex);
1307 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
1310 struct net_device *ndev;
1316 spin_lock_bh(&mdev->iboe.lock);
1317 ndev = mdev->iboe.netdevs[mqp->port - 1];
1320 spin_unlock_bh(&mdev->iboe.lock);
1330 struct mlx4_ib_steering {
1331 struct list_head list;
1332 struct mlx4_flow_reg_id reg_id;
1336 #define LAST_ETH_FIELD vlan_tag
1337 #define LAST_IB_FIELD sl
1338 #define LAST_IPV4_FIELD dst_ip
1339 #define LAST_TCP_UDP_FIELD src_port
1341 /* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field  +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))
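/* Translate one ib_flow_spec into the corresponding mlx4 hardware steering
 * rule segment; returns the segment size in bytes or a negative errno for
 * unsupported spec types or fields.
 */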
1349 static int parse_flow_attr(struct mlx4_dev *dev,
1351 union ib_flow_spec *ib_spec,
1352 struct _rule_hw *mlx4_spec)
1354 enum mlx4_net_trans_rule_id type;
1356 switch (ib_spec->type) {
1357 case IB_FLOW_SPEC_ETH:
1358 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1361 type = MLX4_NET_TRANS_RULE_ID_ETH;
1362 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1364 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1366 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1367 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1369 case IB_FLOW_SPEC_IB:
1370 if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1373 type = MLX4_NET_TRANS_RULE_ID_IB;
1374 mlx4_spec->ib.l3_qpn =
1375 cpu_to_be32(qp_num);
1376 mlx4_spec->ib.qpn_mask =
1377 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1381 case IB_FLOW_SPEC_IPV4:
1382 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1385 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1386 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1387 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1388 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1389 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1392 case IB_FLOW_SPEC_TCP:
1393 case IB_FLOW_SPEC_UDP:
1394 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1397 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1398 MLX4_NET_TRANS_RULE_ID_TCP :
1399 MLX4_NET_TRANS_RULE_ID_UDP;
1400 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1401 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1402 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1403 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1409 if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1410 mlx4_hw_rule_sz(dev, type) < 0)
1412 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1413 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1414 return mlx4_hw_rule_sz(dev, type);
1417 struct default_rules {
1418 __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1419 __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1420 __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1423 static const struct default_rules default_table[] = {
1425 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1426 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1427 .rules_create_list = {IB_FLOW_SPEC_IB},
1428 .link_layer = IB_LINK_LAYER_INFINIBAND
1432 static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1433 struct ib_flow_attr *flow_attr)
1437 const struct default_rules *pdefault_rules = default_table;
1438 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1440 for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
1441 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1442 memset(&field_types, 0, sizeof(field_types));
1444 if (link_layer != pdefault_rules->link_layer)
1447 ib_flow = flow_attr + 1;
1448 /* we assume the specs are sorted */
1449 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1450 j < flow_attr->num_of_specs; k++) {
1451 union ib_flow_spec *current_flow =
1452 (union ib_flow_spec *)ib_flow;
1454 /* same layer but different type */
1455 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1456 (pdefault_rules->mandatory_fields[k] &
1457 IB_FLOW_SPEC_LAYER_MASK)) &&
1458 (current_flow->type !=
1459 pdefault_rules->mandatory_fields[k]))
1462 /* same layer, try match next one */
			if (current_flow->type ==
			    pdefault_rules->mandatory_fields[k]) {
				j++;
				ib_flow +=
					((union ib_flow_spec *)ib_flow)->size;
			}
1471 ib_flow = flow_attr + 1;
1472 for (j = 0; j < flow_attr->num_of_specs;
1473 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1474 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1475 /* same layer and same type */
1476 if (((union ib_flow_spec *)ib_flow)->type ==
1477 pdefault_rules->mandatory_not_fields[k])
1486 static int __mlx4_ib_create_default_rules(
1487 struct mlx4_ib_dev *mdev,
1489 const struct default_rules *pdefault_rules,
1490 struct _rule_hw *mlx4_spec) {
1494 for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
1495 union ib_flow_spec ib_spec = {};
1498 switch (pdefault_rules->rules_create_list[i]) {
1502 case IB_FLOW_SPEC_IB:
1503 ib_spec.type = IB_FLOW_SPEC_IB;
1504 ib_spec.size = sizeof(struct ib_flow_spec_ib);
1511 /* We must put empty rule, qpn is being ignored */
		ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
				      mlx4_spec);
1515 pr_info("invalid parsing\n");
1519 mlx4_spec = (void *)mlx4_spec + ret;
1525 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1527 enum mlx4_net_trans_promisc_mode flow_type,
1533 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1534 struct mlx4_cmd_mailbox *mailbox;
1535 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1538 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1539 pr_err("Invalid priority value %d\n", flow_attr->priority);
1543 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1546 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1547 if (IS_ERR(mailbox))
1548 return PTR_ERR(mailbox);
1549 ctrl = mailbox->buf;
1551 ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
1552 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1553 ctrl->port = flow_attr->port;
1554 ctrl->qpn = cpu_to_be32(qp->qp_num);
1556 ib_flow = flow_attr + 1;
1557 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1558 /* Add default flows */
1559 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1560 if (default_flow >= 0) {
1561 ret = __mlx4_ib_create_default_rules(
1562 mdev, qp, default_table + default_flow,
1563 mailbox->buf + size);
1565 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1570 for (i = 0; i < flow_attr->num_of_specs; i++) {
1571 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1572 mailbox->buf + size);
1574 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1577 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1581 if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1582 flow_attr->num_of_specs == 1) {
1583 struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1584 enum ib_flow_spec_type header_spec =
1585 ((union ib_flow_spec *)(flow_attr + 1))->type;
1587 if (header_spec == IB_FLOW_SPEC_ETH)
1588 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
1596 else if (ret == -ENXIO)
1597 pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
1599 pr_err("Invalid argument. Fail to register network rule.\n");
1601 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1605 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
1617 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1621 union ib_flow_spec *ib_spec;
1622 struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1625 if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1626 dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
1627 return 0; /* do nothing */
1629 ib_flow = flow_attr + 1;
1630 ib_spec = (union ib_flow_spec *)ib_flow;
1632 if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1633 return 0; /* do nothing */
	err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
				    flow_attr->port, qp->qp_num,
				    MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
				    reg_id);
1642 static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1643 struct ib_flow_attr *flow_attr,
1644 enum mlx4_net_trans_promisc_mode *type)
1648 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1649 (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1650 (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1654 if (flow_attr->num_of_specs == 0) {
1655 type[0] = MLX4_FS_MC_SNIFFER;
1656 type[1] = MLX4_FS_UC_SNIFFER;
1658 union ib_flow_spec *ib_spec;
1660 ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1661 if (ib_spec->type != IB_FLOW_SPEC_ETH)
1664 /* if all is zero than MC and UC */
1665 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1666 type[0] = MLX4_FS_MC_SNIFFER;
1667 type[1] = MLX4_FS_UC_SNIFFER;
1669 u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1670 ib_spec->eth.mask.dst_mac[1],
1671 ib_spec->eth.mask.dst_mac[2],
1672 ib_spec->eth.mask.dst_mac[3],
1673 ib_spec->eth.mask.dst_mac[4],
1674 ib_spec->eth.mask.dst_mac[5]};
1676 /* Above xor was only on MC bit, non empty mask is valid
1677 * only if this bit is set and rest are zero.
1679 if (!is_zero_ether_addr(&mac[0]))
1682 if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1683 type[0] = MLX4_FS_MC_SNIFFER;
1685 type[0] = MLX4_FS_UC_SNIFFER;
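/* ib_create_flow handler: map the verbs flow attribute onto one or two
 * mlx4 steering rule types (plus mirror rules on the second port when
 * bonded) and attach them with __mlx4_ib_create_flow().
 */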
1692 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1693 struct ib_flow_attr *flow_attr,
1694 struct ib_udata *udata)
1696 int err = 0, i = 0, j = 0;
1697 struct mlx4_ib_flow *mflow;
1698 enum mlx4_net_trans_promisc_mode type[2];
1699 struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1700 int is_bonded = mlx4_is_bonded(dev);
1702 if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
1703 return ERR_PTR(-EOPNOTSUPP);
1705 if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1706 (flow_attr->type != IB_FLOW_ATTR_NORMAL))
1707 return ERR_PTR(-EOPNOTSUPP);
	if (udata &&
	    udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
1711 return ERR_PTR(-EOPNOTSUPP);
1713 memset(type, 0, sizeof(type));
1715 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1721 switch (flow_attr->type) {
1722 case IB_FLOW_ATTR_NORMAL:
1723 /* If dont trap flag (continue match) is set, under specific
1724 * condition traffic be replicated to given qp,
1725 * without stealing it
1727 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1728 err = mlx4_ib_add_dont_trap_rule(dev,
1734 type[0] = MLX4_FS_REGULAR;
1738 case IB_FLOW_ATTR_ALL_DEFAULT:
1739 type[0] = MLX4_FS_ALL_DEFAULT;
1742 case IB_FLOW_ATTR_MC_DEFAULT:
1743 type[0] = MLX4_FS_MC_DEFAULT;
1746 case IB_FLOW_ATTR_SNIFFER:
1747 type[0] = MLX4_FS_MIRROR_RX_PORT;
1748 type[1] = MLX4_FS_MIRROR_SX_PORT;
1756 while (i < ARRAY_SIZE(type) && type[i]) {
1757 err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
1758 type[i], &mflow->reg_id[i].id);
1760 goto err_create_flow;
1762 /* Application always sees one port so the mirror rule
1763 * must be on port #2
1765 flow_attr->port = 2;
1766 err = __mlx4_ib_create_flow(qp, flow_attr,
1767 MLX4_DOMAIN_UVERBS, type[j],
1768 &mflow->reg_id[j].mirror);
1769 flow_attr->port = 1;
1771 goto err_create_flow;
1778 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1779 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1780 &mflow->reg_id[i].id);
1782 goto err_create_flow;
1785 flow_attr->port = 2;
1786 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1787 &mflow->reg_id[j].mirror);
1788 flow_attr->port = 1;
1790 goto err_create_flow;
1793 /* function to create mirror rule */
1797 return &mflow->ibflow;
1801 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1802 mflow->reg_id[i].id);
1807 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1808 mflow->reg_id[j].mirror);
1813 return ERR_PTR(err);
1816 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1820 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1821 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1823 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1824 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1827 if (mflow->reg_id[i].mirror) {
1828 err = __mlx4_ib_destroy_flow(mdev->dev,
1829 mflow->reg_id[i].mirror);
1840 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1843 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1844 struct mlx4_dev *dev = mdev->dev;
1845 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1846 struct mlx4_ib_steering *ib_steering = NULL;
1847 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1848 struct mlx4_flow_reg_id reg_id;
1850 if (mdev->dev->caps.steering_mode ==
1851 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1852 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
				    !!(mqp->flags &
				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
				    prot, &reg_id.id);
1862 pr_err("multicast attach op failed, err %d\n", err);
1867 if (mlx4_is_bonded(dev)) {
		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
					    (mqp->port == 1) ? 2 : 1,
					    !!(mqp->flags &
					       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
					    prot, &reg_id.mirror);
1877 err = add_gid_entry(ibqp, gid);
1882 memcpy(ib_steering->gid.raw, gid->raw, 16);
1883 ib_steering->reg_id = reg_id;
1884 mutex_lock(&mqp->mutex);
1885 list_add(&ib_steering->list, &mqp->steering_rules);
1886 mutex_unlock(&mqp->mutex);
	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
			      prot, reg_id.id);
1894 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1895 prot, reg_id.mirror);
1902 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1904 struct mlx4_ib_gid_entry *ge;
1905 struct mlx4_ib_gid_entry *tmp;
1906 struct mlx4_ib_gid_entry *ret = NULL;
1908 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1909 if (!memcmp(raw, ge->gid.raw, 16)) {
1918 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1921 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1922 struct mlx4_dev *dev = mdev->dev;
1923 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1924 struct net_device *ndev;
1925 struct mlx4_ib_gid_entry *ge;
1926 struct mlx4_flow_reg_id reg_id = {0, 0};
1927 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1929 if (mdev->dev->caps.steering_mode ==
1930 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1931 struct mlx4_ib_steering *ib_steering;
1933 mutex_lock(&mqp->mutex);
1934 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1935 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1936 list_del(&ib_steering->list);
1940 mutex_unlock(&mqp->mutex);
1941 if (&ib_steering->list == &mqp->steering_rules) {
1942 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1945 reg_id = ib_steering->reg_id;
	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
				    prot, reg_id.id);
1954 if (mlx4_is_bonded(dev)) {
1955 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1956 prot, reg_id.mirror);
1961 mutex_lock(&mqp->mutex);
1962 ge = find_gid_entry(mqp, gid->raw);
1964 spin_lock_bh(&mdev->iboe.lock);
1965 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1968 spin_unlock_bh(&mdev->iboe.lock);
1971 list_del(&ge->list);
1974 pr_warn("could not find mgid entry\n");
1976 mutex_unlock(&mqp->mutex);
1981 static int init_node_data(struct mlx4_ib_dev *dev)
1983 struct ib_smp *in_mad = NULL;
1984 struct ib_smp *out_mad = NULL;
1985 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1988 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
1989 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1990 if (!in_mad || !out_mad)
1993 init_query_mad(in_mad);
1994 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1995 if (mlx4_is_master(dev->dev))
1996 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1998 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2002 memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
2004 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2006 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2010 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
2011 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2019 static ssize_t hca_type_show(struct device *device,
2020 struct device_attribute *attr, char *buf)
2022 struct mlx4_ib_dev *dev =
2023 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2025 return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device);
2027 static DEVICE_ATTR_RO(hca_type);
2029 static ssize_t hw_rev_show(struct device *device,
2030 struct device_attribute *attr, char *buf)
2032 struct mlx4_ib_dev *dev =
2033 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2035 return sysfs_emit(buf, "%x\n", dev->dev->rev_id);
2037 static DEVICE_ATTR_RO(hw_rev);
2039 static ssize_t board_id_show(struct device *device,
2040 struct device_attribute *attr, char *buf)
2042 struct mlx4_ib_dev *dev =
2043 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2045 return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id);
2047 static DEVICE_ATTR_RO(board_id);
2049 static struct attribute *mlx4_class_attributes[] = {
2050 &dev_attr_hw_rev.attr,
2051 &dev_attr_hca_type.attr,
2052 &dev_attr_board_id.attr,
2056 static const struct attribute_group mlx4_attr_group = {
2057 .attrs = mlx4_class_attributes,
2060 struct diag_counter {
2065 #define DIAG_COUNTER(_name, _offset) \
2066 { .name = #_name, .offset = _offset }
2068 static const struct diag_counter diag_basic[] = {
2069 DIAG_COUNTER(rq_num_lle, 0x00),
2070 DIAG_COUNTER(sq_num_lle, 0x04),
2071 DIAG_COUNTER(rq_num_lqpoe, 0x08),
2072 DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2073 DIAG_COUNTER(rq_num_lpe, 0x18),
2074 DIAG_COUNTER(sq_num_lpe, 0x1C),
2075 DIAG_COUNTER(rq_num_wrfe, 0x20),
2076 DIAG_COUNTER(sq_num_wrfe, 0x24),
2077 DIAG_COUNTER(sq_num_mwbe, 0x2C),
2078 DIAG_COUNTER(sq_num_bre, 0x34),
2079 DIAG_COUNTER(sq_num_rire, 0x44),
2080 DIAG_COUNTER(rq_num_rire, 0x48),
2081 DIAG_COUNTER(sq_num_rae, 0x4C),
2082 DIAG_COUNTER(rq_num_rae, 0x50),
2083 DIAG_COUNTER(sq_num_roe, 0x54),
2084 DIAG_COUNTER(sq_num_tree, 0x5C),
2085 DIAG_COUNTER(sq_num_rree, 0x64),
2086 DIAG_COUNTER(rq_num_rnr, 0x68),
2087 DIAG_COUNTER(sq_num_rnr, 0x6C),
2088 DIAG_COUNTER(rq_num_oos, 0x100),
2089 DIAG_COUNTER(sq_num_oos, 0x104),
2092 static const struct diag_counter diag_ext[] = {
2093 DIAG_COUNTER(rq_num_dup, 0x130),
2094 DIAG_COUNTER(sq_num_to, 0x134),
2097 static const struct diag_counter diag_device_only[] = {
2098 DIAG_COUNTER(num_cqovf, 0x1A0),
2099 DIAG_COUNTER(rq_num_udsdprd, 0x118),
2102 static struct rdma_hw_stats *
2103 mlx4_ib_alloc_hw_device_stats(struct ib_device *ibdev)
2105 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2106 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2111 return rdma_alloc_hw_stats_struct(diag[0].name, diag[0].num_counters,
2112 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2115 static struct rdma_hw_stats *
2116 mlx4_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
2118 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2119 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2124 return rdma_alloc_hw_stats_struct(diag[1].name, diag[1].num_counters,
2125 RDMA_HW_STATS_DEFAULT_LIFESPAN);
2128 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2129 struct rdma_hw_stats *stats,
2130 u32 port, int index)
2132 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2133 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2134 u32 hw_value[ARRAY_SIZE(diag_device_only) +
2135 ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2139 ret = mlx4_query_diag_counters(dev->dev,
2140 MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2141 diag[!!port].offset, hw_value,
2142 diag[!!port].num_counters, port);
2147 for (i = 0; i < diag[!!port].num_counters; i++)
2148 stats->value[i] = hw_value[i];
2150 return diag[!!port].num_counters;
2153 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2161 num_counters = ARRAY_SIZE(diag_basic);
2163 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2164 num_counters += ARRAY_SIZE(diag_ext);
2167 num_counters += ARRAY_SIZE(diag_device_only);
2169 *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2173 *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2177 *num = num_counters;
2186 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2194 for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2195 name[i] = diag_basic[i].name;
2196 offset[i] = diag_basic[i].offset;
2199 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2200 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2201 name[j] = diag_ext[i].name;
2202 offset[j] = diag_ext[i].offset;
2207 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2208 name[j] = diag_device_only[i].name;
2209 offset[j] = diag_device_only[i].offset;
2214 static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
2215 .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats,
2216 .alloc_hw_port_stats = mlx4_ib_alloc_hw_port_stats,
2217 .get_hw_stats = mlx4_ib_get_hw_stats,
2220 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2222 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2225 bool per_port = !!(ibdev->dev->caps.flags2 &
2226 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2228 if (mlx4_is_slave(ibdev->dev))
2231 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2232 /* i == 1 means we are building port counters */
2236 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2238 &diag[i].num_counters, i);
2242 mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2246 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
2252 kfree(diag[i - 1].name);
2253 kfree(diag[i - 1].offset);
2259 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2263 for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2264 kfree(ibdev->diag_counters[i].offset);
2265 kfree(ibdev->diag_counters[i].name);
2269 #define MLX4_IB_INVALID_MAC ((u64)-1)
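/* On a source-MAC change of the port netdev, register the new MAC and
 * repoint the proxy QP1 at the new SMAC index (SR-IOV only), releasing
 * whichever MAC ends up unused.
 */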
2270 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2271 struct net_device *dev,
2275 u64 release_mac = MLX4_IB_INVALID_MAC;
2276 struct mlx4_ib_qp *qp;
2278 new_smac = mlx4_mac_to_u64(dev->dev_addr);
2279 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2281 /* no need for update QP1 and mac registration in non-SRIOV */
2282 if (!mlx4_is_mfunc(ibdev->dev))
2285 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2286 qp = ibdev->qp1_proxy[port - 1];
2290 struct mlx4_update_qp_params update_params;
2292 mutex_lock(&qp->mutex);
2293 old_smac = qp->pri.smac;
2294 if (new_smac == old_smac)
2297 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2299 if (new_smac_index < 0)
2302 update_params.smac_index = new_smac_index;
2303 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2305 release_mac = new_smac;
2308 /* if old port was zero, no mac was yet registered for this QP */
2309 if (qp->pri.smac_port)
2310 release_mac = old_smac;
2311 qp->pri.smac = new_smac;
2312 qp->pri.smac_port = port;
2313 qp->pri.smac_index = new_smac_index;
2317 if (release_mac != MLX4_IB_INVALID_MAC)
2318 mlx4_unregister_mac(ibdev->dev, port, release_mac);
2320 mutex_unlock(&qp->mutex);
2321 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
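/* Netdev notifier body: refresh the cached per-port netdevs, translate
 * netdev UP/DOWN into IB port-active/port-error events, and trigger a QP1
 * SMAC update on address changes.
 */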
2324 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2325 struct net_device *dev,
2326 unsigned long event)
2329 struct mlx4_ib_iboe *iboe;
2330 int update_qps_port = -1;
2335 iboe = &ibdev->iboe;
2337 spin_lock_bh(&iboe->lock);
2338 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2340 iboe->netdevs[port - 1] =
2341 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2343 if (dev == iboe->netdevs[port - 1] &&
2344 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2345 event == NETDEV_UP || event == NETDEV_CHANGE))
2346 update_qps_port = port;
2348 if (dev == iboe->netdevs[port - 1] &&
2349 (event == NETDEV_UP || event == NETDEV_DOWN)) {
2350 enum ib_port_state port_state;
2351 struct ib_event ibev = { };
2353 if (ib_get_cached_port_state(&ibdev->ib_dev, port,
2357 if (event == NETDEV_UP &&
2358 (port_state != IB_PORT_ACTIVE ||
2359 iboe->last_port_state[port - 1] != IB_PORT_DOWN))
2361 if (event == NETDEV_DOWN &&
2362 (port_state != IB_PORT_DOWN ||
2363 iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
2365 iboe->last_port_state[port - 1] = port_state;
2367 ibev.device = &ibdev->ib_dev;
2368 ibev.element.port_num = port;
2369 ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
2371 ib_dispatch_event(&ibev);
2375 spin_unlock_bh(&iboe->lock);
2377 if (update_qps_port > 0)
2378 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
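/* Netdevice notifier callback; events from other than init_net are ignored. */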
2381 static int mlx4_ib_netdev_event(struct notifier_block *this,
2382 unsigned long event, void *ptr)
2384 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2385 struct mlx4_ib_dev *ibdev;
2387 if (!net_eq(dev_net(dev), &init_net))
2390 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2391 mlx4_ib_scan_netdevs(ibdev, dev, event);
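/*
 * On the master, program each slave's virt2phys pkey mapping (identity for
 * the master function and for index 0, the last physical table entry
 * otherwise), sync it to the device, and initialize the physical pkey cache.
 */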
2396 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2402 if (mlx4_is_master(ibdev->dev)) {
2403 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2405 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2407 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2409 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2410 /* master has the identity virt2phys pkey mapping */
2411 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2412 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2413 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2414 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2418 /* initialize pkey cache */
2419 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2421 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2423 ibdev->pkeys.phys_pkey_cache[port-1][i] =
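/*
 * Distribute completion EQs across the ports: request an EQ per port vector
 * (skipping vectors shared with other ports), mark unused table slots with
 * -1, and advertise the number actually obtained as num_comp_vectors.
 */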
2429 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2431 int i, j, eq = 0, total_eqs = 0;
2433 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2434 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2435 if (!ibdev->eq_table)
2438 for (i = 1; i <= dev->caps.num_ports; i++) {
2439 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2441 if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2443 ibdev->eq_table[eq] = total_eqs;
2444 if (!mlx4_assign_eq(dev, i,
2445 &ibdev->eq_table[eq]))
2448 ibdev->eq_table[eq] = -1;
2452 for (i = eq; i < dev->caps.num_comp_vectors;
2453 ibdev->eq_table[i++] = -1)
2456 /* Advertise the new number of EQs to clients */
2457 ibdev->ib_dev.num_comp_vectors = eq;
2460 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2463 int total_eqs = ibdev->ib_dev.num_comp_vectors;
2465 /* no EQs were allocated */
2466 if (!ibdev->eq_table)
2469 /* Reset the advertised EQ number */
2470 ibdev->ib_dev.num_comp_vectors = 0;
2472 for (i = 0; i < total_eqs; i++)
2473 mlx4_release_eq(dev, ibdev->eq_table[i]);
2475 kfree(ibdev->eq_table);
2476 ibdev->eq_table = NULL;
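/*
 * Report per-port immutable data: core capability flags derived from the
 * port's link layer (IB, or RoCE v1/v2 plus raw packet), the maximum MAD
 * size, and the pkey/gid table lengths from ib_query_port().
 */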
2479 static int mlx4_port_immutable(struct ib_device *ibdev, u32 port_num,
2480 struct ib_port_immutable *immutable)
2482 struct ib_port_attr attr;
2483 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2486 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2487 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2488 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2490 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2491 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2492 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2493 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2494 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2495 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2496 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2497 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2498 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2501 err = ib_query_port(ibdev, port_num, &attr);
2505 immutable->pkey_tbl_len = attr.pkey_tbl_len;
2506 immutable->gid_tbl_len = attr.gid_tbl_len;
2511 static void get_fw_ver_str(struct ib_device *device, char *str)
2513 struct mlx4_ib_dev *dev =
2514 container_of(device, struct mlx4_ib_dev, ib_dev);
2515 snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
2516 (int) (dev->dev->caps.fw_ver >> 32),
2517 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2518 (int) dev->dev->caps.fw_ver & 0xffff);
2521 static const struct ib_device_ops mlx4_ib_dev_ops = {
2522 .owner = THIS_MODULE,
2523 .driver_id = RDMA_DRIVER_MLX4,
2524 .uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
2526 .add_gid = mlx4_ib_add_gid,
2527 .alloc_mr = mlx4_ib_alloc_mr,
2528 .alloc_pd = mlx4_ib_alloc_pd,
2529 .alloc_ucontext = mlx4_ib_alloc_ucontext,
2530 .attach_mcast = mlx4_ib_mcg_attach,
2531 .create_ah = mlx4_ib_create_ah,
2532 .create_cq = mlx4_ib_create_cq,
2533 .create_qp = mlx4_ib_create_qp,
2534 .create_srq = mlx4_ib_create_srq,
2535 .dealloc_pd = mlx4_ib_dealloc_pd,
2536 .dealloc_ucontext = mlx4_ib_dealloc_ucontext,
2537 .del_gid = mlx4_ib_del_gid,
2538 .dereg_mr = mlx4_ib_dereg_mr,
2539 .destroy_ah = mlx4_ib_destroy_ah,
2540 .destroy_cq = mlx4_ib_destroy_cq,
2541 .destroy_qp = mlx4_ib_destroy_qp,
2542 .destroy_srq = mlx4_ib_destroy_srq,
2543 .detach_mcast = mlx4_ib_mcg_detach,
2544 .device_group = &mlx4_attr_group,
2545 .disassociate_ucontext = mlx4_ib_disassociate_ucontext,
2546 .drain_rq = mlx4_ib_drain_rq,
2547 .drain_sq = mlx4_ib_drain_sq,
2548 .get_dev_fw_str = get_fw_ver_str,
2549 .get_dma_mr = mlx4_ib_get_dma_mr,
2550 .get_link_layer = mlx4_ib_port_link_layer,
2551 .get_netdev = mlx4_ib_get_netdev,
2552 .get_port_immutable = mlx4_port_immutable,
2553 .map_mr_sg = mlx4_ib_map_mr_sg,
2554 .mmap = mlx4_ib_mmap,
2555 .modify_cq = mlx4_ib_modify_cq,
2556 .modify_device = mlx4_ib_modify_device,
2557 .modify_port = mlx4_ib_modify_port,
2558 .modify_qp = mlx4_ib_modify_qp,
2559 .modify_srq = mlx4_ib_modify_srq,
2560 .poll_cq = mlx4_ib_poll_cq,
2561 .post_recv = mlx4_ib_post_recv,
2562 .post_send = mlx4_ib_post_send,
2563 .post_srq_recv = mlx4_ib_post_srq_recv,
2564 .process_mad = mlx4_ib_process_mad,
2565 .query_ah = mlx4_ib_query_ah,
2566 .query_device = mlx4_ib_query_device,
2567 .query_gid = mlx4_ib_query_gid,
2568 .query_pkey = mlx4_ib_query_pkey,
2569 .query_port = mlx4_ib_query_port,
2570 .query_qp = mlx4_ib_query_qp,
2571 .query_srq = mlx4_ib_query_srq,
2572 .reg_user_mr = mlx4_ib_reg_user_mr,
2573 .req_notify_cq = mlx4_ib_arm_cq,
2574 .rereg_user_mr = mlx4_ib_rereg_user_mr,
2575 .resize_cq = mlx4_ib_resize_cq,
2577 INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
2578 INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
2579 INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
2580 INIT_RDMA_OBJ_SIZE(ib_qp, mlx4_ib_qp, ibqp),
2581 INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
2582 INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
2585 static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
2586 .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
2587 .create_wq = mlx4_ib_create_wq,
2588 .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
2589 .destroy_wq = mlx4_ib_destroy_wq,
2590 .modify_wq = mlx4_ib_modify_wq,
2592 INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
2596 static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
2597 .alloc_mw = mlx4_ib_alloc_mw,
2598 .dealloc_mw = mlx4_ib_dealloc_mw,
2600 INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
2603 static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
2604 .alloc_xrcd = mlx4_ib_alloc_xrcd,
2605 .dealloc_xrcd = mlx4_ib_dealloc_xrcd,
2607 INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
2610 static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
2611 .create_flow = mlx4_ib_create_flow,
2612 .destroy_flow = mlx4_ib_destroy_flow,
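/*
 * mlx4 core probe callback: allocate the IB device, set up PD/UAR/EQs,
 * counters, the UC steering QP range and diagnostic counters, register with
 * the RDMA core, initialize MAD/SR-IOV support and the netdev notifier, and
 * finally mark the device active.
 */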
2615 static void *mlx4_ib_add(struct mlx4_dev *dev)
2617 struct mlx4_ib_dev *ibdev;
2621 struct mlx4_ib_iboe *iboe;
2622 int ib_num_ports = 0;
2623 int num_req_counters;
2626 struct counter_index *new_counter_index = NULL;
2628 pr_info_once("%s", mlx4_ib_version);
2631 mlx4_foreach_ib_transport_port(i, dev)
2634 /* No point in registering a device with no ports... */
2638 ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
2640 dev_err(&dev->persist->pdev->dev,
2641 "Device struct alloc failed\n");
2645 iboe = &ibdev->iboe;
2647 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2650 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2653 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2655 if (!ibdev->uar_map)
2657 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2660 ibdev->bond_next_port = 0;
2662 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
2663 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
2664 ibdev->num_ports = num_ports;
2665 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2666 1 : ibdev->num_ports;
2667 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
2668 ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
2670 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
2672 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2673 ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2674 IB_LINK_LAYER_ETHERNET) ||
2675 (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2676 IB_LINK_LAYER_ETHERNET)))
2677 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
2679 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2680 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2681 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
2683 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2684 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
2687 if (check_flow_steering_support(dev)) {
2688 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2689 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
2692 if (!dev->caps.userspace_caps)
2693 ibdev->ib_dev.ops.uverbs_abi_ver =
2694 MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2696 mlx4_ib_alloc_eqs(dev, ibdev);
2698 spin_lock_init(&iboe->lock);
2700 if (init_node_data(ibdev))
2702 mlx4_init_sl2vl_tbl(ibdev);
2704 for (i = 0; i < ibdev->num_ports; ++i) {
2705 mutex_init(&ibdev->counters_table[i].mutex);
2706 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2707 iboe->last_port_state[i] = IB_PORT_DOWN;
2710 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2711 for (i = 0; i < num_req_counters; ++i) {
2712 mutex_init(&ibdev->qp1_proxy_lock[i]);
2714 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2715 IB_LINK_LAYER_ETHERNET) {
2716 err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2717 MLX4_RES_USAGE_DRIVER);
2718 /* if allocating a new counter failed, use the default */
2721 mlx4_get_default_counter_index(dev,
2725 } else { /* IB_LINK_LAYER_INFINIBAND: use the default counter */
2726 counter_index = mlx4_get_default_counter_index(dev,
2729 new_counter_index = kmalloc(sizeof(*new_counter_index),
2731 if (!new_counter_index) {
2733 mlx4_counter_free(ibdev->dev, counter_index);
2736 new_counter_index->index = counter_index;
2737 new_counter_index->allocated = allocated;
2738 list_add_tail(&new_counter_index->list,
2739 &ibdev->counters_table[i].counters_list);
2740 ibdev->counters_table[i].default_counter = counter_index;
2741 pr_info("counter index %d for port %d allocated %d\n",
2742 counter_index, i + 1, allocated);
2744 if (mlx4_is_bonded(dev))
2745 for (i = 1; i < ibdev->num_ports ; ++i) {
2747 kmalloc(sizeof(struct counter_index),
2749 if (!new_counter_index)
2751 new_counter_index->index = counter_index;
2752 new_counter_index->allocated = 0;
2753 list_add_tail(&new_counter_index->list,
2754 &ibdev->counters_table[i].counters_list);
2755 ibdev->counters_table[i].default_counter =
2759 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2762 spin_lock_init(&ibdev->sm_lock);
2763 mutex_init(&ibdev->cap_mask_mutex);
2764 INIT_LIST_HEAD(&ibdev->qp_list);
2765 spin_lock_init(&ibdev->reset_flow_resource_lock);
2767 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2769 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2770 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2771 MLX4_IB_UC_STEER_QPN_ALIGN,
2772 &ibdev->steer_qpn_base, 0,
2773 MLX4_RES_USAGE_DRIVER);
2777 ibdev->ib_uc_qpns_bitmap =
2778 kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
2781 if (!ibdev->ib_uc_qpns_bitmap)
2782 goto err_steer_qp_release;
2784 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2785 bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2786 ibdev->steer_qpn_count);
2787 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2788 dev, ibdev->steer_qpn_base,
2789 ibdev->steer_qpn_base +
2790 ibdev->steer_qpn_count - 1);
2792 goto err_steer_free_bitmap;
2794 bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2795 ibdev->steer_qpn_count);
2799 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2800 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2802 if (mlx4_ib_alloc_diag_counters(ibdev))
2803 goto err_steer_free_bitmap;
2805 if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
2806 &dev->persist->pdev->dev))
2807 goto err_diag_counters;
2809 if (mlx4_ib_mad_init(ibdev))
2812 if (mlx4_ib_init_sriov(ibdev))
2815 if (!iboe->nb.notifier_call) {
2816 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2817 err = register_netdevice_notifier(&iboe->nb);
2819 iboe->nb.notifier_call = NULL;
2823 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2824 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2829 ibdev->ib_active = true;
2830 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2831 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2834 if (mlx4_is_mfunc(ibdev->dev))
2837 /* create paravirt contexts for any VFs which are active */
2838 if (mlx4_is_master(ibdev->dev)) {
2839 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2840 if (j == mlx4_master_func_num(ibdev->dev))
2842 if (mlx4_is_slave_active(ibdev->dev, j))
2843 do_slave_init(ibdev, j, 1);
2849 if (ibdev->iboe.nb.notifier_call) {
2850 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2851 pr_warn("failure unregistering notifier\n");
2852 ibdev->iboe.nb.notifier_call = NULL;
2854 flush_workqueue(wq);
2856 mlx4_ib_close_sriov(ibdev);
2859 mlx4_ib_mad_cleanup(ibdev);
2862 ib_unregister_device(&ibdev->ib_dev);
2865 mlx4_ib_diag_cleanup(ibdev);
2867 err_steer_free_bitmap:
2868 kfree(ibdev->ib_uc_qpns_bitmap);
2870 err_steer_qp_release:
2871 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2872 ibdev->steer_qpn_count);
2874 for (i = 0; i < ibdev->num_ports; ++i)
2875 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2878 mlx4_ib_free_eqs(dev, ibdev);
2879 iounmap(ibdev->uar_map);
2882 mlx4_uar_free(dev, &ibdev->priv_uar);
2885 mlx4_pd_free(dev, ibdev->priv_pdn);
2888 ib_dealloc_device(&ibdev->ib_dev);
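/*
 * Reserve @count QPNs (rounded up to a power of two) from the UC steering
 * range set aside at probe time; the first QPN is returned through @qpn.
 */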
2893 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2897 WARN_ON(!dev->ib_uc_qpns_bitmap);
2899 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2900 dev->steer_qpn_count,
2901 get_count_order(count));
2905 *qpn = dev->steer_qpn_base + offset;
2909 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2912 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2915 if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
2916 qpn, dev->steer_qpn_base))
2917 /* not supposed to be here */
2920 bitmap_release_region(dev->ib_uc_qpns_bitmap,
2921 qpn - dev->steer_qpn_base,
2922 get_count_order(count));
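/*
 * Register (or unregister) an empty IB L2 flow rule steering all IB traffic
 * to @mqp, used when device-managed flow steering handles IPoIB QPs.
 */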
2925 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2930 struct ib_flow_attr *flow = NULL;
2931 struct ib_flow_spec_ib *ib_spec;
2934 flow_size = sizeof(struct ib_flow_attr) +
2935 sizeof(struct ib_flow_spec_ib);
2936 flow = kzalloc(flow_size, GFP_KERNEL);
2939 flow->port = mqp->port;
2940 flow->num_of_specs = 1;
2941 flow->size = flow_size;
2942 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2943 ib_spec->type = IB_FLOW_SPEC_IB;
2944 ib_spec->size = sizeof(struct ib_flow_spec_ib);
2945 /* Add an empty rule for IB L2 */
2946 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2948 err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
2949 MLX4_FS_REGULAR, &mqp->reg_id);
2951 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2957 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2959 struct mlx4_ib_dev *ibdev = ibdev_ptr;
2963 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2964 devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
2965 ibdev->ib_active = false;
2966 flush_workqueue(wq);
2968 if (ibdev->iboe.nb.notifier_call) {
2969 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2970 pr_warn("failure unregistering notifier\n");
2971 ibdev->iboe.nb.notifier_call = NULL;
2974 mlx4_ib_close_sriov(ibdev);
2975 mlx4_ib_mad_cleanup(ibdev);
2976 ib_unregister_device(&ibdev->ib_dev);
2977 mlx4_ib_diag_cleanup(ibdev);
2979 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2980 ibdev->steer_qpn_count);
2981 kfree(ibdev->ib_uc_qpns_bitmap);
2983 iounmap(ibdev->uar_map);
2984 for (p = 0; p < ibdev->num_ports; ++p)
2985 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
2987 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2988 mlx4_CLOSE_PORT(dev, p);
2990 mlx4_ib_free_eqs(dev, ibdev);
2992 mlx4_uar_free(dev, &ibdev->priv_uar);
2993 mlx4_pd_free(dev, ibdev->priv_pdn);
2994 ib_dealloc_device(&ibdev->ib_dev);
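/*
 * Queue per-port work items that set up or tear down the paravirtualized
 * tunnel QPs for @slave; only the master function does anything here.
 */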
2997 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2999 struct mlx4_ib_demux_work **dm = NULL;
3000 struct mlx4_dev *dev = ibdev->dev;
3002 unsigned long flags;
3003 struct mlx4_active_ports actv_ports;
3005 unsigned int first_port;
3007 if (!mlx4_is_master(dev))
3010 actv_ports = mlx4_get_active_ports(dev, slave);
3011 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
3012 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
3014 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
3018 for (i = 0; i < ports; i++) {
3019 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
3025 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
3026 dm[i]->port = first_port + i + 1;
3027 dm[i]->slave = slave;
3028 dm[i]->do_init = do_init;
3031 /* initialize or tear down tunnel QPs for the slave */
3032 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3033 if (!ibdev->sriov.is_going_down) {
3034 for (i = 0; i < ports; i++)
3035 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3036 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3038 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3039 for (i = 0; i < ports; i++)
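/*
 * On a catastrophic error, walk every QP on the device (synchronized with QP
 * create/destroy via reset_flow_resource_lock) and collect each CQ that has
 * outstanding work and a completion handler, so its consumer gets notified.
 */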
3047 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3049 struct mlx4_ib_qp *mqp;
3050 unsigned long flags_qp;
3051 unsigned long flags_cq;
3052 struct mlx4_ib_cq *send_mcq, *recv_mcq;
3053 struct list_head cq_notify_list;
3054 struct mlx4_cq *mcq;
3055 unsigned long flags;
3057 pr_warn("mlx4_ib_handle_catas_error started\n");
3058 INIT_LIST_HEAD(&cq_notify_list);
3060 /* Go over the QP list residing on this ibdev, synchronizing with QP create/destroy. */
3061 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3063 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3064 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3065 if (mqp->sq.tail != mqp->sq.head) {
3066 send_mcq = to_mcq(mqp->ibqp.send_cq);
3067 spin_lock_irqsave(&send_mcq->lock, flags_cq);
3068 if (send_mcq->mcq.comp &&
3069 mqp->ibqp.send_cq->comp_handler) {
3070 if (!send_mcq->mcq.reset_notify_added) {
3071 send_mcq->mcq.reset_notify_added = 1;
3072 list_add_tail(&send_mcq->mcq.reset_notify,
3076 spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3078 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3079 /* Now, handle the QP's receive queue */
3080 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3081 /* no handling is needed for SRQ */
3082 if (!mqp->ibqp.srq) {
3083 if (mqp->rq.tail != mqp->rq.head) {
3084 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3085 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3086 if (recv_mcq->mcq.comp &&
3087 mqp->ibqp.recv_cq->comp_handler) {
3088 if (!recv_mcq->mcq.reset_notify_added) {
3089 recv_mcq->mcq.reset_notify_added = 1;
3090 list_add_tail(&recv_mcq->mcq.reset_notify,
3094 spin_unlock_irqrestore(&recv_mcq->lock,
3098 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3101 list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3104 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3105 pr_warn("mlx4_ib_handle_catas_error ended\n");
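/*
 * A bonded device exposes a single IB port: derive its state from the member
 * netdevs (ACTIVE if any member is running with carrier) and dispatch the
 * corresponding port event on port 1.
 */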
3108 static void handle_bonded_port_state_event(struct work_struct *work)
3110 struct ib_event_work *ew =
3111 container_of(work, struct ib_event_work, work);
3112 struct mlx4_ib_dev *ibdev = ew->ib_dev;
3113 enum ib_port_state bonded_port_state = IB_PORT_NOP;
3115 struct ib_event ibev;
3118 spin_lock_bh(&ibdev->iboe.lock);
3119 for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3120 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
3121 enum ib_port_state curr_port_state;
3127 (netif_running(curr_netdev) &&
3128 netif_carrier_ok(curr_netdev)) ?
3129 IB_PORT_ACTIVE : IB_PORT_DOWN;
3131 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3132 curr_port_state : IB_PORT_ACTIVE;
3134 spin_unlock_bh(&ibdev->iboe.lock);
3136 ibev.device = &ibdev->ib_dev;
3137 ibev.element.port_num = 1;
3138 ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3139 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3141 ib_dispatch_event(&ibev);
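/*
 * Refresh the cached SL-to-VL mapping for @port from the device; if the
 * query fails, an all-zeroes mapping is cached, as the error message notes.
 */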
3144 void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3149 err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3151 pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
3155 atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3158 static void ib_sl2vl_update_work(struct work_struct *work)
3160 struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3161 struct mlx4_ib_dev *mdev = ew->ib_dev;
3162 int port = ew->port;
3164 mlx4_ib_sl2vl_update(mdev, port);
3169 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3172 struct ib_event_work *ew;
3174 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3176 INIT_WORK(&ew->work, ib_sl2vl_update_work);
3179 queue_work(wq, &ew->work);
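/*
 * mlx4 core event callback: translate core events (port up/down, catastrophic
 * error, port management change, slave init/shutdown) into IB events and/or
 * work queued on the driver workqueue.
 */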
3183 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
3184 enum mlx4_dev_event event, unsigned long param)
3186 struct ib_event ibev;
3187 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
3188 struct mlx4_eqe *eqe = NULL;
3189 struct ib_event_work *ew;
3192 if (mlx4_is_bonded(dev) &&
3193 ((event == MLX4_DEV_EVENT_PORT_UP) ||
3194 (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3195 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3198 INIT_WORK(&ew->work, handle_bonded_port_state_event);
3200 queue_work(wq, &ew->work);
3204 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3205 eqe = (struct mlx4_eqe *)param;
3210 case MLX4_DEV_EVENT_PORT_UP:
3211 if (p > ibdev->num_ports)
3213 if (!mlx4_is_slave(dev) &&
3214 rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3215 IB_LINK_LAYER_INFINIBAND) {
3216 if (mlx4_is_master(dev))
3217 mlx4_ib_invalidate_all_guid_record(ibdev, p);
3218 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3219 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3220 mlx4_sched_ib_sl2vl_update_work(ibdev, p);
3222 ibev.event = IB_EVENT_PORT_ACTIVE;
3225 case MLX4_DEV_EVENT_PORT_DOWN:
3226 if (p > ibdev->num_ports)
3228 ibev.event = IB_EVENT_PORT_ERR;
3231 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
3232 ibdev->ib_active = false;
3233 ibev.event = IB_EVENT_DEVICE_FATAL;
3234 mlx4_ib_handle_catas_error(ibdev);
3237 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3238 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
3242 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3243 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
3245 /* need to queue only for port owner, which uses GEN_EQE */
3246 if (mlx4_is_master(dev))
3247 queue_work(wq, &ew->work);
3249 handle_port_mgmt_change_event(&ew->work);
3252 case MLX4_DEV_EVENT_SLAVE_INIT:
3253 /* here, p is the slave id */
3254 do_slave_init(ibdev, p, 1);
3255 if (mlx4_is_master(dev)) {
3258 for (i = 1; i <= ibdev->num_ports; i++) {
3259 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3260 == IB_LINK_LAYER_INFINIBAND)
3261 mlx4_ib_slave_alias_guid_event(ibdev,
3268 case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
3269 if (mlx4_is_master(dev)) {
3272 for (i = 1; i <= ibdev->num_ports; i++) {
3273 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3274 == IB_LINK_LAYER_INFINIBAND)
3275 mlx4_ib_slave_alias_guid_event(ibdev,
3280 /* here, p is the slave id */
3281 do_slave_init(ibdev, p, 0);
3288 ibev.device = ibdev_ptr;
3289 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
3291 ib_dispatch_event(&ibev);
3294 static struct mlx4_interface mlx4_ib_interface = {
3296 .remove = mlx4_ib_remove,
3297 .event = mlx4_ib_event,
3298 .protocol = MLX4_PROT_IB_IPV6,
3299 .flags = MLX4_INTFF_BONDING
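/*
 * Module init: create the ordered driver workqueue, initialize the multicast
 * group (MCG) machinery, then register with the mlx4 core.
 */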
3302 static int __init mlx4_ib_init(void)
3306 wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
3310 err = mlx4_ib_mcg_init();
3314 err = mlx4_register_interface(&mlx4_ib_interface);
3321 mlx4_ib_mcg_destroy();
3324 destroy_workqueue(wq);
3328 static void __exit mlx4_ib_cleanup(void)
3330 mlx4_unregister_interface(&mlx4_ib_interface);
3331 mlx4_ib_mcg_destroy();
3332 destroy_workqueue(wq);
3335 module_init(mlx4_ib_init);
3336 module_exit(mlx4_ib_cleanup);