/* drivers/infiniband/hw/mlx4/main.c */
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME        MLX4_IB_DRV_NAME
#define DRV_VERSION     "4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
        DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
        DRV_VERSION "\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
                                                    u8 port_num);

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version  = 1;
        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method        = IB_MGMT_METHOD_GET;
}

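/*
 * Device-managed flow steering (DMFS) is usable only if every port type
 * present on the device has the matching capability bit: DMFS_IPOIB for
 * IB ports and FS_EN for Ethernet ports.  It is also disabled for IB
 * ports in a multifunction (SR-IOV) environment.
 */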
static int check_flow_steering_support(struct mlx4_dev *dev)
{
        int eth_num_ports = 0;
        int ib_num_ports = 0;

        int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

        if (dmfs) {
                int i;
                mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
                        eth_num_ports++;
                mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                        ib_num_ports++;
                dmfs &= (!ib_num_ports ||
                         (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
                        (!eth_num_ports ||
                         (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
                if (ib_num_ports && mlx4_is_mfunc(dev)) {
                        pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
                        dmfs = 0;
                }
        }
        return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
        int ib_ports = 0;
        int i;

        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                ib_ports++;

        return ib_ports;
}

static struct net_device *mlx4_ib_get_netdev(struct ib_device *device, u8 port_num)
{
        struct mlx4_ib_dev *ibdev = to_mdev(device);
        struct net_device *dev;

        rcu_read_lock();
        dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

        if (dev) {
                if (mlx4_is_bonded(ibdev->dev)) {
                        struct net_device *upper = NULL;

                        upper = netdev_master_upper_dev_get_rcu(dev);
                        if (upper) {
                                struct net_device *active;

                                active = bond_option_active_slave_get_rcu(netdev_priv(upper));
                                if (active)
                                        dev = active;
                        }
                }
        }
        if (dev)
                dev_hold(dev);

        rcu_read_unlock();
        return dev;
}

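/*
 * Push the cached GID table to the HCA with a SET_PORT command.  For
 * RoCE v1-only firmware each entry is a bare 128-bit GID; when the
 * device is bonded, the same table is programmed on both physical
 * ports.
 */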
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
                                  struct mlx4_ib_dev *ibdev,
                                  u8 port_num)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        struct mlx4_dev *dev = ibdev->dev;
        int i;
        union ib_gid *gid_tbl;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return -ENOMEM;

        gid_tbl = mailbox->buf;

        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
                memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

        err = mlx4_cmd(dev, mailbox->dma,
                       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
                       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                       MLX4_CMD_WRAPPED);
        if (mlx4_is_bonded(dev))
                err += mlx4_cmd(dev, mailbox->dma,
                                MLX4_SET_PORT_GID_TABLE << 8 | 2,
                                1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                                MLX4_CMD_WRAPPED);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}

static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
                                     struct mlx4_ib_dev *ibdev,
                                     u8 port_num)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        struct mlx4_dev *dev = ibdev->dev;
        int i;
        struct {
                union ib_gid    gid;
                __be32          rsrvd1[2];
                __be16          rsrvd2;
                u8              type;
                u8              version;
                __be32          rsrvd3;
        } *gid_tbl;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return -ENOMEM;

        gid_tbl = mailbox->buf;
        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
                memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
                if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
                        gid_tbl[i].version = 2;
                        if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
                                gid_tbl[i].type = 1;
                }
        }

        err = mlx4_cmd(dev, mailbox->dma,
                       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
                       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                       MLX4_CMD_WRAPPED);
        if (mlx4_is_bonded(dev))
                err += mlx4_cmd(dev, mailbox->dma,
                                MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
                                1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                                MLX4_CMD_WRAPPED);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}

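/*
 * Dispatch to the v1/v2 table layout when the firmware supports RoCE v2
 * (MLX4_DEV_CAP_FLAG2_ROCE_V1_V2); otherwise fall back to the legacy
 * v1-only layout.
 */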
static int mlx4_ib_update_gids(struct gid_entry *gids,
                               struct mlx4_ib_dev *ibdev,
                               u8 port_num)
{
        if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
                return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

        return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

static void free_gid_entry(struct gid_entry *entry)
{
        memset(&entry->gid, 0, sizeof(entry->gid));
        kfree(entry->ctx);
        entry->ctx = NULL;
}

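/*
 * Add a GID to the per-port software cache.  If an identical entry
 * (GID, GID type and VLAN) already exists, only its refcount is bumped;
 * otherwise the first free slot is claimed and a snapshot of the whole
 * table is pushed to hardware outside the spinlock.
 */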
static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
        struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
        struct mlx4_port_gid_table *port_gid_table;
        int free = -1, found = -1;
        int ret = 0;
        int hw_update = 0;
        int i;
        struct gid_entry *gids = NULL;
        u16 vlan_id = 0xffff;
        u8 mac[ETH_ALEN];

        if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
                return -EINVAL;

        if (attr->port_num > MLX4_MAX_PORTS)
                return -EINVAL;

        if (!context)
                return -EINVAL;

        ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
        if (ret)
                return ret;
        port_gid_table = &iboe->gids[attr->port_num - 1];
        spin_lock_bh(&iboe->lock);
        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
                if (!memcmp(&port_gid_table->gids[i].gid,
                            &attr->gid, sizeof(attr->gid)) &&
                    port_gid_table->gids[i].gid_type == attr->gid_type &&
                    port_gid_table->gids[i].vlan_id == vlan_id) {
                        found = i;
                        break;
                }
                if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
                        free = i; /* HW has space */
        }

        if (found < 0) {
                if (free < 0) {
                        ret = -ENOSPC;
                } else {
                        port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
                        if (!port_gid_table->gids[free].ctx) {
                                ret = -ENOMEM;
                        } else {
                                *context = port_gid_table->gids[free].ctx;
                                memcpy(&port_gid_table->gids[free].gid,
                                       &attr->gid, sizeof(attr->gid));
                                port_gid_table->gids[free].gid_type = attr->gid_type;
                                port_gid_table->gids[free].vlan_id = vlan_id;
                                port_gid_table->gids[free].ctx->real_index = free;
                                port_gid_table->gids[free].ctx->refcount = 1;
                                hw_update = 1;
                        }
                }
        } else {
                struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
                *context = ctx;
                ctx->refcount++;
        }
        if (!ret && hw_update) {
                gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
                                     GFP_ATOMIC);
                if (!gids) {
                        ret = -ENOMEM;
                        *context = NULL;
                        free_gid_entry(&port_gid_table->gids[free]);
                } else {
                        for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
                                memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
                                gids[i].gid_type = port_gid_table->gids[i].gid_type;
                        }
                }
        }
        spin_unlock_bh(&iboe->lock);

        if (!ret && hw_update) {
                ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
                if (ret) {
                        spin_lock_bh(&iboe->lock);
                        *context = NULL;
                        free_gid_entry(&port_gid_table->gids[free]);
                        spin_unlock_bh(&iboe->lock);
                }
                kfree(gids);
        }

        return ret;
}

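/*
 * Drop a reference on a cached GID entry; when the last reference goes
 * away the slot is cleared and the updated table is written back to
 * hardware.
 */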
static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
{
        struct gid_cache_context *ctx = *context;
        struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
        struct mlx4_port_gid_table *port_gid_table;
        int ret = 0;
        int hw_update = 0;
        struct gid_entry *gids = NULL;

        if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
                return -EINVAL;

        if (attr->port_num > MLX4_MAX_PORTS)
                return -EINVAL;

        port_gid_table = &iboe->gids[attr->port_num - 1];
        spin_lock_bh(&iboe->lock);
        if (ctx) {
                ctx->refcount--;
                if (!ctx->refcount) {
                        unsigned int real_index = ctx->real_index;

                        free_gid_entry(&port_gid_table->gids[real_index]);
                        hw_update = 1;
                }
        }
        if (!ret && hw_update) {
                int i;

                gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
                                     GFP_ATOMIC);
                if (!gids) {
                        ret = -ENOMEM;
                } else {
                        for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
                                memcpy(&gids[i].gid,
                                       &port_gid_table->gids[i].gid,
                                       sizeof(union ib_gid));
                                gids[i].gid_type =
                                    port_gid_table->gids[i].gid_type;
                        }
                }
        }
        spin_unlock_bh(&iboe->lock);

        if (!ret && hw_update) {
                ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
                kfree(gids);
        }
        return ret;
}

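/*
 * Translate an index in the rdma core GID cache into the slot actually
 * programmed in the HCA's table.  For plain IB ports the indexes are
 * identical; on a bonded device the lookup is always done on port 1.
 */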
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
                                    const struct ib_gid_attr *attr)
{
        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
        struct gid_cache_context *ctx = NULL;
        struct mlx4_port_gid_table *port_gid_table;
        int real_index = -EINVAL;
        int i;
        unsigned long flags;
        u8 port_num = attr->port_num;

        if (port_num > MLX4_MAX_PORTS)
                return -EINVAL;

        if (mlx4_is_bonded(ibdev->dev))
                port_num = 1;

        if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
                return attr->index;

        spin_lock_irqsave(&iboe->lock, flags);
        port_gid_table = &iboe->gids[port_num - 1];

        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
                if (!memcmp(&port_gid_table->gids[i].gid,
                            &attr->gid, sizeof(attr->gid)) &&
                    attr->gid_type == port_gid_table->gids[i].gid_type) {
                        ctx = port_gid_table->gids[i].ctx;
                        break;
                }
        if (ctx)
                real_index = ctx->real_index;
        spin_unlock_irqrestore(&iboe->lock, flags);
        return real_index;
}

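/*
 * Fill in ib_device_attr from firmware capabilities.  The extended
 * (uverbs_ex) response is built incrementally: each optional field is
 * appended only when the user buffer (uhw->outlen) is large enough to
 * hold it, with resp.response_length tracking how much is returned.
 */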
static int mlx4_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props,
                                struct ib_udata *uhw)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err;
        int have_ib_ports;
        struct mlx4_uverbs_ex_query_device cmd;
        struct mlx4_uverbs_ex_query_device_resp resp = {};
        struct mlx4_clock_params clock_params;

        if (uhw->inlen) {
                if (uhw->inlen < sizeof(cmd))
                        return -EINVAL;

                err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
                if (err)
                        return err;

                if (cmd.comp_mask)
                        return -EINVAL;

                if (cmd.reserved)
                        return -EINVAL;
        }

        resp.response_length = offsetof(typeof(resp), response_length) +
                sizeof(resp.response_length);
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        err = -ENOMEM;
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
                           1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memset(props, 0, sizeof *props);

        have_ib_ports = num_ib_ports(dev->dev);

        props->fw_ver = dev->dev->caps.fw_ver;
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN                |
                IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
        if (dev->dev->caps.max_gso_sz &&
            (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
            (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
        if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
                props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
                props->device_cap_flags |= IB_DEVICE_XRC;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
                if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
                else
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
        }
        if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = dev->dev->persist->pdev->device;
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = dev->dev->caps.page_size_cap;
        props->max_qp              = dev->dev->quotas.qp;
        props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
        props->max_send_sge =
                min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
        props->max_recv_sge =
                min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
        props->max_sge_rd = MLX4_MAX_SGE_RD;
        props->max_cq              = dev->dev->quotas.cq;
        props->max_cqe             = dev->dev->caps.max_cqes;
        props->max_mr              = dev->dev->quotas.mpt;
        props->max_pd              = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
        props->max_qp_rd_atom      = dev->dev->caps.max_qp_dest_rdma;
        props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = dev->dev->quotas.srq;
        props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
        props->max_srq_sge         = dev->dev->caps.max_srq_sge;
        props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
        props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
        props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
                IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->masked_atomic_cap   = props->atomic_cap;
        props->max_pkeys           = dev->dev->caps.pkey_table_len[1];
        props->max_mcast_grp       = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
        props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
        props->timestamp_mask = 0xFFFFFFFFFFFFULL;
        props->max_ah = INT_MAX;

        if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
            mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
                if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
                        props->rss_caps.max_rwq_indirection_tables =
                                props->max_qp;
                        props->rss_caps.max_rwq_indirection_table_size =
                                dev->dev->caps.max_rss_tbl_sz;
                        props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
                        props->max_wq_type_rq = props->max_qp;
                }

                if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
                        props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
        }

        props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
        props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

        if (!mlx4_is_slave(dev->dev))
                err = mlx4_get_internal_clock_params(dev->dev, &clock_params);

        if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
                resp.response_length += sizeof(resp.hca_core_clock_offset);
                if (!err && !mlx4_is_slave(dev->dev)) {
                        resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
                        resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
                }
        }

        if (uhw->outlen >= resp.response_length +
            sizeof(resp.max_inl_recv_sz)) {
                resp.response_length += sizeof(resp.max_inl_recv_sz);
                resp.max_inl_recv_sz  = dev->dev->caps.max_rq_sg *
                        sizeof(struct mlx4_wqe_data_seg);
        }

        if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
                if (props->rss_caps.supported_qpts) {
                        resp.rss_caps.rx_hash_function =
                                MLX4_IB_RX_HASH_FUNC_TOEPLITZ;

                        resp.rss_caps.rx_hash_fields_mask =
                                MLX4_IB_RX_HASH_SRC_IPV4 |
                                MLX4_IB_RX_HASH_DST_IPV4 |
                                MLX4_IB_RX_HASH_SRC_IPV6 |
                                MLX4_IB_RX_HASH_DST_IPV6 |
                                MLX4_IB_RX_HASH_SRC_PORT_TCP |
                                MLX4_IB_RX_HASH_DST_PORT_TCP |
                                MLX4_IB_RX_HASH_SRC_PORT_UDP |
                                MLX4_IB_RX_HASH_DST_PORT_UDP;

                        if (dev->dev->caps.tunnel_offload_mode ==
                            MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                                resp.rss_caps.rx_hash_fields_mask |=
                                        MLX4_IB_RX_HASH_INNER;
                }
                resp.response_length = offsetof(typeof(resp), rss_caps) +
                                       sizeof(resp.rss_caps);
        }

        if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
                if (dev->dev->caps.max_gso_sz &&
                    ((mlx4_ib_port_link_layer(ibdev, 1) ==
                    IB_LINK_LAYER_ETHERNET) ||
                    (mlx4_ib_port_link_layer(ibdev, 2) ==
                    IB_LINK_LAYER_ETHERNET))) {
                        resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
                        resp.tso_caps.supported_qpts |=
                                1 << IB_QPT_RAW_PACKET;
                }
                resp.response_length = offsetof(typeof(resp), tso_caps) +
                                       sizeof(resp.tso_caps);
        }

        if (uhw->outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);
                if (err)
                        goto out;
        }
out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx4_dev *dev = to_mdev(device)->dev;

        return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
                IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
                              struct ib_port_attr *props, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int ext_active_speed;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                                in_mad, out_mad);
        if (err)
                goto out;

        props->lid              = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc              = out_mad->data[34] & 0x7;
        props->sm_lid           = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl            = out_mad->data[36] & 0xf;
        props->state            = out_mad->data[32] & 0xf;
        props->phys_state       = out_mad->data[33] >> 4;
        props->port_cap_flags   = be32_to_cpup((__be32 *) (out_mad->data + 20));
        if (netw_view)
                props->gid_tbl_len = out_mad->data[50];
        else
                props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
        props->max_msg_sz       = to_mdev(ibdev)->dev->caps.max_msg_sz;
        props->pkey_tbl_len     = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
        props->bad_pkey_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width     = out_mad->data[31] & 0xf;
        props->active_speed     = out_mad->data[35] >> 4;
        props->max_mtu          = out_mad->data[41] & 0xf;
        props->active_mtu       = out_mad->data[36] >> 4;
        props->subnet_timeout   = out_mad->data[51] & 0x1f;
        props->max_vl_num       = out_mad->data[37] >> 4;
        props->init_type_reply  = out_mad->data[41] >> 4;

        /* Check if extended speeds (EDR/FDR/...) are supported */
        if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
                ext_active_speed = out_mad->data[62] >> 4;

                switch (ext_active_speed) {
                case 1:
                        props->active_speed = IB_SPEED_FDR;
                        break;
                case 2:
                        props->active_speed = IB_SPEED_EDR;
                        break;
                }
        }

        /* If the reported active speed is QDR, check whether it is FDR-10 */
        if (props->active_speed == IB_SPEED_QDR) {
                init_query_mad(in_mad);
                in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
                in_mad->attr_mod = cpu_to_be32(port);

                err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
                                   NULL, NULL, in_mad, out_mad);
                if (err)
                        goto out;

                /* Checking LinkSpeedActive for FDR-10 */
                if (out_mad->data[15] & 0x1)
                        props->active_speed = IB_SPEED_FDR10;
        }

        /* Avoid wrong speed value returned by FW if the IB link is down. */
        if (props->state == IB_PORT_DOWN)
                props->active_speed = IB_SPEED_SDR;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
        return state == IB_PORT_ACTIVE ?
                IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}

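/*
 * RoCE ports take their attributes from QUERY_PORT plus the state of
 * the underlying netdev: port state follows the carrier, and the active
 * MTU is derived from the netdev MTU (the bond master's, if bonded).
 */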
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
                               struct ib_port_attr *props)
{
        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
        struct mlx4_ib_iboe *iboe = &mdev->iboe;
        struct net_device *ndev;
        enum ib_mtu tmp;
        struct mlx4_cmd_mailbox *mailbox;
        int err = 0;
        int is_bonded = mlx4_is_bonded(mdev->dev);

        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
                           MLX4_CMD_WRAPPED);
        if (err)
                goto out;

        props->active_width     =  (((u8 *)mailbox->buf)[5] == 0x40) ||
                                   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
                                           IB_WIDTH_4X : IB_WIDTH_1X;
        props->active_speed     =  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
                                           IB_SPEED_FDR : IB_SPEED_QDR;
        props->port_cap_flags   = IB_PORT_CM_SUP;
        props->ip_gids = true;
        props->gid_tbl_len      = mdev->dev->caps.gid_table_len[port];
        props->max_msg_sz       = mdev->dev->caps.max_msg_sz;
        if (mdev->dev->caps.pkey_table_len[port])
                props->pkey_tbl_len = 1;
        props->max_mtu          = IB_MTU_4096;
        props->max_vl_num       = 2;
        props->state            = IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
        props->active_mtu       = IB_MTU_256;
        spin_lock_bh(&iboe->lock);
        ndev = iboe->netdevs[port - 1];
        if (ndev && is_bonded) {
                rcu_read_lock(); /* required to get upper dev */
                ndev = netdev_master_upper_dev_get_rcu(ndev);
                rcu_read_unlock();
        }
        if (!ndev)
                goto out_unlock;

        tmp = iboe_get_mtu(ndev->mtu);
        props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

        props->state            = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
                                        IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
out_unlock:
        spin_unlock_bh(&iboe->lock);
out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                         struct ib_port_attr *props, int netw_view)
{
        int err;

        /* props being zeroed by the caller, avoid zeroing it here */

        err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
                ib_link_query_port(ibdev, port, props, netw_view) :
                                eth_link_query_port(ibdev, port, props);

        return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                              struct ib_port_attr *props)
{
        /* returns host view */
        return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                        union ib_gid *gid, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        int clear = 0;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        if (mlx4_is_mfunc(dev->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        if (mlx4_is_mfunc(dev->dev) && !netw_view) {
                if (index) {
                        /* For any index > 0, return the null guid */
                        err = 0;
                        clear = 1;
                        goto out;
                }
        }

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
                           NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        if (clear)
                memset(gid->raw + 8, 0, 8);
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        if (rdma_protocol_ib(ibdev, port))
                return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
        return 0;
}

static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
{
        union sl2vl_tbl_to_u64 sl2vl64;
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;
        int jj;

        if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
                *sl2vl_tbl = 0;
                return 0;
        }

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
        in_mad->attr_mod = 0;

        if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                           in_mad, out_mad);
        if (err)
                goto out;

        for (jj = 0; jj < 8; jj++)
                sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
        *sl2vl_tbl = sl2vl64.sl64;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
        u64 sl2vl;
        int i;
        int err;

        for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
                if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
                        continue;
                err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
                if (err) {
                        pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
                               i, err);
                        sl2vl = 0;
                }
                atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
        }
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                         u16 *pkey, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                           in_mad, out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
{
        struct mlx4_cmd_mailbox *mailbox;
        unsigned long flags;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                return 0;

        if (mlx4_is_slave(to_mdev(ibdev)->dev))
                return -EOPNOTSUPP;

        spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
        memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
        spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

        /*
         * If possible, pass the node desc to FW so that it can generate
         * a trap 144 (node description changed).  If the command fails,
         * just ignore it.
         */
        mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
        if (IS_ERR(mailbox))
                return 0;

        memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
        mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
                 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

        mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

        return 0;
}

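/*
 * Wrapper for the IB flavor of SET_PORT.  Old firmware
 * (MLX4_FLAG_OLD_PORT_CMDS) uses a different mailbox layout for the
 * qkey-violation reset bit and the capability mask.
 */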
static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
                            u32 cap_mask)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
                *(u8 *) mailbox->buf         = !!reset_qkey_viols << 6;
                ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
        } else {
                ((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
                ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
        }

        err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
                       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                       MLX4_CMD_WRAPPED);

        mlx4_free_cmd_mailbox(dev->dev, mailbox);
        return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
                               struct ib_port_modify *props)
{
        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
        u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
        struct ib_port_attr attr;
        u32 cap_mask;
        int err;

        /* return OK if this is RoCE. CM calls ib_modify_port() regardless
         * of whether port link layer is ETH or IB. For ETH ports, qkey
         * violations and port capabilities are not meaningful.
         */
        if (is_eth)
                return 0;

        mutex_lock(&mdev->cap_mask_mutex);

        err = ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mlx4_ib_SET_PORT(mdev, port,
                               !!(mask & IB_PORT_RESET_QKEY_CNTR),
                               cap_mask);

out:
        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
                                  struct ib_udata *udata)
{
        struct ib_device *ibdev = uctx->device;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_ucontext *context = to_mucontext(uctx);
        struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
        struct mlx4_ib_alloc_ucontext_resp resp;
        int err;

        if (!dev->ib_active)
                return -EAGAIN;

        if (ibdev->ops.uverbs_abi_ver ==
            MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
                resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
                resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
                resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
        } else {
                resp.dev_caps         = dev->dev->caps.userspace_caps;
                resp.qp_tab_size      = dev->dev->caps.num_qps;
                resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
                resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
                resp.cqe_size         = dev->dev->caps.cqe_size;
        }

        err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
        if (err)
                return err;

        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        INIT_LIST_HEAD(&context->wqn_ranges_list);
        mutex_init(&context->wqn_ranges_mutex);

        if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
                err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
        else
                err = ib_copy_to_udata(udata, &resp, sizeof(resp));

        if (err) {
                mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
                return -EFAULT;
        }

        return err;
}

static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

        mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
}

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

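/*
 * Userspace mmap offsets: page 0 maps the context's UAR, page 1 the
 * blue-flame register (when bf_reg_size is nonzero), and page 3 the
 * internal HCA core-clock bar for raw timestamp reads.
 */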
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct mlx4_ib_dev *dev = to_mdev(context->device);

        switch (vma->vm_pgoff) {
        case 0:
                return rdma_user_mmap_io(context, vma,
                                         to_mucontext(context)->uar.pfn,
                                         PAGE_SIZE,
                                         pgprot_noncached(vma->vm_page_prot),
                                         NULL);

        case 1:
                if (dev->dev->caps.bf_reg_size == 0)
                        return -EINVAL;
                return rdma_user_mmap_io(
                        context, vma,
                        to_mucontext(context)->uar.pfn +
                                dev->dev->caps.num_uars,
                        PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
                        NULL);

        case 3: {
                struct mlx4_clock_params params;
                int ret;

                ret = mlx4_get_internal_clock_params(dev->dev, &params);
                if (ret)
                        return ret;

                return rdma_user_mmap_io(
                        context, vma,
                        (pci_resource_start(dev->dev->persist->pdev,
                                            params.bar) +
                         params.offset) >>
                                PAGE_SHIFT,
                        PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
                        NULL);
        }

        default:
                return -EINVAL;
        }
}

static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct mlx4_ib_pd *pd = to_mpd(ibpd);
        struct ib_device *ibdev = ibpd->device;
        int err;

        err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
        if (err)
                return err;

        if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
                mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
                return -EFAULT;
        }
        return 0;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
        mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
        return 0;
}

static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
        struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
        struct ib_cq_init_attr cq_attr = {};
        int err;

        if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
                return -EOPNOTSUPP;

        err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
        if (err)
                return err;

        xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
        if (IS_ERR(xrcd->pd)) {
                err = PTR_ERR(xrcd->pd);
                goto err2;
        }

        cq_attr.cqe = 1;
        xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
        if (IS_ERR(xrcd->cq)) {
                err = PTR_ERR(xrcd->cq);
                goto err3;
        }

        return 0;

err3:
        ib_dealloc_pd(xrcd->pd);
err2:
        mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
        return err;
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
        ib_destroy_cq(to_mxrcd(xrcd)->cq);
        ib_dealloc_pd(to_mxrcd(xrcd)->pd);
        mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
        return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_gid_entry *ge;

        ge = kzalloc(sizeof *ge, GFP_KERNEL);
        if (!ge)
                return -ENOMEM;

        ge->gid = *gid;
        if (mlx4_ib_add_mc(mdev, mqp, gid)) {
                ge->port = mqp->port;
                ge->added = 1;
        }

        mutex_lock(&mqp->mutex);
        list_add_tail(&ge->list, &mqp->gid_list);
        mutex_unlock(&mqp->mutex);

        return 0;
}

static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
                                          struct mlx4_ib_counters *ctr_table)
{
        struct counter_index *counter, *tmp_count;

        mutex_lock(&ctr_table->mutex);
        list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
                                 list) {
                if (counter->allocated)
                        mlx4_counter_free(ibdev->dev, counter->index);
                list_del(&counter->list);
                kfree(counter);
        }
        mutex_unlock(&ctr_table->mutex);
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
                   union ib_gid *gid)
{
        struct net_device *ndev;
        int ret = 0;

        if (!mqp->port)
                return 0;

        spin_lock_bh(&mdev->iboe.lock);
        ndev = mdev->iboe.netdevs[mqp->port - 1];
        if (ndev)
                dev_hold(ndev);
        spin_unlock_bh(&mdev->iboe.lock);

        if (ndev) {
                ret = 1;
                dev_put(ndev);
        }

        return ret;
}

struct mlx4_ib_steering {
        struct list_head list;
        struct mlx4_flow_reg_id reg_id;
        union ib_gid gid;
};

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD dst_ip
#define LAST_TCP_UDP_FIELD src_port

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
        memchr_inv((void *)&filter.field  +\
                   sizeof(filter.field), 0,\
                   sizeof(filter) -\
                   offsetof(typeof(filter), field) -\
                   sizeof(filter.field))

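/*
 * Convert a single ib_flow_spec into the hardware rule format.  Returns
 * the size in bytes of the hardware spec that was written, or a
 * negative errno if the spec type or a masked field is unsupported.
 */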
1349 static int parse_flow_attr(struct mlx4_dev *dev,
1350                            u32 qp_num,
1351                            union ib_flow_spec *ib_spec,
1352                            struct _rule_hw *mlx4_spec)
1353 {
1354         enum mlx4_net_trans_rule_id type;
1355
1356         switch (ib_spec->type) {
1357         case IB_FLOW_SPEC_ETH:
1358                 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1359                         return -ENOTSUPP;
1360
1361                 type = MLX4_NET_TRANS_RULE_ID_ETH;
1362                 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1363                        ETH_ALEN);
1364                 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1365                        ETH_ALEN);
1366                 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1367                 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1368                 break;
1369         case IB_FLOW_SPEC_IB:
1370                 if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1371                         return -ENOTSUPP;
1372
1373                 type = MLX4_NET_TRANS_RULE_ID_IB;
1374                 mlx4_spec->ib.l3_qpn =
1375                         cpu_to_be32(qp_num);
1376                 mlx4_spec->ib.qpn_mask =
1377                         cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1378                 break;
1379
1380
1381         case IB_FLOW_SPEC_IPV4:
1382                 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1383                         return -ENOTSUPP;
1384
1385                 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1386                 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1387                 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1388                 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1389                 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1390                 break;
1391
1392         case IB_FLOW_SPEC_TCP:
1393         case IB_FLOW_SPEC_UDP:
1394                 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1395                         return -EOPNOTSUPP;
1396
1397                 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1398                                         MLX4_NET_TRANS_RULE_ID_TCP :
1399                                         MLX4_NET_TRANS_RULE_ID_UDP;
1400                 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1401                 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1402                 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1403                 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1404                 break;
1405
1406         default:
1407                 return -EINVAL;
1408         }
1409         if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1410             mlx4_hw_rule_sz(dev, type) < 0)
1411                 return -EINVAL;
1412         mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1413         mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1414         return mlx4_hw_rule_sz(dev, type);
1415 }
1416
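/*
 * Implicit rules the driver adds on behalf of userspace.  Each entry
 * lists the specs a flow_attr must contain (mandatory_fields), the
 * specs it must not contain (mandatory_not_fields), the extra specs
 * to emit first (rules_create_list) and the link layer it applies to.
 * The single entry below gives an IPv4-only flow on an IB port an
 * implicit IB spec, presumably so IPoIB traffic can be steered by
 * L3 rules.
 */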
1417 struct default_rules {
1418         __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1419         __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1420         __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1421         __u8  link_layer;
1422 };
1423 static const struct default_rules default_table[] = {
1424         {
1425                 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1426                 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1427                 .rules_create_list = {IB_FLOW_SPEC_IB},
1428                 .link_layer = IB_LINK_LAYER_INFINIBAND
1429         }
1430 };
1431
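/*
 * Match @flow_attr against default_table: the link layer must agree,
 * every mandatory spec must be present (the specs are assumed sorted)
 * and no forbidden spec may appear.  Returns the index of the first
 * matching entry, or -1 if none applies.
 */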
1432 static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1433                                          struct ib_flow_attr *flow_attr)
1434 {
1435         int i, j, k;
1436         void *ib_flow;
1437         const struct default_rules *pdefault_rules = default_table;
1438         u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1439
1440         for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
1441                 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1442                 memset(&field_types, 0, sizeof(field_types));
1443
1444                 if (link_layer != pdefault_rules->link_layer)
1445                         continue;
1446
1447                 ib_flow = flow_attr + 1;
1448                 /* we assume the specs are sorted */
1449                 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1450                      j < flow_attr->num_of_specs; k++) {
1451                         union ib_flow_spec *current_flow =
1452                                 (union ib_flow_spec *)ib_flow;
1453
1454                         /* same layer but different type */
1455                         if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1456                              (pdefault_rules->mandatory_fields[k] &
1457                               IB_FLOW_SPEC_LAYER_MASK)) &&
1458                             (current_flow->type !=
1459                              pdefault_rules->mandatory_fields[k]))
1460                                 goto out;
1461
1462                         /* same layer, try match next one */
1463                         if (current_flow->type ==
1464                             pdefault_rules->mandatory_fields[k]) {
1465                                 j++;
1466                                 ib_flow +=
1467                                         ((union ib_flow_spec *)ib_flow)->size;
1468                         }
1469                 }
1470
1471                 ib_flow = flow_attr + 1;
1472                 for (j = 0; j < flow_attr->num_of_specs;
1473                      j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1474                         for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1475                                 /* same layer and same type */
1476                                 if (((union ib_flow_spec *)ib_flow)->type ==
1477                                     pdefault_rules->mandatory_not_fields[k])
1478                                         goto out;
1479
1480                 return i;
1481         }
1482 out:
1483         return -1;
1484 }
1485
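/*
 * Emit the hardware specs named in @pdefault_rules->rules_create_list
 * into the command mailbox at @mlx4_spec.  Returns the total number
 * of bytes written, or -EINVAL on a malformed entry.
 */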
1486 static int __mlx4_ib_create_default_rules(
1487                 struct mlx4_ib_dev *mdev,
1488                 struct ib_qp *qp,
1489                 const struct default_rules *pdefault_rules,
1490                 struct _rule_hw *mlx4_spec) {
1491         int size = 0;
1492         int i;
1493
1494         for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
1495                 union ib_flow_spec ib_spec = {};
1496                 int ret;
1497
1498                 switch (pdefault_rules->rules_create_list[i]) {
1499                 case 0:
1500                         /* no rule */
1501                         continue;
1502                 case IB_FLOW_SPEC_IB:
1503                         ib_spec.type = IB_FLOW_SPEC_IB;
1504                         ib_spec.size = sizeof(struct ib_flow_spec_ib);
1505
1506                         break;
1507                 default:
1508                         /* invalid rule */
1509                         return -EINVAL;
1510                 }
1511                 /* We must put an empty rule; the qpn is ignored */
1512                 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1513                                       mlx4_spec);
1514                 if (ret < 0) {
1515                         pr_info("failed to parse default flow rule\n");
1516                         return -EINVAL;
1517                 }
1518
1519                 mlx4_spec = (void *)mlx4_spec + ret;
1520                 size += ret;
1521         }
1522         return size;
1523 }
1524
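/*
 * Build and post a MLX4_QP_FLOW_STEERING_ATTACH command.  The mailbox
 * starts with a mlx4_net_trans_rule_hw_ctrl header (priority, steering
 * mode, port, qpn) followed by any matching default rules and then the
 * caller's specs, each converted by parse_flow_attr().  The total size
 * is passed to the firmware in dwords (size >> 2), and the firmware
 * returns the rule's registration id through @reg_id.
 */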
1525 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1526                           int domain,
1527                           enum mlx4_net_trans_promisc_mode flow_type,
1528                           u64 *reg_id)
1529 {
1530         int ret, i;
1531         int size = 0;
1532         void *ib_flow;
1533         struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1534         struct mlx4_cmd_mailbox *mailbox;
1535         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1536         int default_flow;
1537
1538         if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1539                 pr_err("Invalid priority value %d\n", flow_attr->priority);
1540                 return -EINVAL;
1541         }
1542
1543         if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1544                 return -EINVAL;
1545
1546         mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1547         if (IS_ERR(mailbox))
1548                 return PTR_ERR(mailbox);
1549         ctrl = mailbox->buf;
1550
1551         ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
1552         ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1553         ctrl->port = flow_attr->port;
1554         ctrl->qpn = cpu_to_be32(qp->qp_num);
1555
1556         ib_flow = flow_attr + 1;
1557         size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1558         /* Add default flows */
1559         default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1560         if (default_flow >= 0) {
1561                 ret = __mlx4_ib_create_default_rules(
1562                                 mdev, qp, default_table + default_flow,
1563                                 mailbox->buf + size);
1564                 if (ret < 0) {
1565                         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1566                         return -EINVAL;
1567                 }
1568                 size += ret;
1569         }
1570         for (i = 0; i < flow_attr->num_of_specs; i++) {
1571                 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1572                                       mailbox->buf + size);
1573                 if (ret < 0) {
1574                         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1575                         return -EINVAL;
1576                 }
1577                 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1578                 size += ret;
1579         }
1580
1581         if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1582             flow_attr->num_of_specs == 1) {
1583                 struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1584                 enum ib_flow_spec_type header_spec =
1585                         ((union ib_flow_spec *)(flow_attr + 1))->type;
1586
1587                 if (header_spec == IB_FLOW_SPEC_ETH)
1588                         mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1589         }
1590
1591         ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1592                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1593                            MLX4_CMD_NATIVE);
1594         if (ret == -ENOMEM)
1595                 pr_err("mcg table is full. Failed to register network rule.\n");
1596         else if (ret == -ENXIO)
1597                 pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
1598         else if (ret)
1599                 pr_err("Invalid argument. Failed to register network rule.\n");
1600
1601         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1602         return ret;
1603 }
1604
1605 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1606 {
1607         int err;
1608         err = mlx4_cmd(dev, reg_id, 0, 0,
1609                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1610                        MLX4_CMD_NATIVE);
1611         if (err)
1612                 pr_err("Failed to detach network rule, registration id = 0x%llx\n",
1613                        reg_id);
1614         return err;
1615 }
1616
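/*
 * With VXLAN tunnel offload enabled, a flow consisting of a single
 * ETH spec also needs a tunnel steering entry so that encapsulated
 * traffic reaches the QP; in any other configuration this is a no-op.
 */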
1617 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1618                                     u64 *reg_id)
1619 {
1620         void *ib_flow;
1621         union ib_flow_spec *ib_spec;
1622         struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1623         int err = 0;
1624
1625         if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1626             dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
1627                 return 0; /* do nothing */
1628
1629         ib_flow = flow_attr + 1;
1630         ib_spec = (union ib_flow_spec *)ib_flow;
1631
1632         if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1633                 return 0; /* do nothing */
1634
1635         err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1636                                     flow_attr->port, qp->qp_num,
1637                                     MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1638                                     reg_id);
1639         return err;
1640 }
1641
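/*
 * Map a don't-trap flow attribute onto up to two promiscuous rule
 * types.  With no specs at all, both MC and UC sniffer rules are
 * used; with a single ETH spec, the dst_mac mask selects one of the
 * two (an empty mask again means both).  Anything else is rejected.
 */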
1642 static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1643                                       struct ib_flow_attr *flow_attr,
1644                                       enum mlx4_net_trans_promisc_mode *type)
1645 {
1646         int err = 0;
1647
1648         if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1649             (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1650             (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1651                 return -EOPNOTSUPP;
1652         }
1653
1654         if (flow_attr->num_of_specs == 0) {
1655                 type[0] = MLX4_FS_MC_SNIFFER;
1656                 type[1] = MLX4_FS_UC_SNIFFER;
1657         } else {
1658                 union ib_flow_spec *ib_spec;
1659
1660                 ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1661                 if (ib_spec->type != IB_FLOW_SPEC_ETH)
1662                         return -EINVAL;
1663
1664                 /* if the mask is all zero then both MC and UC */
1665                 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1666                         type[0] = MLX4_FS_MC_SNIFFER;
1667                         type[1] = MLX4_FS_UC_SNIFFER;
1668                 } else {
1669                         u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1670                                             ib_spec->eth.mask.dst_mac[1],
1671                                             ib_spec->eth.mask.dst_mac[2],
1672                                             ib_spec->eth.mask.dst_mac[3],
1673                                             ib_spec->eth.mask.dst_mac[4],
1674                                             ib_spec->eth.mask.dst_mac[5]};
1675
1676                         /* The xor above touched only the MC bit; a non-empty mask
1677                          * is valid only if that bit is set and the rest are zero.
1678                          */
1679                         if (!is_zero_ether_addr(&mac[0]))
1680                                 return -EINVAL;
1681
1682                         if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1683                                 type[0] = MLX4_FS_MC_SNIFFER;
1684                         else
1685                                 type[0] = MLX4_FS_UC_SNIFFER;
1686                 }
1687         }
1688
1689         return err;
1690 }
1691
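/*
 * uverbs entry point for flow creation.  An attribute may map to up
 * to two rule types (e.g. the two sniffer directions), and on a
 * bonded device every rule also gets a mirror on port 2, since the
 * application only ever sees one port.
 */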
1692 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1693                                            struct ib_flow_attr *flow_attr,
1694                                            struct ib_udata *udata)
1695 {
1696         int err = 0, i = 0, j = 0;
1697         struct mlx4_ib_flow *mflow;
1698         enum mlx4_net_trans_promisc_mode type[2];
1699         struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1700         int is_bonded = mlx4_is_bonded(dev);
1701
1702         if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
1703                 return ERR_PTR(-EINVAL);
1704
1705         if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
1706                 return ERR_PTR(-EOPNOTSUPP);
1707
1708         if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1709             (flow_attr->type != IB_FLOW_ATTR_NORMAL))
1710                 return ERR_PTR(-EOPNOTSUPP);
1711
1712         if (udata &&
1713             udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
1714                 return ERR_PTR(-EOPNOTSUPP);
1715
1716         memset(type, 0, sizeof(type));
1717
1718         mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1719         if (!mflow) {
1720                 err = -ENOMEM;
1721                 goto err_free;
1722         }
1723
1724         switch (flow_attr->type) {
1725         case IB_FLOW_ATTR_NORMAL:
1726                 /* If the don't-trap flag (continue match) is set, then under
1727                  * specific conditions traffic is replicated to the given qp
1728                  * without being stolen from later-matching rules
1729                  */
1730                 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1731                         err = mlx4_ib_add_dont_trap_rule(dev,
1732                                                          flow_attr,
1733                                                          type);
1734                         if (err)
1735                                 goto err_free;
1736                 } else {
1737                         type[0] = MLX4_FS_REGULAR;
1738                 }
1739                 break;
1740
1741         case IB_FLOW_ATTR_ALL_DEFAULT:
1742                 type[0] = MLX4_FS_ALL_DEFAULT;
1743                 break;
1744
1745         case IB_FLOW_ATTR_MC_DEFAULT:
1746                 type[0] = MLX4_FS_MC_DEFAULT;
1747                 break;
1748
1749         case IB_FLOW_ATTR_SNIFFER:
1750                 type[0] = MLX4_FS_MIRROR_RX_PORT;
1751                 type[1] = MLX4_FS_MIRROR_SX_PORT;
1752                 break;
1753
1754         default:
1755                 err = -EINVAL;
1756                 goto err_free;
1757         }
1758
1759         while (i < ARRAY_SIZE(type) && type[i]) {
1760                 err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
1761                                             type[i], &mflow->reg_id[i].id);
1762                 if (err)
1763                         goto err_create_flow;
1764                 if (is_bonded) {
1765                         /* Application always sees one port so the mirror rule
1766                          * must be on port #2
1767                          */
1768                         flow_attr->port = 2;
1769                         err = __mlx4_ib_create_flow(qp, flow_attr,
1770                                                     MLX4_DOMAIN_UVERBS, type[j],
1771                                                     &mflow->reg_id[j].mirror);
1772                         flow_attr->port = 1;
1773                         if (err)
1774                                 goto err_create_flow;
1775                         j++;
1776                 }
1777
1778                 i++;
1779         }
1780
1781         if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1782                 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1783                                                &mflow->reg_id[i].id);
1784                 if (err)
1785                         goto err_create_flow;
1786
1787                 if (is_bonded) {
1788                         flow_attr->port = 2;
1789                         err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1790                                                        &mflow->reg_id[j].mirror);
1791                         flow_attr->port = 1;
1792                         if (err)
1793                                 goto err_create_flow;
1794                         j++;
1795                 }
1796                 /* count the tunnel steering rule so cleanup destroys it too */
1797                 i++;
1798         }
1799
1800         return &mflow->ibflow;
1801
1802 err_create_flow:
1803         while (i) {
1804                 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1805                                              mflow->reg_id[i].id);
1806                 i--;
1807         }
1808
1809         while (j) {
1810                 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1811                                              mflow->reg_id[j].mirror);
1812                 j--;
1813         }
1814 err_free:
1815         kfree(mflow);
1816         return ERR_PTR(err);
1817 }
1818
1819 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1820 {
1821         int err, ret = 0;
1822         int i = 0;
1823         struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1824         struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1825
1826         while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1827                 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1828                 if (err)
1829                         ret = err;
1830                 if (mflow->reg_id[i].mirror) {
1831                         err = __mlx4_ib_destroy_flow(mdev->dev,
1832                                                      mflow->reg_id[i].mirror);
1833                         if (err)
1834                                 ret = err;
1835                 }
1836                 i++;
1837         }
1838
1839         kfree(mflow);
1840         return ret;
1841 }
1842
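/*
 * Attach @ibqp to the multicast group @gid.  Under device-managed
 * steering the firmware registration id is remembered on the QP's
 * steering_rules list for mlx4_ib_mcg_detach() to release; on a
 * bonded device the attach is mirrored on the other port.
 */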
1843 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1844 {
1845         int err;
1846         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1847         struct mlx4_dev *dev = mdev->dev;
1848         struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1849         struct mlx4_ib_steering *ib_steering = NULL;
1850         enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1851         struct mlx4_flow_reg_id reg_id;
1852
1853         if (mdev->dev->caps.steering_mode ==
1854             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1855                 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1856                 if (!ib_steering)
1857                         return -ENOMEM;
1858         }
1859
1860         err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1861                                     !!(mqp->flags &
1862                                        MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1863                                     prot, &reg_id.id);
1864         if (err) {
1865                 pr_err("multicast attach op failed, err %d\n", err);
1866                 goto err_malloc;
1867         }
1868
1869         reg_id.mirror = 0;
1870         if (mlx4_is_bonded(dev)) {
1871                 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1872                                             (mqp->port == 1) ? 2 : 1,
1873                                             !!(mqp->flags &
1874                                             MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1875                                             prot, &reg_id.mirror);
1876                 if (err)
1877                         goto err_add;
1878         }
1879
1880         err = add_gid_entry(ibqp, gid);
1881         if (err)
1882                 goto err_add;
1883
1884         if (ib_steering) {
1885                 memcpy(ib_steering->gid.raw, gid->raw, 16);
1886                 ib_steering->reg_id = reg_id;
1887                 mutex_lock(&mqp->mutex);
1888                 list_add(&ib_steering->list, &mqp->steering_rules);
1889                 mutex_unlock(&mqp->mutex);
1890         }
1891         return 0;
1892
1893 err_add:
1894         mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1895                               prot, reg_id.id);
1896         if (reg_id.mirror)
1897                 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1898                                       prot, reg_id.mirror);
1899 err_malloc:
1900         kfree(ib_steering);
1901
1902         return err;
1903 }
1904
1905 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1906 {
1907         struct mlx4_ib_gid_entry *ge;
1908         struct mlx4_ib_gid_entry *tmp;
1909         struct mlx4_ib_gid_entry *ret = NULL;
1910
1911         list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1912                 if (!memcmp(raw, ge->gid.raw, 16)) {
1913                         ret = ge;
1914                         break;
1915                 }
1916         }
1917
1918         return ret;
1919 }
1920
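/*
 * Reverse of mlx4_ib_mcg_attach(): look up the saved registration id
 * (under device-managed steering), detach from the firmware on the
 * main and, if bonded, the mirror port, then drop the QP's gid entry.
 */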
1921 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1922 {
1923         int err;
1924         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1925         struct mlx4_dev *dev = mdev->dev;
1926         struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1927         struct net_device *ndev;
1928         struct mlx4_ib_gid_entry *ge;
1929         struct mlx4_flow_reg_id reg_id = {0, 0};
1930         enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1931
1932         if (mdev->dev->caps.steering_mode ==
1933             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1934                 struct mlx4_ib_steering *ib_steering;
1935
1936                 mutex_lock(&mqp->mutex);
1937                 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1938                         if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1939                                 list_del(&ib_steering->list);
1940                                 break;
1941                         }
1942                 }
1943                 mutex_unlock(&mqp->mutex);
1944                 if (&ib_steering->list == &mqp->steering_rules) {
1945                         pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1946                         return -EINVAL;
1947                 }
1948                 reg_id = ib_steering->reg_id;
1949                 kfree(ib_steering);
1950         }
1951
1952         err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1953                                     prot, reg_id.id);
1954         if (err)
1955                 return err;
1956
1957         if (mlx4_is_bonded(dev)) {
1958                 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1959                                             prot, reg_id.mirror);
1960                 if (err)
1961                         return err;
1962         }
1963
1964         mutex_lock(&mqp->mutex);
1965         ge = find_gid_entry(mqp, gid->raw);
1966         if (ge) {
1967                 spin_lock_bh(&mdev->iboe.lock);
1968                 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1969                 if (ndev)
1970                         dev_hold(ndev);
1971                 spin_unlock_bh(&mdev->iboe.lock);
1972                 if (ndev)
1973                         dev_put(ndev);
1974                 list_del(&ge->list);
1975                 kfree(ge);
1976         } else
1977                 pr_warn("could not find mgid entry\n");
1978
1979         mutex_unlock(&mqp->mutex);
1980
1981         return 0;
1982 }
1983
1984 static int init_node_data(struct mlx4_ib_dev *dev)
1985 {
1986         struct ib_smp *in_mad  = NULL;
1987         struct ib_smp *out_mad = NULL;
1988         int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1989         int err = -ENOMEM;
1990
1991         in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
1992         out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
1993         if (!in_mad || !out_mad)
1994                 goto out;
1995
1996         init_query_mad(in_mad);
1997         in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1998         if (mlx4_is_master(dev->dev))
1999                 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
2000
2001         err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2002         if (err)
2003                 goto out;
2004
2005         memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
2006
2007         in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2008
2009         err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2010         if (err)
2011                 goto out;
2012
2013         dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
2014         memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2015
2016 out:
2017         kfree(in_mad);
2018         kfree(out_mad);
2019         return err;
2020 }
2021
2022 static ssize_t hca_type_show(struct device *device,
2023                              struct device_attribute *attr, char *buf)
2024 {
2025         struct mlx4_ib_dev *dev =
2026                 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2027         return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device);
2028 }
2029 static DEVICE_ATTR_RO(hca_type);
2030
2031 static ssize_t hw_rev_show(struct device *device,
2032                            struct device_attribute *attr, char *buf)
2033 {
2034         struct mlx4_ib_dev *dev =
2035                 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2036         return sysfs_emit(buf, "%x\n", dev->dev->rev_id);
2037 }
2038 static DEVICE_ATTR_RO(hw_rev);
2039
2040 static ssize_t board_id_show(struct device *device,
2041                              struct device_attribute *attr, char *buf)
2042 {
2043         struct mlx4_ib_dev *dev =
2044                 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2045
2046         return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id);
2047 }
2048 static DEVICE_ATTR_RO(board_id);
2049
2050 static struct attribute *mlx4_class_attributes[] = {
2051         &dev_attr_hw_rev.attr,
2052         &dev_attr_hca_type.attr,
2053         &dev_attr_board_id.attr,
2054         NULL
2055 };
2056
2057 static const struct attribute_group mlx4_attr_group = {
2058         .attrs = mlx4_class_attributes,
2059 };
2060
2061 struct diag_counter {
2062         const char *name;
2063         u32 offset;
2064 };
2065
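/*
 * DIAG_COUNTER(rq_num_lle, 0x00) expands to
 *	{ .name = "rq_num_lle", .offset = 0x00 },
 * i.e. the counter's name as exposed through rdma_hw_stats plus its
 * offset in the buffer returned by the firmware query.
 */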
2066 #define DIAG_COUNTER(_name, _offset)                    \
2067         { .name = #_name, .offset = _offset }
2068
2069 static const struct diag_counter diag_basic[] = {
2070         DIAG_COUNTER(rq_num_lle, 0x00),
2071         DIAG_COUNTER(sq_num_lle, 0x04),
2072         DIAG_COUNTER(rq_num_lqpoe, 0x08),
2073         DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2074         DIAG_COUNTER(rq_num_lpe, 0x18),
2075         DIAG_COUNTER(sq_num_lpe, 0x1C),
2076         DIAG_COUNTER(rq_num_wrfe, 0x20),
2077         DIAG_COUNTER(sq_num_wrfe, 0x24),
2078         DIAG_COUNTER(sq_num_mwbe, 0x2C),
2079         DIAG_COUNTER(sq_num_bre, 0x34),
2080         DIAG_COUNTER(sq_num_rire, 0x44),
2081         DIAG_COUNTER(rq_num_rire, 0x48),
2082         DIAG_COUNTER(sq_num_rae, 0x4C),
2083         DIAG_COUNTER(rq_num_rae, 0x50),
2084         DIAG_COUNTER(sq_num_roe, 0x54),
2085         DIAG_COUNTER(sq_num_tree, 0x5C),
2086         DIAG_COUNTER(sq_num_rree, 0x64),
2087         DIAG_COUNTER(rq_num_rnr, 0x68),
2088         DIAG_COUNTER(sq_num_rnr, 0x6C),
2089         DIAG_COUNTER(rq_num_oos, 0x100),
2090         DIAG_COUNTER(sq_num_oos, 0x104),
2091 };
2092
2093 static const struct diag_counter diag_ext[] = {
2094         DIAG_COUNTER(rq_num_dup, 0x130),
2095         DIAG_COUNTER(sq_num_to, 0x134),
2096 };
2097
2098 static const struct diag_counter diag_device_only[] = {
2099         DIAG_COUNTER(num_cqovf, 0x1A0),
2100         DIAG_COUNTER(rq_num_udsdprd, 0x118),
2101 };
2102
2103 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2104                                                     u8 port_num)
2105 {
2106         struct mlx4_ib_dev *dev = to_mdev(ibdev);
2107         struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2108
2109         if (!diag[!!port_num].name)
2110                 return NULL;
2111
2112         return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
2113                                           diag[!!port_num].num_counters,
2114                                           RDMA_HW_STATS_DEFAULT_LIFESPAN);
2115 }
2116
2117 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2118                                 struct rdma_hw_stats *stats,
2119                                 u8 port, int index)
2120 {
2121         struct mlx4_ib_dev *dev = to_mdev(ibdev);
2122         struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2123         u32 hw_value[ARRAY_SIZE(diag_device_only) +
2124                 ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2125         int ret;
2126         int i;
2127
2128         ret = mlx4_query_diag_counters(dev->dev,
2129                                        MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2130                                        diag[!!port].offset, hw_value,
2131                                        diag[!!port].num_counters, port);
2132
2133         if (ret)
2134                 return ret;
2135
2136         for (i = 0; i < diag[!!port].num_counters; i++)
2137                 stats->value[i] = hw_value[i];
2138
2139         return diag[!!port].num_counters;
2140 }
2141
2142 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2143                                          const char ***name,
2144                                          u32 **offset,
2145                                          u32 *num,
2146                                          bool port)
2147 {
2148         u32 num_counters;
2149
2150         num_counters = ARRAY_SIZE(diag_basic);
2151
2152         if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2153                 num_counters += ARRAY_SIZE(diag_ext);
2154
2155         if (!port)
2156                 num_counters += ARRAY_SIZE(diag_device_only);
2157
2158         *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2159         if (!*name)
2160                 return -ENOMEM;
2161
2162         *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2163         if (!*offset)
2164                 goto err_name;
2165
2166         *num = num_counters;
2167
2168         return 0;
2169
2170 err_name:
2171         kfree(*name);
2172         return -ENOMEM;
2173 }
2174
2175 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2176                                        const char **name,
2177                                        u32 *offset,
2178                                        bool port)
2179 {
2180         int i;
2181         int j;
2182
2183         for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2184                 name[i] = diag_basic[i].name;
2185                 offset[i] = diag_basic[i].offset;
2186         }
2187
2188         if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2189                 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2190                         name[j] = diag_ext[i].name;
2191                         offset[j] = diag_ext[i].offset;
2192                 }
2193         }
2194
2195         if (!port) {
2196                 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2197                         name[j] = diag_device_only[i].name;
2198                         offset[j] = diag_device_only[i].offset;
2199                 }
2200         }
2201 }
2202
2203 static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
2204         .alloc_hw_stats = mlx4_ib_alloc_hw_stats,
2205         .get_hw_stats = mlx4_ib_get_hw_stats,
2206 };
2207
2208 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2209 {
2210         struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2211         int i;
2212         int ret;
2213         bool per_port = !!(ibdev->dev->caps.flags2 &
2214                 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2215
2216         if (mlx4_is_slave(ibdev->dev))
2217                 return 0;
2218
2219         for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2220                 /* i == 1 means we are building port counters */
2221                 if (i && !per_port)
2222                         continue;
2223
2224                 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2225                                                     &diag[i].offset,
2226                                                     &diag[i].num_counters, i);
2227                 if (ret)
2228                         goto err_alloc;
2229
2230                 mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2231                                            diag[i].offset, i);
2232         }
2233
2234         ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
2235
2236         return 0;
2237
2238 err_alloc:
2239         if (i) {
2240                 kfree(diag[i - 1].name);
2241                 kfree(diag[i - 1].offset);
2242         }
2243
2244         return ret;
2245 }
2246
2247 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2248 {
2249         int i;
2250
2251         for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2252                 kfree(ibdev->diag_counters[i].offset);
2253                 kfree(ibdev->diag_counters[i].name);
2254         }
2255 }
2256
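/*
 * Propagate a netdev MAC change into the proxy QP1 of an SR-IOV
 * master: register the new source MAC, point the QP at it via
 * MLX4_UPDATE_QP_SMAC, and unregister whichever MAC (the old one,
 * or the new one on failure) is no longer referenced.
 */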
2257 #define MLX4_IB_INVALID_MAC     ((u64)-1)
2258 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2259                                struct net_device *dev,
2260                                int port)
2261 {
2262         u64 new_smac = 0;
2263         u64 release_mac = MLX4_IB_INVALID_MAC;
2264         struct mlx4_ib_qp *qp;
2265
2266         read_lock(&dev_base_lock);
2267         new_smac = mlx4_mac_to_u64(dev->dev_addr);
2268         read_unlock(&dev_base_lock);
2269
2270         atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2271
2272         /* no need to update QP1 or register a MAC in non-SRIOV */
2273         if (!mlx4_is_mfunc(ibdev->dev))
2274                 return;
2275
2276         mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2277         qp = ibdev->qp1_proxy[port - 1];
2278         if (qp) {
2279                 int new_smac_index;
2280                 u64 old_smac;
2281                 struct mlx4_update_qp_params update_params;
2282
2283                 mutex_lock(&qp->mutex);
2284                 old_smac = qp->pri.smac;
2285                 if (new_smac == old_smac)
2286                         goto unlock;
2287
2288                 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2289
2290                 if (new_smac_index < 0)
2291                         goto unlock;
2292
2293                 update_params.smac_index = new_smac_index;
2294                 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2295                                    &update_params)) {
2296                         release_mac = new_smac;
2297                         goto unlock;
2298                 }
2299                 /* if the old port was zero, no MAC was registered yet for this QP */
2300                 if (qp->pri.smac_port)
2301                         release_mac = old_smac;
2302                 qp->pri.smac = new_smac;
2303                 qp->pri.smac_port = port;
2304                 qp->pri.smac_index = new_smac_index;
2305         }
2306
2307 unlock:
2308         if (release_mac != MLX4_IB_INVALID_MAC)
2309                 mlx4_unregister_mac(ibdev->dev, port, release_mac);
2310         if (qp)
2311                 mutex_unlock(&qp->mutex);
2312         mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2313 }
2314
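/*
 * Called from the netdev notifier under RTNL: refresh the cached
 * netdev for every IBoE port, dispatch PORT_ACTIVE/PORT_ERR events
 * on genuine up/down transitions, and trigger an SMAC update when
 * the event may have changed the port's MAC address.
 */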
2315 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2316                                  struct net_device *dev,
2317                                  unsigned long event)
2319 {
2320         struct mlx4_ib_iboe *iboe;
2321         int update_qps_port = -1;
2322         int port;
2323
2324         ASSERT_RTNL();
2325
2326         iboe = &ibdev->iboe;
2327
2328         spin_lock_bh(&iboe->lock);
2329         mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2330
2331                 iboe->netdevs[port - 1] =
2332                         mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2333
2334                 if (dev == iboe->netdevs[port - 1] &&
2335                     (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2336                      event == NETDEV_UP || event == NETDEV_CHANGE))
2337                         update_qps_port = port;
2338
2339                 if (dev == iboe->netdevs[port - 1] &&
2340                     (event == NETDEV_UP || event == NETDEV_DOWN)) {
2341                         enum ib_port_state port_state;
2342                         struct ib_event ibev = { };
2343
2344                         if (ib_get_cached_port_state(&ibdev->ib_dev, port,
2345                                                      &port_state))
2346                                 continue;
2347
2348                         if (event == NETDEV_UP &&
2349                             (port_state != IB_PORT_ACTIVE ||
2350                              iboe->last_port_state[port - 1] != IB_PORT_DOWN))
2351                                 continue;
2352                         if (event == NETDEV_DOWN &&
2353                             (port_state != IB_PORT_DOWN ||
2354                              iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
2355                                 continue;
2356                         iboe->last_port_state[port - 1] = port_state;
2357
2358                         ibev.device = &ibdev->ib_dev;
2359                         ibev.element.port_num = port;
2360                         ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
2361                                                           IB_EVENT_PORT_ERR;
2362                         ib_dispatch_event(&ibev);
2363                 }
2364
2365         }
2366         spin_unlock_bh(&iboe->lock);
2367
2368         if (update_qps_port > 0)
2369                 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
2370 }
2371
2372 static int mlx4_ib_netdev_event(struct notifier_block *this,
2373                                 unsigned long event, void *ptr)
2374 {
2375         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2376         struct mlx4_ib_dev *ibdev;
2377
2378         if (!net_eq(dev_net(dev), &init_net))
2379                 return NOTIFY_DONE;
2380
2381         ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2382         mlx4_ib_scan_netdevs(ibdev, dev, event);
2383
2384         return NOTIFY_DONE;
2385 }
2386
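/*
 * On an SR-IOV master, seed every slave's virt-to-phys pkey mapping:
 * the master (and index 0 for all slaves) gets the identity mapping,
 * while other slave indexes initially point at the last pkey table
 * entry.  The phys pkey cache starts as 0xFFFF at index 0 and zero
 * elsewhere.
 */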
2387 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2388 {
2389         int port;
2390         int slave;
2391         int i;
2392
2393         if (mlx4_is_master(ibdev->dev)) {
2394                 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2395                      ++slave) {
2396                         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2397                                 for (i = 0;
2398                                      i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2399                                      ++i) {
2400                                         ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2401                                         /* master has the identity virt2phys pkey mapping */
2402                                                 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2403                                                         ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2404                                         mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2405                                                              ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2406                                 }
2407                         }
2408                 }
2409                 /* initialize pkey cache */
2410                 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2411                         for (i = 0;
2412                              i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2413                              ++i)
2414                                 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2415                                         (i) ? 0 : 0xFFFF;
2416                 }
2417         }
2418 }
2419
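/*
 * Try to give each port its own slice of completion EQs.  Vectors
 * that cannot be assigned are marked -1, and only the number of EQs
 * actually obtained is advertised to clients via num_comp_vectors.
 */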
2420 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2421 {
2422         int i, j, eq = 0, total_eqs = 0;
2423
2424         ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2425                                   sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2426         if (!ibdev->eq_table)
2427                 return;
2428
2429         for (i = 1; i <= dev->caps.num_ports; i++) {
2430                 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2431                      j++, total_eqs++) {
2432                         if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
2433                                 continue;
2434                         ibdev->eq_table[eq] = total_eqs;
2435                         if (!mlx4_assign_eq(dev, i,
2436                                             &ibdev->eq_table[eq]))
2437                                 eq++;
2438                         else
2439                                 ibdev->eq_table[eq] = -1;
2440                 }
2441         }
2442
2443         for (i = eq; i < dev->caps.num_comp_vectors;
2444              ibdev->eq_table[i++] = -1)
2445                 ;
2446
2447         /* Advertise the new number of EQs to clients */
2448         ibdev->ib_dev.num_comp_vectors = eq;
2449 }
2450
2451 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2452 {
2453         int i;
2454         int total_eqs = ibdev->ib_dev.num_comp_vectors;
2455
2456         /* no eqs were allocated */
2457         if (!ibdev->eq_table)
2458                 return;
2459
2460         /* Reset the advertised EQ number */
2461         ibdev->ib_dev.num_comp_vectors = 0;
2462
2463         for (i = 0; i < total_eqs; i++)
2464                 mlx4_release_eq(dev, ibdev->eq_table[i]);
2465
2466         kfree(ibdev->eq_table);
2467         ibdev->eq_table = NULL;
2468 }
2469
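/*
 * Report per-port immutable data: IB ports are plain IBA ports,
 * while Ethernet ports advertise RoCE (v1, plus v2 when the device
 * supports it) together with raw packet QP support.
 */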
2470 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2471                                struct ib_port_immutable *immutable)
2472 {
2473         struct ib_port_attr attr;
2474         struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2475         int err;
2476
2477         if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2478                 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2479                 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2480         } else {
2481                 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2482                         immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2483                 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2484                         immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2485                                 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2486                 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2487                 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2488                     RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2489                         immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2490         }
2491
2492         err = ib_query_port(ibdev, port_num, &attr);
2493         if (err)
2494                 return err;
2495
2496         immutable->pkey_tbl_len = attr.pkey_tbl_len;
2497         immutable->gid_tbl_len = attr.gid_tbl_len;
2498
2499         return 0;
2500 }
2501
2502 static void get_fw_ver_str(struct ib_device *device, char *str)
2503 {
2504         struct mlx4_ib_dev *dev =
2505                 container_of(device, struct mlx4_ib_dev, ib_dev);
2506         snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
2507                  (int) (dev->dev->caps.fw_ver >> 32),
2508                  (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2509                  (int) dev->dev->caps.fw_ver & 0xffff);
2510 }
2511
2512 static const struct ib_device_ops mlx4_ib_dev_ops = {
2513         .owner = THIS_MODULE,
2514         .driver_id = RDMA_DRIVER_MLX4,
2515         .uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
2516
2517         .add_gid = mlx4_ib_add_gid,
2518         .alloc_mr = mlx4_ib_alloc_mr,
2519         .alloc_pd = mlx4_ib_alloc_pd,
2520         .alloc_ucontext = mlx4_ib_alloc_ucontext,
2521         .attach_mcast = mlx4_ib_mcg_attach,
2522         .create_ah = mlx4_ib_create_ah,
2523         .create_cq = mlx4_ib_create_cq,
2524         .create_qp = mlx4_ib_create_qp,
2525         .create_srq = mlx4_ib_create_srq,
2526         .dealloc_pd = mlx4_ib_dealloc_pd,
2527         .dealloc_ucontext = mlx4_ib_dealloc_ucontext,
2528         .del_gid = mlx4_ib_del_gid,
2529         .dereg_mr = mlx4_ib_dereg_mr,
2530         .destroy_ah = mlx4_ib_destroy_ah,
2531         .destroy_cq = mlx4_ib_destroy_cq,
2532         .destroy_qp = mlx4_ib_destroy_qp,
2533         .destroy_srq = mlx4_ib_destroy_srq,
2534         .detach_mcast = mlx4_ib_mcg_detach,
2535         .disassociate_ucontext = mlx4_ib_disassociate_ucontext,
2536         .drain_rq = mlx4_ib_drain_rq,
2537         .drain_sq = mlx4_ib_drain_sq,
2538         .get_dev_fw_str = get_fw_ver_str,
2539         .get_dma_mr = mlx4_ib_get_dma_mr,
2540         .get_link_layer = mlx4_ib_port_link_layer,
2541         .get_netdev = mlx4_ib_get_netdev,
2542         .get_port_immutable = mlx4_port_immutable,
2543         .map_mr_sg = mlx4_ib_map_mr_sg,
2544         .mmap = mlx4_ib_mmap,
2545         .modify_cq = mlx4_ib_modify_cq,
2546         .modify_device = mlx4_ib_modify_device,
2547         .modify_port = mlx4_ib_modify_port,
2548         .modify_qp = mlx4_ib_modify_qp,
2549         .modify_srq = mlx4_ib_modify_srq,
2550         .poll_cq = mlx4_ib_poll_cq,
2551         .post_recv = mlx4_ib_post_recv,
2552         .post_send = mlx4_ib_post_send,
2553         .post_srq_recv = mlx4_ib_post_srq_recv,
2554         .process_mad = mlx4_ib_process_mad,
2555         .query_ah = mlx4_ib_query_ah,
2556         .query_device = mlx4_ib_query_device,
2557         .query_gid = mlx4_ib_query_gid,
2558         .query_pkey = mlx4_ib_query_pkey,
2559         .query_port = mlx4_ib_query_port,
2560         .query_qp = mlx4_ib_query_qp,
2561         .query_srq = mlx4_ib_query_srq,
2562         .reg_user_mr = mlx4_ib_reg_user_mr,
2563         .req_notify_cq = mlx4_ib_arm_cq,
2564         .rereg_user_mr = mlx4_ib_rereg_user_mr,
2565         .resize_cq = mlx4_ib_resize_cq,
2566
2567         INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
2568         INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
2569         INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
2570         INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
2571         INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
2572 };
2573
2574 static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
2575         .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
2576         .create_wq = mlx4_ib_create_wq,
2577         .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
2578         .destroy_wq = mlx4_ib_destroy_wq,
2579         .modify_wq = mlx4_ib_modify_wq,
2580
2581         INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
2582                            ib_rwq_ind_tbl),
2583 };
2584
2585 static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
2586         .alloc_mw = mlx4_ib_alloc_mw,
2587         .dealloc_mw = mlx4_ib_dealloc_mw,
2588
2589         INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
2590 };
2591
2592 static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
2593         .alloc_xrcd = mlx4_ib_alloc_xrcd,
2594         .dealloc_xrcd = mlx4_ib_dealloc_xrcd,
2595
2596         INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
2597 };
2598
2599 static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
2600         .create_flow = mlx4_ib_create_flow,
2601         .destroy_flow = mlx4_ib_destroy_flow,
2602 };
2603
2604 static void *mlx4_ib_add(struct mlx4_dev *dev)
2605 {
2606         struct mlx4_ib_dev *ibdev;
2607         int num_ports = 0;
2608         int i, j;
2609         int err;
2610         struct mlx4_ib_iboe *iboe;
2611         int ib_num_ports = 0;
2612         int num_req_counters;
2613         int allocated;
2614         u32 counter_index;
2615         struct counter_index *new_counter_index = NULL;
2616
2617         pr_info_once("%s", mlx4_ib_version);
2618
2619         num_ports = 0;
2620         mlx4_foreach_ib_transport_port(i, dev)
2621                 num_ports++;
2622
2623         /* No point in registering a device with no ports... */
2624         if (num_ports == 0)
2625                 return NULL;
2626
2627         ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
2628         if (!ibdev) {
2629                 dev_err(&dev->persist->pdev->dev,
2630                         "Device struct alloc failed\n");
2631                 return NULL;
2632         }
2633
2634         iboe = &ibdev->iboe;
2635
2636         if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2637                 goto err_dealloc;
2638
2639         if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2640                 goto err_pd;
2641
2642         ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2643                                  PAGE_SIZE);
2644         if (!ibdev->uar_map)
2645                 goto err_uar;
2646         MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2647
2648         ibdev->dev = dev;
2649         ibdev->bond_next_port   = 0;
2650
2651         ibdev->ib_dev.node_type         = RDMA_NODE_IB_CA;
2652         ibdev->ib_dev.local_dma_lkey    = dev->caps.reserved_lkey;
2653         ibdev->num_ports                = num_ports;
2654         ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
2655                                                 1 : ibdev->num_ports;
2656         ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
2657         ibdev->ib_dev.dev.parent        = &dev->persist->pdev->dev;
2658
2659         ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
2660
2661         if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2662             ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2663             IB_LINK_LAYER_ETHERNET) ||
2664             (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2665             IB_LINK_LAYER_ETHERNET)))
2666                 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
2667
2668         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2669             dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2670                 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
2671
2672         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2673                 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
2674         }
2675
2676         if (check_flow_steering_support(dev)) {
2677                 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2678                 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
2679         }
2680
2681         if (!dev->caps.userspace_caps)
2682                 ibdev->ib_dev.ops.uverbs_abi_ver =
2683                         MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2684
2685         mlx4_ib_alloc_eqs(dev, ibdev);
2686
2687         spin_lock_init(&iboe->lock);
2688
2689         if (init_node_data(ibdev))
2690                 goto err_map;
2691         mlx4_init_sl2vl_tbl(ibdev);
2692
2693         for (i = 0; i < ibdev->num_ports; ++i) {
2694                 mutex_init(&ibdev->counters_table[i].mutex);
2695                 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2696                 iboe->last_port_state[i] = IB_PORT_DOWN;
2697         }
2698
2699         num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2700         for (i = 0; i < num_req_counters; ++i) {
2701                 mutex_init(&ibdev->qp1_proxy_lock[i]);
2702                 allocated = 0;
2703                 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2704                                                 IB_LINK_LAYER_ETHERNET) {
2705                         err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2706                                                  MLX4_RES_USAGE_DRIVER);
2707                         /* if allocating a new counter failed, use the default */
2708                         if (err)
2709                                 counter_index =
2710                                         mlx4_get_default_counter_index(dev,
2711                                                                        i + 1);
2712                         else
2713                                 allocated = 1;
2714                 } else { /* IB_LINK_LAYER_INFINIBAND: use the default counter */
2715                         counter_index = mlx4_get_default_counter_index(dev,
2716                                                                        i + 1);
2717                 }
2718                 new_counter_index = kmalloc(sizeof(*new_counter_index),
2719                                             GFP_KERNEL);
2720                 if (!new_counter_index) {
2721                         if (allocated)
2722                                 mlx4_counter_free(ibdev->dev, counter_index);
2723                         goto err_counter;
2724                 }
2725                 new_counter_index->index = counter_index;
2726                 new_counter_index->allocated = allocated;
2727                 list_add_tail(&new_counter_index->list,
2728                               &ibdev->counters_table[i].counters_list);
2729                 ibdev->counters_table[i].default_counter = counter_index;
2730                 pr_info("counter index %d for port %d allocated %d\n",
2731                         counter_index, i + 1, allocated);
2732         }
	if (mlx4_is_bonded(dev))
		for (i = 1; i < ibdev->num_ports; ++i) {
			new_counter_index =
					kmalloc(sizeof(*new_counter_index),
						GFP_KERNEL);
			if (!new_counter_index)
				goto err_counter;
			new_counter_index->index = counter_index;
			new_counter_index->allocated = 0;
			list_add_tail(&new_counter_index->list,
				      &ibdev->counters_table[i].counters_list);
			ibdev->counters_table[i].default_counter =
								counter_index;
		}

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_num_ports++;

	spin_lock_init(&ibdev->sm_lock);
	mutex_init(&ibdev->cap_mask_mutex);
	INIT_LIST_HEAD(&ibdev->qp_list);
	spin_lock_init(&ibdev->reset_flow_resource_lock);

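	/*
	 * With device-managed flow steering, reserve a QP-number range
	 * for userspace UC QPs.  The range is opened up (bitmap cleared
	 * and registered with the firmware) only when the device supports
	 * DMFS for IPoIB; otherwise the bitmap is left fully set so no
	 * steering QPNs are handed out.
	 */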
	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    ib_num_ports) {
		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
					    MLX4_IB_UC_STEER_QPN_ALIGN,
					    &ibdev->steer_qpn_base, 0,
					    MLX4_RES_USAGE_DRIVER);
		if (err)
			goto err_counter;

		ibdev->ib_uc_qpns_bitmap =
			kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
				      sizeof(long),
				      GFP_KERNEL);
		if (!ibdev->ib_uc_qpns_bitmap)
			goto err_steer_qp_release;

		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
			bitmap_zero(ibdev->ib_uc_qpns_bitmap,
				    ibdev->steer_qpn_count);
			err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
					dev, ibdev->steer_qpn_base,
					ibdev->steer_qpn_base +
					ibdev->steer_qpn_count - 1);
			if (err)
				goto err_steer_free_bitmap;
		} else {
			bitmap_fill(ibdev->ib_uc_qpns_bitmap,
				    ibdev->steer_qpn_count);
		}
	}

	for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
		atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

	if (mlx4_ib_alloc_diag_counters(ibdev))
		goto err_steer_free_bitmap;

	rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
	if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
			       &dev->persist->pdev->dev))
		goto err_diag_counters;

	if (mlx4_ib_mad_init(ibdev))
		goto err_reg;

	if (mlx4_ib_init_sriov(ibdev))
		goto err_mad;

	if (!iboe->nb.notifier_call) {
		iboe->nb.notifier_call = mlx4_ib_netdev_event;
		err = register_netdevice_notifier(&iboe->nb);
		if (err) {
			iboe->nb.notifier_call = NULL;
			goto err_notif;
		}
	}
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
		err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
		if (err)
			goto err_notif;
	}

	ibdev->ib_active = true;
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
					 &ibdev->ib_dev);

	if (mlx4_is_mfunc(ibdev->dev))
		init_pkeys(ibdev);

	/* create paravirt contexts for any VFs that are already active */
	if (mlx4_is_master(ibdev->dev)) {
		for (j = 0; j < MLX4_MFUNC_MAX; j++) {
			if (j == mlx4_master_func_num(ibdev->dev))
				continue;
			if (mlx4_is_slave_active(ibdev->dev, j))
				do_slave_init(ibdev, j, 1);
		}
	}
	return ibdev;

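/*
 * Error unwind: the labels below run in reverse order of the setup steps
 * above, releasing everything acquired before the point of failure.
 */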
err_notif:
	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}
	flush_workqueue(wq);

	mlx4_ib_close_sriov(ibdev);

err_mad:
	mlx4_ib_mad_cleanup(ibdev);

err_reg:
	ib_unregister_device(&ibdev->ib_dev);

err_diag_counters:
	mlx4_ib_diag_cleanup(ibdev);

err_steer_free_bitmap:
	kfree(ibdev->ib_uc_qpns_bitmap);

err_steer_qp_release:
	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
			      ibdev->steer_qpn_count);
err_counter:
	for (i = 0; i < ibdev->num_ports; ++i)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);

err_map:
	mlx4_ib_free_eqs(dev, ibdev);
	iounmap(ibdev->uar_map);

err_uar:
	mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
	mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
	ib_dealloc_device(&ibdev->ib_dev);

	return NULL;
}

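/*
 * Allocate a naturally aligned, power-of-two-sized block of @count QP
 * numbers from the reserved steering range and return its base in @qpn.
 * Returns 0 on success or a negative value if the bitmap is exhausted.
 */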
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
{
	int offset;

	WARN_ON(!dev->ib_uc_qpns_bitmap);

	offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
					 dev->steer_qpn_count,
					 get_count_order(count));
	if (offset < 0)
		return offset;

	*qpn = dev->steer_qpn_base + offset;
	return 0;
}

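/*
 * Return a block of steering QP numbers to the bitmap.  A zero @qpn, or a
 * device without device-managed steering, is silently ignored; a QPN below
 * the reserved base indicates a caller bug and only triggers a warning.
 */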
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
{
	if (!qpn ||
	    dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
		return;

	if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
		 qpn, dev->steer_qpn_base))
		/* qpn is outside the reserved range; nothing to release */
		return;

	bitmap_release_region(dev->ib_uc_qpns_bitmap,
			      qpn - dev->steer_qpn_base,
			      get_count_order(count));
}

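/*
 * Attach or detach the catch-all IB L2 steering rule for a UC QP: on
 * attach, build an ib_flow_attr holding a single empty (zero-mask) IB
 * spec so all IB traffic destined to the QP is steered to it; on detach,
 * destroy the previously registered rule.
 */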
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach)
{
	int err;
	size_t flow_size;
	struct ib_flow_attr *flow = NULL;
	struct ib_flow_spec_ib *ib_spec;

	if (is_attach) {
		flow_size = sizeof(struct ib_flow_attr) +
			    sizeof(struct ib_flow_spec_ib);
		flow = kzalloc(flow_size, GFP_KERNEL);
		if (!flow)
			return -ENOMEM;
		flow->port = mqp->port;
		flow->num_of_specs = 1;
		flow->size = flow_size;
		ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
		ib_spec->type = IB_FLOW_SPEC_IB;
		ib_spec->size = sizeof(struct ib_flow_spec_ib);
		/* Add an empty rule for IB L2 */
		memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));

		err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
					    MLX4_FS_REGULAR, &mqp->reg_id);
	} else {
		err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
	}
	kfree(flow);
	return err;
}

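/*
 * Device removal: tear everything down in reverse order of
 * initialization: devlink port types, the netdev notifier, SR-IOV and
 * MAD contexts, the ib_device registration, and finally the steering QP
 * range, counters, EQs, UAR and PD.
 */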
static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
	struct mlx4_ib_dev *ibdev = ibdev_ptr;
	int p;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
	ibdev->ib_active = false;
	flush_workqueue(wq);

	if (ibdev->iboe.nb.notifier_call) {
		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
			pr_warn("failure unregistering notifier\n");
		ibdev->iboe.nb.notifier_call = NULL;
	}

	mlx4_ib_close_sriov(ibdev);
	mlx4_ib_mad_cleanup(ibdev);
	ib_unregister_device(&ibdev->ib_dev);
	mlx4_ib_diag_cleanup(ibdev);

	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
			      ibdev->steer_qpn_count);
	kfree(ibdev->ib_uc_qpns_bitmap);

	iounmap(ibdev->uar_map);
	for (p = 0; p < ibdev->num_ports; ++p)
		mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);

	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
		mlx4_CLOSE_PORT(dev, p);

	mlx4_ib_free_eqs(dev, ibdev);

	mlx4_uar_free(dev, &ibdev->priv_uar);
	mlx4_pd_free(dev, ibdev->priv_pdn);
	ib_dealloc_device(&ibdev->ib_dev);
}

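/*
 * Bring up (do_init != 0) or tear down the paravirtualized tunnel QPs
 * for one slave (VF).  Only the master does this; one work item per
 * active port of the slave is queued on the demux workqueue, unless
 * SR-IOV teardown is already in progress.
 */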
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
	struct mlx4_ib_demux_work **dm = NULL;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	unsigned long flags;
	struct mlx4_active_ports actv_ports;
	unsigned int ports;
	unsigned int first_port;

	if (!mlx4_is_master(dev))
		return;

	actv_ports = mlx4_get_active_ports(dev, slave);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);

	dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
	if (!dm)
		return;

	for (i = 0; i < ports; i++) {
		dm[i] = kmalloc(sizeof(*dm[i]), GFP_ATOMIC);
		if (!dm[i]) {
			while (--i >= 0)
				kfree(dm[i]);
			goto out;
		}
		INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
		dm[i]->port = first_port + i + 1;
		dm[i]->slave = slave;
		dm[i]->do_init = do_init;
		dm[i]->dev = ibdev;
	}
	/* initialize or tear down tunnel QPs for the slave */
	spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
	if (!ibdev->sriov.is_going_down) {
		for (i = 0; i < ports; i++)
			queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
	} else {
		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
		for (i = 0; i < ports; i++)
			kfree(dm[i]);
	}
out:
	kfree(dm);
}

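/*
 * After a catastrophic firmware error, walk every QP on the device and,
 * for each work queue with outstanding work, put its completion queue on
 * a notify list.  The completion handlers are then invoked so consumers
 * can reap the flushed-in-error completions.
 */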
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
	struct mlx4_ib_qp *mqp;
	unsigned long flags_qp;
	unsigned long flags_cq;
	struct mlx4_ib_cq *send_mcq, *recv_mcq;
	struct list_head cq_notify_list;
	struct mlx4_cq *mcq;
	unsigned long flags;

	pr_warn("mlx4_ib_handle_catas_error started\n");
	INIT_LIST_HEAD(&cq_notify_list);

	/* Go over the QP list of this ibdev, synchronizing with QP create/destroy. */
	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
		if (mqp->sq.tail != mqp->sq.head) {
			send_mcq = to_mcq(mqp->ibqp.send_cq);
			spin_lock_irqsave(&send_mcq->lock, flags_cq);
			if (send_mcq->mcq.comp &&
			    mqp->ibqp.send_cq->comp_handler) {
				if (!send_mcq->mcq.reset_notify_added) {
					send_mcq->mcq.reset_notify_added = 1;
					list_add_tail(&send_mcq->mcq.reset_notify,
						      &cq_notify_list);
				}
			}
			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
		}
		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
		/* Now, handle the QP's receive queue */
		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
		/* no handling is needed when the QP uses an SRQ */
		if (!mqp->ibqp.srq) {
			if (mqp->rq.tail != mqp->rq.head) {
				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
				if (recv_mcq->mcq.comp &&
				    mqp->ibqp.recv_cq->comp_handler) {
					if (!recv_mcq->mcq.reset_notify_added) {
						recv_mcq->mcq.reset_notify_added = 1;
						list_add_tail(&recv_mcq->mcq.reset_notify,
							      &cq_notify_list);
					}
				}
				spin_unlock_irqrestore(&recv_mcq->lock,
						       flags_cq);
			}
		}
		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
	}

	list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
		mcq->comp(mcq);
	}
	spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
	pr_warn("mlx4_ib_handle_catas_error ended\n");
}

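/*
 * In bonded (LAG) mode the two physical ports are exposed to the IB core
 * as a single port.  Recompute the aggregate state (ACTIVE if any slave
 * netdev is running with carrier, DOWN otherwise) and dispatch a
 * PORT_ACTIVE or PORT_ERR event for port 1.
 */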
static void handle_bonded_port_state_event(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *ibdev = ew->ib_dev;
	enum ib_port_state bonded_port_state = IB_PORT_NOP;
	int i;
	struct ib_event ibev;

	kfree(ew);
	spin_lock_bh(&ibdev->iboe.lock);
	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
		enum ib_port_state curr_port_state;

		if (!curr_netdev)
			continue;

		curr_port_state =
			(netif_running(curr_netdev) &&
			 netif_carrier_ok(curr_netdev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;

		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
			curr_port_state : IB_PORT_ACTIVE;
	}
	spin_unlock_bh(&ibdev->iboe.lock);

	ibev.device = &ibdev->ib_dev;
	ibev.element.port_num = 1;
	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

	ib_dispatch_event(&ibev);
}

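/*
 * Refresh the cached SL-to-VL mapping for @port from the firmware.  If
 * the query fails, fall back to all zeroes, which maps every SL to VL 0.
 */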
void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
{
	u64 sl2vl;
	int err;

	err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
	if (err) {
		pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
		       port, err);
		sl2vl = 0;
	}
	atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
}

static void ib_sl2vl_update_work(struct work_struct *work)
{
	struct ib_event_work *ew =
		container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *mdev = ew->ib_dev;
	int port = ew->port;

	mlx4_ib_sl2vl_update(mdev, port);

	kfree(ew);
}

void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
				     int port)
{
	struct ib_event_work *ew;

	ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
	if (ew) {
		INIT_WORK(&ew->work, ib_sl2vl_update_work);
		ew->port = port;
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
	}
}

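/*
 * Translate mlx4 core events into IB events.  Port up/down on a bonded
 * device is deferred to a work item that re-evaluates the aggregate bond
 * state; port-management-change EQEs are queued on the master (which
 * uses GEN_EQE) or handled inline on slaves; slave init/shutdown drive
 * tunnel QP and alias GUID updates rather than an IB event.
 */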
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
{
	struct ib_event ibev;
	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
	struct mlx4_eqe *eqe = NULL;
	struct ib_event_work *ew;
	int p = 0;

	if (mlx4_is_bonded(dev) &&
	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
	    (event == MLX4_DEV_EVENT_PORT_DOWN))) {
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			return;
		INIT_WORK(&ew->work, handle_bonded_port_state_event);
		ew->ib_dev = ibdev;
		queue_work(wq, &ew->work);
		return;
	}

	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
		eqe = (struct mlx4_eqe *)param;
	else
		p = (int) param;

	switch (event) {
	case MLX4_DEV_EVENT_PORT_UP:
		if (p > ibdev->num_ports)
			return;
		if (!mlx4_is_slave(dev) &&
		    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
			IB_LINK_LAYER_INFINIBAND) {
			if (mlx4_is_master(dev))
				mlx4_ib_invalidate_all_guid_record(ibdev, p);
			if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
			    !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
				mlx4_sched_ib_sl2vl_update_work(ibdev, p);
		}
		ibev.event = IB_EVENT_PORT_ACTIVE;
		break;

	case MLX4_DEV_EVENT_PORT_DOWN:
		if (p > ibdev->num_ports)
			return;
		ibev.event = IB_EVENT_PORT_ERR;
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		mlx4_ib_handle_catas_error(ibdev);
		break;

	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
		if (!ew)
			break;

		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
		memcpy(&ew->ib_eqe, eqe, sizeof(*eqe));
		ew->ib_dev = ibdev;
		/* need to queue only for port owner, which uses GEN_EQE */
		if (mlx4_is_master(dev))
			queue_work(wq, &ew->work);
		else
			handle_port_mgmt_change_event(&ew->work);
		return;

	case MLX4_DEV_EVENT_SLAVE_INIT:
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 1);
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       1);
			}
		}
		return;

	case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
		if (mlx4_is_master(dev)) {
			int i;

			for (i = 1; i <= ibdev->num_ports; i++) {
				if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
					== IB_LINK_LAYER_INFINIBAND)
					mlx4_ib_slave_alias_guid_event(ibdev,
								       p, i,
								       0);
			}
		}
		/* here, p is the slave id */
		do_slave_init(ibdev, p, 0);
		return;

	default:
		return;
	}

	ibev.device           = ibdev_ptr;
	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

	ib_dispatch_event(&ibev);
}

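/*
 * Hooks registered with the mlx4 core.  MLX4_INTFF_BONDING marks this
 * consumer as bonding-aware, so the core keeps it attached when the two
 * ports are placed in link-aggregation (bonded) mode.
 */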
static struct mlx4_interface mlx4_ib_interface = {
	.add		= mlx4_ib_add,
	.remove		= mlx4_ib_remove,
	.event		= mlx4_ib_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING
};

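/*
 * Module init: create the ordered workqueue used for deferred event
 * handling, set up the multicast-group (MCG) demux machinery, and
 * register with the mlx4 core, which then calls .add() for each device.
 */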
static int __init mlx4_ib_init(void)
{
	int err;

	wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;

	err = mlx4_ib_mcg_init();
	if (err)
		goto clean_wq;

	err = mlx4_register_interface(&mlx4_ib_interface);
	if (err)
		goto clean_mcg;

	return 0;

clean_mcg:
	mlx4_ib_mcg_destroy();

clean_wq:
	destroy_workqueue(wq);
	return err;
}

static void __exit mlx4_ib_cleanup(void)
{
	mlx4_unregister_interface(&mlx4_ib_interface);
	mlx4_ib_mcg_destroy();
	destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);