/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME        MLX4_IB_DRV_NAME
#define DRV_VERSION     "4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
        DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
        DRV_VERSION "\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
                                                    u32 port_num);

static struct workqueue_struct *wq;

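/*
 * Prepare a lid-routed SubnGet() SMP header; callers fill in attr_id and
 * attr_mod before sending it through mlx4_MAD_IFC().
 */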
static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version  = 1;
        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method        = IB_MGMT_METHOD_GET;
}

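/*
 * Device-managed flow steering (DMFS) is only reported as supported when
 * every port type in use has the matching capability (FS_EN for Ethernet,
 * DMFS_IPOIB for IB) and IB ports are not used in a multi-function setup.
 */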
static int check_flow_steering_support(struct mlx4_dev *dev)
{
        int eth_num_ports = 0;
        int ib_num_ports = 0;

        int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

        if (dmfs) {
                int i;
                mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
                        eth_num_ports++;
                mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                        ib_num_ports++;
                dmfs &= (!ib_num_ports ||
                         (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
                        (!eth_num_ports ||
                         (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
                if (ib_num_ports && mlx4_is_mfunc(dev)) {
                        pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
                        dmfs = 0;
                }
        }
        return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
        int ib_ports = 0;
        int i;

        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                ib_ports++;

        return ib_ports;
}

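/*
 * Return the netdev backing @port_num, with a reference held by dev_hold().
 * In bonded mode, prefer the bond's currently active slave when one exists.
 */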
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device,
                                             u32 port_num)
{
        struct mlx4_ib_dev *ibdev = to_mdev(device);
        struct net_device *dev;

        rcu_read_lock();
        dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);

        if (dev) {
                if (mlx4_is_bonded(ibdev->dev)) {
                        struct net_device *upper = NULL;

                        upper = netdev_master_upper_dev_get_rcu(dev);
                        if (upper) {
                                struct net_device *active;

                                active = bond_option_active_slave_get_rcu(netdev_priv(upper));
                                if (active)
                                        dev = active;
                        }
                }
        }
        if (dev)
                dev_hold(dev);

        rcu_read_unlock();
        return dev;
}

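/*
 * Program the port's GID table into firmware using the legacy (RoCE v1 only)
 * SET_PORT layout; in bonded mode the table is also written to port 2.
 */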
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
                                  struct mlx4_ib_dev *ibdev,
                                  u32 port_num)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        struct mlx4_dev *dev = ibdev->dev;
        int i;
        union ib_gid *gid_tbl;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return -ENOMEM;

        gid_tbl = mailbox->buf;

        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
                memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

        err = mlx4_cmd(dev, mailbox->dma,
                       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
                       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                       MLX4_CMD_WRAPPED);
        if (mlx4_is_bonded(dev))
                err += mlx4_cmd(dev, mailbox->dma,
                                MLX4_SET_PORT_GID_TABLE << 8 | 2,
                                1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                                MLX4_CMD_WRAPPED);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}

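/*
 * Program the port's GID table using the RoCE v1/v2 SET_PORT layout; entries
 * whose GID type is RoCE v2 (UDP encap) are marked version 2, with type 1
 * for GIDs that are not IPv4-mapped.
 */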
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
                                     struct mlx4_ib_dev *ibdev,
                                     u32 port_num)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        struct mlx4_dev *dev = ibdev->dev;
        int i;
        struct {
                union ib_gid    gid;
                __be32          rsrvd1[2];
                __be16          rsrvd2;
                u8              type;
                u8              version;
                __be32          rsrvd3;
        } *gid_tbl;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return -ENOMEM;

        gid_tbl = mailbox->buf;
        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
                memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
                if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
                        gid_tbl[i].version = 2;
                        if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
                                gid_tbl[i].type = 1;
                }
        }

        err = mlx4_cmd(dev, mailbox->dma,
                       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
                       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                       MLX4_CMD_WRAPPED);
        if (mlx4_is_bonded(dev))
                err += mlx4_cmd(dev, mailbox->dma,
                                MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
                                1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                                MLX4_CMD_WRAPPED);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}

static int mlx4_ib_update_gids(struct gid_entry *gids,
                               struct mlx4_ib_dev *ibdev,
                               u32 port_num)
{
        if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
                return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

        return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

static void free_gid_entry(struct gid_entry *entry)
{
        memset(&entry->gid, 0, sizeof(entry->gid));
        kfree(entry->ctx);
        entry->ctx = NULL;
}

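/*
 * Add a GID to the per-port software GID cache, reusing an existing entry
 * when the GID, GID type and VLAN all match; for newly added entries the
 * whole table is re-programmed into the hardware.
 */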
static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
        struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
        struct mlx4_port_gid_table   *port_gid_table;
        int free = -1, found = -1;
        int ret = 0;
        int hw_update = 0;
        int i;
        struct gid_entry *gids = NULL;
        u16 vlan_id = 0xffff;
        u8 mac[ETH_ALEN];

        if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
                return -EINVAL;

        if (attr->port_num > MLX4_MAX_PORTS)
                return -EINVAL;

        if (!context)
                return -EINVAL;

        ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
        if (ret)
                return ret;
        port_gid_table = &iboe->gids[attr->port_num - 1];
        spin_lock_bh(&iboe->lock);
        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
                if (!memcmp(&port_gid_table->gids[i].gid,
                            &attr->gid, sizeof(attr->gid)) &&
                    port_gid_table->gids[i].gid_type == attr->gid_type &&
                    port_gid_table->gids[i].vlan_id == vlan_id)  {
                        found = i;
                        break;
                }
                if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
                        free = i; /* HW has space */
        }

        if (found < 0) {
                if (free < 0) {
                        ret = -ENOSPC;
                } else {
                        port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
                        if (!port_gid_table->gids[free].ctx) {
                                ret = -ENOMEM;
                        } else {
                                *context = port_gid_table->gids[free].ctx;
                                memcpy(&port_gid_table->gids[free].gid,
                                       &attr->gid, sizeof(attr->gid));
                                port_gid_table->gids[free].gid_type = attr->gid_type;
                                port_gid_table->gids[free].vlan_id = vlan_id;
                                port_gid_table->gids[free].ctx->real_index = free;
                                port_gid_table->gids[free].ctx->refcount = 1;
                                hw_update = 1;
                        }
                }
        } else {
                struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
                *context = ctx;
                ctx->refcount++;
        }
        if (!ret && hw_update) {
                gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
                                     GFP_ATOMIC);
                if (!gids) {
                        ret = -ENOMEM;
                        *context = NULL;
                        free_gid_entry(&port_gid_table->gids[free]);
                } else {
                        for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
                                memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
                                gids[i].gid_type = port_gid_table->gids[i].gid_type;
                        }
                }
        }
        spin_unlock_bh(&iboe->lock);

        if (!ret && hw_update) {
                ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
                if (ret) {
                        spin_lock_bh(&iboe->lock);
                        *context = NULL;
                        free_gid_entry(&port_gid_table->gids[free]);
                        spin_unlock_bh(&iboe->lock);
                }
                kfree(gids);
        }

        return ret;
}

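/*
 * Drop a reference on a cached GID entry; when the last reference goes away,
 * clear the entry and re-program the hardware GID table.
 */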
static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
{
        struct gid_cache_context *ctx = *context;
        struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
        struct mlx4_port_gid_table   *port_gid_table;
        int ret = 0;
        int hw_update = 0;
        struct gid_entry *gids = NULL;

        if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
                return -EINVAL;

        if (attr->port_num > MLX4_MAX_PORTS)
                return -EINVAL;

        port_gid_table = &iboe->gids[attr->port_num - 1];
        spin_lock_bh(&iboe->lock);
        if (ctx) {
                ctx->refcount--;
                if (!ctx->refcount) {
                        unsigned int real_index = ctx->real_index;

                        free_gid_entry(&port_gid_table->gids[real_index]);
                        hw_update = 1;
                }
        }
        if (!ret && hw_update) {
                int i;

                gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
                                     GFP_ATOMIC);
                if (!gids) {
                        ret = -ENOMEM;
                } else {
                        for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
                                memcpy(&gids[i].gid,
                                       &port_gid_table->gids[i].gid,
                                       sizeof(union ib_gid));
                                gids[i].gid_type =
                                    port_gid_table->gids[i].gid_type;
                        }
                }
        }
        spin_unlock_bh(&iboe->lock);

        if (!ret && hw_update) {
                ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
                kfree(gids);
        }
        return ret;
}

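/*
 * Translate a GID table entry, as seen by the IB core, into the index that
 * is actually programmed in the hardware for this port. Ports without a
 * RoCE GID table simply use the core's index.
 */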
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
                                    const struct ib_gid_attr *attr)
{
        struct mlx4_ib_iboe *iboe = &ibdev->iboe;
        struct gid_cache_context *ctx = NULL;
        struct mlx4_port_gid_table   *port_gid_table;
        int real_index = -EINVAL;
        int i;
        unsigned long flags;
        u32 port_num = attr->port_num;

        if (port_num > MLX4_MAX_PORTS)
                return -EINVAL;

        if (mlx4_is_bonded(ibdev->dev))
                port_num = 1;

        if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
                return attr->index;

        spin_lock_irqsave(&iboe->lock, flags);
        port_gid_table = &iboe->gids[port_num - 1];

        for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
                if (!memcmp(&port_gid_table->gids[i].gid,
                            &attr->gid, sizeof(attr->gid)) &&
                    attr->gid_type == port_gid_table->gids[i].gid_type) {
                        ctx = port_gid_table->gids[i].ctx;
                        break;
                }
        if (ctx)
                real_index = ctx->real_index;
        spin_unlock_irqrestore(&iboe->lock, flags);
        return real_index;
}

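/*
 * Query device attributes: capabilities are derived from a NodeInfo MAD and
 * the cached device caps; optional response fields (core clock offset, RSS,
 * TSO) are reported only when the user buffer is large enough to hold them.
 */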
static int mlx4_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props,
                                struct ib_udata *uhw)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err;
        int have_ib_ports;
        struct mlx4_uverbs_ex_query_device cmd;
        struct mlx4_uverbs_ex_query_device_resp resp = {};
        struct mlx4_clock_params clock_params;

        if (uhw->inlen) {
                if (uhw->inlen < sizeof(cmd))
                        return -EINVAL;

                err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
                if (err)
                        return err;

                if (cmd.comp_mask)
                        return -EINVAL;

                if (cmd.reserved)
                        return -EINVAL;
        }

        resp.response_length = offsetof(typeof(resp), response_length) +
                sizeof(resp.response_length);
        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        err = -ENOMEM;
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
                           1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memset(props, 0, sizeof *props);

        have_ib_ports = num_ib_ports(dev->dev);

        props->fw_ver = dev->dev->caps.fw_ver;
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN                |
                IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
        if (dev->dev->caps.max_gso_sz &&
            (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
            (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
        if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
                props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
                props->device_cap_flags |= IB_DEVICE_XRC;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
                if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
                else
                        props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
        }
        if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = dev->dev->persist->pdev->device;
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = dev->dev->caps.page_size_cap;
        props->max_qp              = dev->dev->quotas.qp;
        props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
        props->max_send_sge =
                min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
        props->max_recv_sge =
                min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
        props->max_sge_rd = MLX4_MAX_SGE_RD;
        props->max_cq              = dev->dev->quotas.cq;
        props->max_cqe             = dev->dev->caps.max_cqes;
        props->max_mr              = dev->dev->quotas.mpt;
        props->max_pd              = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
        props->max_qp_rd_atom      = dev->dev->caps.max_qp_dest_rdma;
        props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = dev->dev->quotas.srq;
        props->max_srq_wr          = dev->dev->caps.max_srq_wqes - 1;
        props->max_srq_sge         = dev->dev->caps.max_srq_sge;
        props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
        props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
        props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
                IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->masked_atomic_cap   = props->atomic_cap;
        props->max_pkeys           = dev->dev->caps.pkey_table_len[1];
        props->max_mcast_grp       = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
        props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
        props->timestamp_mask = 0xFFFFFFFFFFFFULL;
        props->max_ah = INT_MAX;

        if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
            mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
                if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
                        props->rss_caps.max_rwq_indirection_tables =
                                props->max_qp;
                        props->rss_caps.max_rwq_indirection_table_size =
                                dev->dev->caps.max_rss_tbl_sz;
                        props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
                        props->max_wq_type_rq = props->max_qp;
                }

                if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
                        props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
        }

        props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
        props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

        if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
                resp.response_length += sizeof(resp.hca_core_clock_offset);
                if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
                        resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
                        resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
                }
        }

        if (uhw->outlen >= resp.response_length +
            sizeof(resp.max_inl_recv_sz)) {
                resp.response_length += sizeof(resp.max_inl_recv_sz);
                resp.max_inl_recv_sz  = dev->dev->caps.max_rq_sg *
                        sizeof(struct mlx4_wqe_data_seg);
        }

        if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
                if (props->rss_caps.supported_qpts) {
                        resp.rss_caps.rx_hash_function =
                                MLX4_IB_RX_HASH_FUNC_TOEPLITZ;

                        resp.rss_caps.rx_hash_fields_mask =
                                MLX4_IB_RX_HASH_SRC_IPV4 |
                                MLX4_IB_RX_HASH_DST_IPV4 |
                                MLX4_IB_RX_HASH_SRC_IPV6 |
                                MLX4_IB_RX_HASH_DST_IPV6 |
                                MLX4_IB_RX_HASH_SRC_PORT_TCP |
                                MLX4_IB_RX_HASH_DST_PORT_TCP |
                                MLX4_IB_RX_HASH_SRC_PORT_UDP |
                                MLX4_IB_RX_HASH_DST_PORT_UDP;

                        if (dev->dev->caps.tunnel_offload_mode ==
                            MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                                resp.rss_caps.rx_hash_fields_mask |=
                                        MLX4_IB_RX_HASH_INNER;
                }
                resp.response_length = offsetof(typeof(resp), rss_caps) +
                                       sizeof(resp.rss_caps);
        }

        if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
                if (dev->dev->caps.max_gso_sz &&
                    ((mlx4_ib_port_link_layer(ibdev, 1) ==
                    IB_LINK_LAYER_ETHERNET) ||
                    (mlx4_ib_port_link_layer(ibdev, 2) ==
                    IB_LINK_LAYER_ETHERNET))) {
                        resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
                        resp.tso_caps.supported_qpts |=
                                1 << IB_QPT_RAW_PACKET;
                }
                resp.response_length = offsetof(typeof(resp), tso_caps) +
                                       sizeof(resp.tso_caps);
        }

        if (uhw->outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);
                if (err)
                        goto out;
        }
out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num)
{
        struct mlx4_dev *dev = to_mdev(device)->dev;

        return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
                IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

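/*
 * Query port attributes for an InfiniBand link-layer port using PortInfo
 * MADs, including extended-speed and FDR-10 detection.
 */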
static int ib_link_query_port(struct ib_device *ibdev, u32 port,
                              struct ib_port_attr *props, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int ext_active_speed;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                                in_mad, out_mad);
        if (err)
                goto out;


        props->lid              = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc              = out_mad->data[34] & 0x7;
        props->sm_lid           = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl            = out_mad->data[36] & 0xf;
        props->state            = out_mad->data[32] & 0xf;
        props->phys_state       = out_mad->data[33] >> 4;
        props->port_cap_flags   = be32_to_cpup((__be32 *) (out_mad->data + 20));
        if (netw_view)
                props->gid_tbl_len = out_mad->data[50];
        else
                props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
        props->max_msg_sz       = to_mdev(ibdev)->dev->caps.max_msg_sz;
        props->pkey_tbl_len     = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
        props->bad_pkey_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr   = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width     = out_mad->data[31] & 0xf;
        props->active_speed     = out_mad->data[35] >> 4;
        props->max_mtu          = out_mad->data[41] & 0xf;
        props->active_mtu       = out_mad->data[36] >> 4;
        props->subnet_timeout   = out_mad->data[51] & 0x1f;
        props->max_vl_num       = out_mad->data[37] >> 4;
        props->init_type_reply  = out_mad->data[41] >> 4;

        /* Check if extended speeds (EDR/FDR/...) are supported */
        if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
                ext_active_speed = out_mad->data[62] >> 4;

                switch (ext_active_speed) {
                case 1:
                        props->active_speed = IB_SPEED_FDR;
                        break;
                case 2:
                        props->active_speed = IB_SPEED_EDR;
                        break;
                }
        }

        /* If the reported active speed is QDR, check whether it is FDR-10 */
        if (props->active_speed == IB_SPEED_QDR) {
                init_query_mad(in_mad);
                in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
                in_mad->attr_mod = cpu_to_be32(port);

                err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
                                   NULL, NULL, in_mad, out_mad);
                if (err)
                        goto out;

                /* Checking LinkSpeedActive for FDR-10 */
                if (out_mad->data[15] & 0x1)
                        props->active_speed = IB_SPEED_FDR10;
        }

        /* Avoid wrong speed value returned by FW if the IB link is down. */
        if (props->state == IB_PORT_DOWN)
                props->active_speed = IB_SPEED_SDR;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
        return state == IB_PORT_ACTIVE ?
                IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}

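/*
 * Query port attributes for an Ethernet (RoCE) port: width and speed come
 * from QUERY_PORT, while state and MTU are taken from the backing netdev.
 */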
static int eth_link_query_port(struct ib_device *ibdev, u32 port,
                               struct ib_port_attr *props)
{

        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
        struct mlx4_ib_iboe *iboe = &mdev->iboe;
        struct net_device *ndev;
        enum ib_mtu tmp;
        struct mlx4_cmd_mailbox *mailbox;
        int err = 0;
        int is_bonded = mlx4_is_bonded(mdev->dev);

        mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
                           MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
                           MLX4_CMD_WRAPPED);
        if (err)
                goto out;

        props->active_width     =  (((u8 *)mailbox->buf)[5] == 0x40) ||
                                   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
                                           IB_WIDTH_4X : IB_WIDTH_1X;
        props->active_speed     =  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
                                           IB_SPEED_FDR : IB_SPEED_QDR;
        props->port_cap_flags   = IB_PORT_CM_SUP;
        props->ip_gids = true;
        props->gid_tbl_len      = mdev->dev->caps.gid_table_len[port];
        props->max_msg_sz       = mdev->dev->caps.max_msg_sz;
        if (mdev->dev->caps.pkey_table_len[port])
                props->pkey_tbl_len = 1;
        props->max_mtu          = IB_MTU_4096;
        props->max_vl_num       = 2;
        props->state            = IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
        props->active_mtu       = IB_MTU_256;
        spin_lock_bh(&iboe->lock);
        ndev = iboe->netdevs[port - 1];
        if (ndev && is_bonded) {
                rcu_read_lock(); /* required to get upper dev */
                ndev = netdev_master_upper_dev_get_rcu(ndev);
                rcu_read_unlock();
        }
        if (!ndev)
                goto out_unlock;

        tmp = iboe_get_mtu(ndev->mtu);
        props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

        props->state            = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
                                        IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
out_unlock:
        spin_unlock_bh(&iboe->lock);
out:
        mlx4_free_cmd_mailbox(mdev->dev, mailbox);
        return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
                         struct ib_port_attr *props, int netw_view)
{
        int err;

        /* props being zeroed by the caller, avoid zeroing it here */

        err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
                ib_link_query_port(ibdev, port, props, netw_view) :
                                eth_link_query_port(ibdev, port, props);

        return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
                              struct ib_port_attr *props)
{
        /* returns host view */
        return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
                        union ib_gid *gid, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        int clear = 0;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        if (mlx4_is_mfunc(dev->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        if (mlx4_is_mfunc(dev->dev) && !netw_view) {
                if (index) {
                        /* For any index > 0, return the null guid */
                        err = 0;
                        clear = 1;
                        goto out;
                }
        }

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
                           NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        if (clear)
                memset(gid->raw + 8, 0, 8);
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
                             union ib_gid *gid)
{
        if (rdma_protocol_ib(ibdev, port))
                return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
        return 0;
}

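/*
 * Read the port's SL-to-VL mapping table with an SLtoVLMappingTable MAD;
 * slave functions simply report an all-zero table.
 */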
static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
                               u64 *sl2vl_tbl)
{
        union sl2vl_tbl_to_u64 sl2vl64;
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;
        int jj;

        if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
                *sl2vl_tbl = 0;
                return 0;
        }

        in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
        in_mad->attr_mod = 0;

        if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                           in_mad, out_mad);
        if (err)
                goto out;

        for (jj = 0; jj < 8; jj++)
                sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
        *sl2vl_tbl = sl2vl64.sl64;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
        u64 sl2vl;
        int i;
        int err;

        for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
                if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
                        continue;
                err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
                if (err) {
                        pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
                               i, err);
                        sl2vl = 0;
                }
                atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
        }
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
                         u16 *pkey, int netw_view)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
                mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

        err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
                           in_mad, out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
                              u16 *pkey)
{
        return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
{
        struct mlx4_cmd_mailbox *mailbox;
        unsigned long flags;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                return 0;

        if (mlx4_is_slave(to_mdev(ibdev)->dev))
                return -EOPNOTSUPP;

        spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
        memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
        spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

        /*
         * If possible, pass node desc to FW, so it can generate
         * a 144 trap.  If cmd fails, just ignore.
         */
        mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
        if (IS_ERR(mailbox))
                return 0;

        memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
        mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
                 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

        mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

        return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u32 port,
                            int reset_qkey_viols, u32 cap_mask)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
                *(u8 *) mailbox->buf         = !!reset_qkey_viols << 6;
                ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
        } else {
                ((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
                ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
        }

        err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
                       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
                       MLX4_CMD_WRAPPED);

        mlx4_free_cmd_mailbox(dev->dev, mailbox);
        return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
                               struct ib_port_modify *props)
{
        struct mlx4_ib_dev *mdev = to_mdev(ibdev);
        u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
        struct ib_port_attr attr;
        u32 cap_mask;
        int err;

        /* return OK if this is RoCE. CM calls ib_modify_port() regardless
         * of whether port link layer is ETH or IB. For ETH ports, qkey
         * violations and port capabilities are not meaningful.
         */
        if (is_eth)
                return 0;

        mutex_lock(&mdev->cap_mask_mutex);

        err = ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mlx4_ib_SET_PORT(mdev, port,
                               !!(mask & IB_PORT_RESET_QKEY_CNTR),
                               cap_mask);

out:
        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
                                  struct ib_udata *udata)
{
        struct ib_device *ibdev = uctx->device;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_ucontext *context = to_mucontext(uctx);
        struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
        struct mlx4_ib_alloc_ucontext_resp resp;
        int err;

        if (!dev->ib_active)
                return -EAGAIN;

        if (ibdev->ops.uverbs_abi_ver ==
            MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
                resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
                resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
                resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
        } else {
                resp.dev_caps         = dev->dev->caps.userspace_caps;
                resp.qp_tab_size      = dev->dev->caps.num_qps;
                resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
                resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
                resp.cqe_size         = dev->dev->caps.cqe_size;
        }

        err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
        if (err)
                return err;

        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        INIT_LIST_HEAD(&context->wqn_ranges_list);
        mutex_init(&context->wqn_ranges_mutex);

        if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
                err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
        else
                err = ib_copy_to_udata(udata, &resp, sizeof(resp));

        if (err) {
                mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
                return -EFAULT;
        }

        return err;
}

static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

        mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
}

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

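/*
 * mmap offsets: page 0 maps the UAR, page 1 the blue-flame registers (only
 * when bf_reg_size is non-zero), and page 3 the internal HCA core clock.
 */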
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct mlx4_ib_dev *dev = to_mdev(context->device);

        switch (vma->vm_pgoff) {
        case 0:
                return rdma_user_mmap_io(context, vma,
                                         to_mucontext(context)->uar.pfn,
                                         PAGE_SIZE,
                                         pgprot_noncached(vma->vm_page_prot),
                                         NULL);

        case 1:
                if (dev->dev->caps.bf_reg_size == 0)
                        return -EINVAL;
                return rdma_user_mmap_io(
                        context, vma,
                        to_mucontext(context)->uar.pfn +
                                dev->dev->caps.num_uars,
                        PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
                        NULL);

        case 3: {
                struct mlx4_clock_params params;
                int ret;

                ret = mlx4_get_internal_clock_params(dev->dev, &params);
                if (ret)
                        return ret;

                return rdma_user_mmap_io(
                        context, vma,
                        (pci_resource_start(dev->dev->persist->pdev,
                                            params.bar) +
                         params.offset) >>
                                PAGE_SHIFT,
                        PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
                        NULL);
        }

        default:
                return -EINVAL;
        }
}

static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct mlx4_ib_pd *pd = to_mpd(ibpd);
        struct ib_device *ibdev = ibpd->device;
        int err;

        err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
        if (err)
                return err;

        if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
                mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
                return -EFAULT;
        }
        return 0;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
        mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
        return 0;
}

static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
        struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
        struct ib_cq_init_attr cq_attr = {};
        int err;

        if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
                return -EOPNOTSUPP;

        err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
        if (err)
                return err;

        xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
        if (IS_ERR(xrcd->pd)) {
                err = PTR_ERR(xrcd->pd);
                goto err2;
        }

        cq_attr.cqe = 1;
        xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
        if (IS_ERR(xrcd->cq)) {
                err = PTR_ERR(xrcd->cq);
                goto err3;
        }

        return 0;

err3:
        ib_dealloc_pd(xrcd->pd);
err2:
        mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
        return err;
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
        ib_destroy_cq(to_mxrcd(xrcd)->cq);
        ib_dealloc_pd(to_mxrcd(xrcd)->pd);
        mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
        return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_gid_entry *ge;

        ge = kzalloc(sizeof *ge, GFP_KERNEL);
        if (!ge)
                return -ENOMEM;

        ge->gid = *gid;
        if (mlx4_ib_add_mc(mdev, mqp, gid)) {
                ge->port = mqp->port;
                ge->added = 1;
        }

        mutex_lock(&mqp->mutex);
        list_add_tail(&ge->list, &mqp->gid_list);
        mutex_unlock(&mqp->mutex);

        return 0;
}

static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
                                          struct mlx4_ib_counters *ctr_table)
{
        struct counter_index *counter, *tmp_count;

        mutex_lock(&ctr_table->mutex);
        list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
                                 list) {
                if (counter->allocated)
                        mlx4_counter_free(ibdev->dev, counter->index);
                list_del(&counter->list);
                kfree(counter);
        }
        mutex_unlock(&ctr_table->mutex);
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
                   union ib_gid *gid)
{
        struct net_device *ndev;
        int ret = 0;

        if (!mqp->port)
                return 0;

        spin_lock_bh(&mdev->iboe.lock);
        ndev = mdev->iboe.netdevs[mqp->port - 1];
        if (ndev)
                dev_hold(ndev);
        spin_unlock_bh(&mdev->iboe.lock);

        if (ndev) {
                ret = 1;
                dev_put(ndev);
        }

        return ret;
}

struct mlx4_ib_steering {
        struct list_head list;
        struct mlx4_flow_reg_id reg_id;
        union ib_gid gid;
};

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD dst_ip
#define LAST_TCP_UDP_FIELD src_port

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
        memchr_inv((void *)&filter.field  +\
                   sizeof(filter.field), 0,\
                   sizeof(filter) -\
                   offsetof(typeof(filter), field) -\
                   sizeof(filter.field))

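/*
 * Translate a single ib_flow_spec into the hardware steering rule layout at
 * @mlx4_spec and return the hardware-specific rule size, or a negative errno
 * for malformed or unsupported specs.
 */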
1349 static int parse_flow_attr(struct mlx4_dev *dev,
1350                            u32 qp_num,
1351                            union ib_flow_spec *ib_spec,
1352                            struct _rule_hw *mlx4_spec)
1353 {
1354         enum mlx4_net_trans_rule_id type;
1355
1356         switch (ib_spec->type) {
1357         case IB_FLOW_SPEC_ETH:
1358                 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
1359                         return -ENOTSUPP;
1360
1361                 type = MLX4_NET_TRANS_RULE_ID_ETH;
1362                 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
1363                        ETH_ALEN);
1364                 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
1365                        ETH_ALEN);
1366                 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
1367                 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
1368                 break;
1369         case IB_FLOW_SPEC_IB:
1370                 if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
1371                         return -ENOTSUPP;
1372
1373                 type = MLX4_NET_TRANS_RULE_ID_IB;
1374                 mlx4_spec->ib.l3_qpn =
1375                         cpu_to_be32(qp_num);
1376                 mlx4_spec->ib.qpn_mask =
1377                         cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
1378                 break;
1379
1380
1381         case IB_FLOW_SPEC_IPV4:
1382                 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1383                         return -ENOTSUPP;
1384
1385                 type = MLX4_NET_TRANS_RULE_ID_IPV4;
1386                 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
1387                 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
1388                 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
1389                 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
1390                 break;
1391
1392         case IB_FLOW_SPEC_TCP:
1393         case IB_FLOW_SPEC_UDP:
1394                 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
1395                         return -ENOTSUPP;
1396
1397                 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
1398                                         MLX4_NET_TRANS_RULE_ID_TCP :
1399                                         MLX4_NET_TRANS_RULE_ID_UDP;
1400                 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
1401                 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
1402                 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
1403                 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
1404                 break;
1405
1406         default:
1407                 return -EINVAL;
1408         }
1409         if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
1410             mlx4_hw_rule_sz(dev, type) < 0)
1411                 return -EINVAL;
1412         mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
1413         mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
1414         return mlx4_hw_rule_sz(dev, type);
1415 }
1416
1417 struct default_rules {
1418         __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1419         __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
1420         __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
1421         __u8  link_layer;
1422 };
1423 static const struct default_rules default_table[] = {
1424         {
1425                 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
1426                 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
1427                 .rules_create_list = {IB_FLOW_SPEC_IB},
1428                 .link_layer = IB_LINK_LAYER_INFINIBAND
1429         }
1430 };
1431
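/*
 * __mlx4_ib_default_rules_match() - look for a default_table entry whose
 * link layer matches the flow's port and whose mandatory specs are present
 * (and mandatory_not specs absent) in @flow_attr.  Returns the matching
 * table index, or -1 if no default rule applies.
 */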
1432 static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
1433                                          struct ib_flow_attr *flow_attr)
1434 {
1435         int i, j, k;
1436         void *ib_flow;
1437         const struct default_rules *pdefault_rules = default_table;
1438         u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
1439
1440         for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
1441                 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
1442                 memset(&field_types, 0, sizeof(field_types));
1443
1444                 if (link_layer != pdefault_rules->link_layer)
1445                         continue;
1446
1447                 ib_flow = flow_attr + 1;
1448                 /* we assume the specs are sorted */
1449                 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
1450                      j < flow_attr->num_of_specs; k++) {
1451                         union ib_flow_spec *current_flow =
1452                                 (union ib_flow_spec *)ib_flow;
1453
1454                         /* same layer but different type */
1455                         if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
1456                              (pdefault_rules->mandatory_fields[k] &
1457                               IB_FLOW_SPEC_LAYER_MASK)) &&
1458                             (current_flow->type !=
1459                              pdefault_rules->mandatory_fields[k]))
1460                                 goto out;
1461
1462                         /* same layer, try match next one */
1463                         if (current_flow->type ==
1464                             pdefault_rules->mandatory_fields[k]) {
1465                                 j++;
1466                                 ib_flow +=
1467                                         ((union ib_flow_spec *)ib_flow)->size;
1468                         }
1469                 }
1470
1471                 ib_flow = flow_attr + 1;
1472                 for (j = 0; j < flow_attr->num_of_specs;
1473                      j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
1474                         for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
1475                                 /* same layer and same type */
1476                                 if (((union ib_flow_spec *)ib_flow)->type ==
1477                                     pdefault_rules->mandatory_not_fields[k])
1478                                         goto out;
1479
1480                 return i;
1481         }
1482 out:
1483         return -1;
1484 }
1485
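/*
 * __mlx4_ib_create_default_rules() - append the specs listed in the matched
 * default_rules entry (currently just an empty IB spec) to the rule buffer
 * at @mlx4_spec via parse_flow_attr().  Returns the number of bytes added,
 * or -EINVAL on a parse failure or an unknown rule id.
 */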
1486 static int __mlx4_ib_create_default_rules(
1487                 struct mlx4_ib_dev *mdev,
1488                 struct ib_qp *qp,
1489                 const struct default_rules *pdefault_rules,
1490                 struct _rule_hw *mlx4_spec) {
1491         int size = 0;
1492         int i;
1493
1494         for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
1495                 union ib_flow_spec ib_spec = {};
1496                 int ret;
1497
1498                 switch (pdefault_rules->rules_create_list[i]) {
1499                 case 0:
1500                         /* no rule */
1501                         continue;
1502                 case IB_FLOW_SPEC_IB:
1503                         ib_spec.type = IB_FLOW_SPEC_IB;
1504                         ib_spec.size = sizeof(struct ib_flow_spec_ib);
1505
1506                         break;
1507                 default:
1508                         /* invalid rule */
1509                         return -EINVAL;
1510                 }
1511                 /* We must put an empty rule here; the qpn is ignored */
1512                 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
1513                                       mlx4_spec);
1514                 if (ret < 0) {
1515                         pr_info("invalid parsing\n");
1516                         return -EINVAL;
1517                 }
1518
1519                 mlx4_spec = (void *)mlx4_spec + ret;
1520                 size += ret;
1521         }
1522         return size;
1523 }
1524
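/*
 * __mlx4_ib_create_flow() - build one hardware steering rule for @qp in a
 * command mailbox (control header, any matching default rules, then the
 * user-supplied specs) and attach it with MLX4_QP_FLOW_STEERING_ATTACH.
 * On success the firmware registration handle is returned in *reg_id so
 * the rule can later be detached.
 */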
1525 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1526                           int domain,
1527                           enum mlx4_net_trans_promisc_mode flow_type,
1528                           u64 *reg_id)
1529 {
1530         int ret, i;
1531         int size = 0;
1532         void *ib_flow;
1533         struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1534         struct mlx4_cmd_mailbox *mailbox;
1535         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1536         int default_flow;
1537
1538         if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1539                 pr_err("Invalid priority value %d\n", flow_attr->priority);
1540                 return -EINVAL;
1541         }
1542
1543         if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1544                 return -EINVAL;
1545
1546         mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1547         if (IS_ERR(mailbox))
1548                 return PTR_ERR(mailbox);
1549         ctrl = mailbox->buf;
1550
1551         ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
1552         ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1553         ctrl->port = flow_attr->port;
1554         ctrl->qpn = cpu_to_be32(qp->qp_num);
1555
1556         ib_flow = flow_attr + 1;
1557         size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1558         /* Add default flows */
1559         default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1560         if (default_flow >= 0) {
1561                 ret = __mlx4_ib_create_default_rules(
1562                                 mdev, qp, default_table + default_flow,
1563                                 mailbox->buf + size);
1564                 if (ret < 0) {
1565                         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1566                         return -EINVAL;
1567                 }
1568                 size += ret;
1569         }
1570         for (i = 0; i < flow_attr->num_of_specs; i++) {
1571                 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1572                                       mailbox->buf + size);
1573                 if (ret < 0) {
1574                         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1575                         return -EINVAL;
1576                 }
1577                 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1578                 size += ret;
1579         }
1580
1581         if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1582             flow_attr->num_of_specs == 1) {
1583                 struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1584                 enum ib_flow_spec_type header_spec =
1585                         ((union ib_flow_spec *)(flow_attr + 1))->type;
1586
1587                 if (header_spec == IB_FLOW_SPEC_ETH)
1588                         mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1589         }
1590
1591         ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1592                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1593                            MLX4_CMD_NATIVE);
1594         if (ret == -ENOMEM)
1595                 pr_err("mcg table is full. Failed to register network rule.\n");
1596         else if (ret == -ENXIO)
1597                 pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
1598         else if (ret)
1599                 pr_err("Invalid argument. Failed to register network rule.\n");
1600
1601         mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1602         return ret;
1603 }
1604
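/*
 * __mlx4_ib_destroy_flow() - detach the steering rule identified by
 * @reg_id using MLX4_QP_FLOW_STEERING_DETACH.
 */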
1605 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1606 {
1607         int err;
1608         err = mlx4_cmd(dev, reg_id, 0, 0,
1609                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1610                        MLX4_CMD_NATIVE);
1611         if (err)
1612                 pr_err("Failed to detach network rule, registration id = 0x%llx\n",
1613                        reg_id);
1614         return err;
1615 }
1616
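/*
 * mlx4_ib_tunnel_steer_add() - when VXLAN tunnel offload is enabled (and
 * DMFS is not in static A0 mode), add a tunnel steering rule for a flow
 * that consists of a single Ethernet spec; otherwise do nothing and
 * return 0.
 */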
1617 static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1618                                     u64 *reg_id)
1619 {
1620         void *ib_flow;
1621         union ib_flow_spec *ib_spec;
1622         struct mlx4_dev *dev = to_mdev(qp->device)->dev;
1623         int err = 0;
1624
1625         if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
1626             dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
1627                 return 0; /* do nothing */
1628
1629         ib_flow = flow_attr + 1;
1630         ib_spec = (union ib_flow_spec *)ib_flow;
1631
1632         if (ib_spec->type !=  IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
1633                 return 0; /* do nothing */
1634
1635         err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
1636                                     flow_attr->port, qp->qp_num,
1637                                     MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
1638                                     reg_id);
1639         return err;
1640 }
1641
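/*
 * mlx4_ib_add_dont_trap_rule() - validate a DONT_TRAP (sniffer) flow and
 * pick the promiscuous mode(s) to use: both MC and UC sniffing when no
 * spec or an all-zero MAC mask is given, otherwise MC or UC sniffing
 * according to the destination MAC, provided the mask covers only the
 * multicast bit.
 */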
1642 static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
1643                                       struct ib_flow_attr *flow_attr,
1644                                       enum mlx4_net_trans_promisc_mode *type)
1645 {
1646         int err = 0;
1647
1648         if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
1649             (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
1650             (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
1651                 return -EOPNOTSUPP;
1652         }
1653
1654         if (flow_attr->num_of_specs == 0) {
1655                 type[0] = MLX4_FS_MC_SNIFFER;
1656                 type[1] = MLX4_FS_UC_SNIFFER;
1657         } else {
1658                 union ib_flow_spec *ib_spec;
1659
1660                 ib_spec = (union ib_flow_spec *)(flow_attr + 1);
1661                 if (ib_spec->type !=  IB_FLOW_SPEC_ETH)
1662                         return -EINVAL;
1663
1664                 /* an all-zero mask means sniff both MC and UC */
1665                 if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
1666                         type[0] = MLX4_FS_MC_SNIFFER;
1667                         type[1] = MLX4_FS_UC_SNIFFER;
1668                 } else {
1669                         u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
1670                                             ib_spec->eth.mask.dst_mac[1],
1671                                             ib_spec->eth.mask.dst_mac[2],
1672                                             ib_spec->eth.mask.dst_mac[3],
1673                                             ib_spec->eth.mask.dst_mac[4],
1674                                             ib_spec->eth.mask.dst_mac[5]};
1675
1676                         /* The XOR above only flipped the MC bit; a non-empty mask
1677                          * is valid only if that bit is set and the rest are zero.
1678                          */
1679                         if (!is_zero_ether_addr(&mac[0]))
1680                                 return -EINVAL;
1681
1682                         if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
1683                                 type[0] = MLX4_FS_MC_SNIFFER;
1684                         else
1685                                 type[0] = MLX4_FS_UC_SNIFFER;
1686                 }
1687         }
1688
1689         return err;
1690 }
1691
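/*
 * mlx4_ib_create_flow() - .create_flow entry point.  Maps the attribute
 * type to one or two mlx4 steering modes, creates the corresponding
 * rule(s) via __mlx4_ib_create_flow(), duplicates each rule on port 2
 * when the device ports are bonded, and adds a VXLAN tunnel rule for
 * NORMAL flows.  All registration ids are kept in the returned mflow so
 * destroy can undo them.
 */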
1692 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1693                                            struct ib_flow_attr *flow_attr,
1694                                            struct ib_udata *udata)
1695 {
1696         int err = 0, i = 0, j = 0;
1697         struct mlx4_ib_flow *mflow;
1698         enum mlx4_net_trans_promisc_mode type[2];
1699         struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
1700         int is_bonded = mlx4_is_bonded(dev);
1701
1702         if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
1703                 return ERR_PTR(-EOPNOTSUPP);
1704
1705         if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
1706             (flow_attr->type != IB_FLOW_ATTR_NORMAL))
1707                 return ERR_PTR(-EOPNOTSUPP);
1708
1709         if (udata &&
1710             udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
1711                 return ERR_PTR(-EOPNOTSUPP);
1712
1713         memset(type, 0, sizeof(type));
1714
1715         mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1716         if (!mflow) {
1717                 err = -ENOMEM;
1718                 goto err_free;
1719         }
1720
1721         switch (flow_attr->type) {
1722         case IB_FLOW_ATTR_NORMAL:
1723                 /* If the don't-trap flag (continue match) is set, under
1724                  * specific conditions traffic is replicated to the given qp
1725                  * without stealing it.
1726                  */
1727                 if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
1728                         err = mlx4_ib_add_dont_trap_rule(dev,
1729                                                          flow_attr,
1730                                                          type);
1731                         if (err)
1732                                 goto err_free;
1733                 } else {
1734                         type[0] = MLX4_FS_REGULAR;
1735                 }
1736                 break;
1737
1738         case IB_FLOW_ATTR_ALL_DEFAULT:
1739                 type[0] = MLX4_FS_ALL_DEFAULT;
1740                 break;
1741
1742         case IB_FLOW_ATTR_MC_DEFAULT:
1743                 type[0] = MLX4_FS_MC_DEFAULT;
1744                 break;
1745
1746         case IB_FLOW_ATTR_SNIFFER:
1747                 type[0] = MLX4_FS_MIRROR_RX_PORT;
1748                 type[1] = MLX4_FS_MIRROR_SX_PORT;
1749                 break;
1750
1751         default:
1752                 err = -EINVAL;
1753                 goto err_free;
1754         }
1755
1756         while (i < ARRAY_SIZE(type) && type[i]) {
1757                 err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
1758                                             type[i], &mflow->reg_id[i].id);
1759                 if (err)
1760                         goto err_create_flow;
1761                 if (is_bonded) {
1762                         /* Application always sees one port so the mirror rule
1763                          * must be on port #2
1764                          */
1765                         flow_attr->port = 2;
1766                         err = __mlx4_ib_create_flow(qp, flow_attr,
1767                                                     MLX4_DOMAIN_UVERBS, type[j],
1768                                                     &mflow->reg_id[j].mirror);
1769                         flow_attr->port = 1;
1770                         if (err)
1771                                 goto err_create_flow;
1772                         j++;
1773                 }
1774
1775                 i++;
1776         }
1777
1778         if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1779                 err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1780                                                &mflow->reg_id[i].id);
1781                 if (err)
1782                         goto err_create_flow;
1783
1784                 if (is_bonded) {
1785                         flow_attr->port = 2;
1786                         err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
1787                                                        &mflow->reg_id[j].mirror);
1788                         flow_attr->port = 1;
1789                         if (err)
1790                                 goto err_create_flow;
1791                         j++;
1792                 }
1793                 /* function to create mirror rule */
1794                 i++;
1795         }
1796
1797         return &mflow->ibflow;
1798
1799 err_create_flow:
1800         while (i) {
1801                 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1802                                              mflow->reg_id[i].id);
1803                 i--;
1804         }
1805
1806         while (j) {
1807                 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
1808                                              mflow->reg_id[j].mirror);
1809                 j--;
1810         }
1811 err_free:
1812         kfree(mflow);
1813         return ERR_PTR(err);
1814 }
1815
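/*
 * mlx4_ib_destroy_flow() - .destroy_flow entry point: detach every rule
 * (and its bonded mirror) recorded in the mflow and free it.  The last
 * detach error, if any, is returned.
 */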
1816 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1817 {
1818         int err, ret = 0;
1819         int i = 0;
1820         struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1821         struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1822
1823         while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
1824                 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
1825                 if (err)
1826                         ret = err;
1827                 if (mflow->reg_id[i].mirror) {
1828                         err = __mlx4_ib_destroy_flow(mdev->dev,
1829                                                      mflow->reg_id[i].mirror);
1830                         if (err)
1831                                 ret = err;
1832                 }
1833                 i++;
1834         }
1835
1836         kfree(mflow);
1837         return ret;
1838 }
1839
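/*
 * mlx4_ib_mcg_attach() - .attach_mcast entry point.  Attaches the QP to
 * the multicast group, adds a mirror attachment on the other port when
 * the ports are bonded, records the GID on the QP's gid_list and, under
 * device-managed steering, remembers the registration id so that detach
 * can find it later.
 */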
1840 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1841 {
1842         int err;
1843         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1844         struct mlx4_dev *dev = mdev->dev;
1845         struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1846         struct mlx4_ib_steering *ib_steering = NULL;
1847         enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1848         struct mlx4_flow_reg_id reg_id;
1849
1850         if (mdev->dev->caps.steering_mode ==
1851             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1852                 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1853                 if (!ib_steering)
1854                         return -ENOMEM;
1855         }
1856
1857         err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1858                                     !!(mqp->flags &
1859                                        MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1860                                     prot, &reg_id.id);
1861         if (err) {
1862                 pr_err("multicast attach op failed, err %d\n", err);
1863                 goto err_malloc;
1864         }
1865
1866         reg_id.mirror = 0;
1867         if (mlx4_is_bonded(dev)) {
1868                 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
1869                                             (mqp->port == 1) ? 2 : 1,
1870                                             !!(mqp->flags &
1871                                             MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1872                                             prot, &reg_id.mirror);
1873                 if (err)
1874                         goto err_add;
1875         }
1876
1877         err = add_gid_entry(ibqp, gid);
1878         if (err)
1879                 goto err_add;
1880
1881         if (ib_steering) {
1882                 memcpy(ib_steering->gid.raw, gid->raw, 16);
1883                 ib_steering->reg_id = reg_id;
1884                 mutex_lock(&mqp->mutex);
1885                 list_add(&ib_steering->list, &mqp->steering_rules);
1886                 mutex_unlock(&mqp->mutex);
1887         }
1888         return 0;
1889
1890 err_add:
1891         mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1892                               prot, reg_id.id);
1893         if (reg_id.mirror)
1894                 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1895                                       prot, reg_id.mirror);
1896 err_malloc:
1897         kfree(ib_steering);
1898
1899         return err;
1900 }
1901
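/*
 * find_gid_entry() - return the entry on @qp's gid_list matching the raw
 * GID, or NULL if none is found.
 */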
1902 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1903 {
1904         struct mlx4_ib_gid_entry *ge;
1905         struct mlx4_ib_gid_entry *tmp;
1906         struct mlx4_ib_gid_entry *ret = NULL;
1907
1908         list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1909                 if (!memcmp(raw, ge->gid.raw, 16)) {
1910                         ret = ge;
1911                         break;
1912                 }
1913         }
1914
1915         return ret;
1916 }
1917
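/*
 * mlx4_ib_mcg_detach() - .detach_mcast entry point.  Under device-managed
 * steering, look up (and remove) the registration saved at attach time;
 * detach the QP from the group, detach the bonded mirror if needed, and
 * drop the matching entry from the QP's gid_list.
 */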
1918 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1919 {
1920         int err;
1921         struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1922         struct mlx4_dev *dev = mdev->dev;
1923         struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1924         struct net_device *ndev;
1925         struct mlx4_ib_gid_entry *ge;
1926         struct mlx4_flow_reg_id reg_id = {0, 0};
1927         enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
1928
1929         if (mdev->dev->caps.steering_mode ==
1930             MLX4_STEERING_MODE_DEVICE_MANAGED) {
1931                 struct mlx4_ib_steering *ib_steering;
1932
1933                 mutex_lock(&mqp->mutex);
1934                 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1935                         if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1936                                 list_del(&ib_steering->list);
1937                                 break;
1938                         }
1939                 }
1940                 mutex_unlock(&mqp->mutex);
1941                 if (&ib_steering->list == &mqp->steering_rules) {
1942                         pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1943                         return -EINVAL;
1944                 }
1945                 reg_id = ib_steering->reg_id;
1946                 kfree(ib_steering);
1947         }
1948
1949         err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1950                                     prot, reg_id.id);
1951         if (err)
1952                 return err;
1953
1954         if (mlx4_is_bonded(dev)) {
1955                 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1956                                             prot, reg_id.mirror);
1957                 if (err)
1958                         return err;
1959         }
1960
1961         mutex_lock(&mqp->mutex);
1962         ge = find_gid_entry(mqp, gid->raw);
1963         if (ge) {
1964                 spin_lock_bh(&mdev->iboe.lock);
1965                 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1966                 if (ndev)
1967                         dev_hold(ndev);
1968                 spin_unlock_bh(&mdev->iboe.lock);
1969                 if (ndev)
1970                         dev_put(ndev);
1971                 list_del(&ge->list);
1972                 kfree(ge);
1973         } else
1974                 pr_warn("could not find mgid entry\n");
1975
1976         mutex_unlock(&mqp->mutex);
1977
1978         return 0;
1979 }
1980
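/*
 * init_node_data() - query NodeDescription and NodeInfo via MADs at driver
 * load and cache the node description, node GUID and hardware revision id.
 */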
1981 static int init_node_data(struct mlx4_ib_dev *dev)
1982 {
1983         struct ib_smp *in_mad  = NULL;
1984         struct ib_smp *out_mad = NULL;
1985         int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1986         int err = -ENOMEM;
1987
1988         in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
1989         out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1990         if (!in_mad || !out_mad)
1991                 goto out;
1992
1993         init_query_mad(in_mad);
1994         in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1995         if (mlx4_is_master(dev->dev))
1996                 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1997
1998         err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1999         if (err)
2000                 goto out;
2001
2002         memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
2003
2004         in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
2005
2006         err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
2007         if (err)
2008                 goto out;
2009
2010         dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
2011         memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
2012
2013 out:
2014         kfree(in_mad);
2015         kfree(out_mad);
2016         return err;
2017 }
2018
2019 static ssize_t hca_type_show(struct device *device,
2020                              struct device_attribute *attr, char *buf)
2021 {
2022         struct mlx4_ib_dev *dev =
2023                 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2024
2025         return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device);
2026 }
2027 static DEVICE_ATTR_RO(hca_type);
2028
2029 static ssize_t hw_rev_show(struct device *device,
2030                            struct device_attribute *attr, char *buf)
2031 {
2032         struct mlx4_ib_dev *dev =
2033                 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2034
2035         return sysfs_emit(buf, "%x\n", dev->dev->rev_id);
2036 }
2037 static DEVICE_ATTR_RO(hw_rev);
2038
2039 static ssize_t board_id_show(struct device *device,
2040                              struct device_attribute *attr, char *buf)
2041 {
2042         struct mlx4_ib_dev *dev =
2043                 rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
2044
2045         return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id);
2046 }
2047 static DEVICE_ATTR_RO(board_id);
2048
2049 static struct attribute *mlx4_class_attributes[] = {
2050         &dev_attr_hw_rev.attr,
2051         &dev_attr_hca_type.attr,
2052         &dev_attr_board_id.attr,
2053         NULL
2054 };
2055
2056 static const struct attribute_group mlx4_attr_group = {
2057         .attrs = mlx4_class_attributes,
2058 };
2059
2060 struct diag_counter {
2061         const char *name;
2062         u32 offset;
2063 };
2064
2065 #define DIAG_COUNTER(_name, _offset)                    \
2066         { .name = #_name, .offset = _offset }
2067
2068 static const struct diag_counter diag_basic[] = {
2069         DIAG_COUNTER(rq_num_lle, 0x00),
2070         DIAG_COUNTER(sq_num_lle, 0x04),
2071         DIAG_COUNTER(rq_num_lqpoe, 0x08),
2072         DIAG_COUNTER(sq_num_lqpoe, 0x0C),
2073         DIAG_COUNTER(rq_num_lpe, 0x18),
2074         DIAG_COUNTER(sq_num_lpe, 0x1C),
2075         DIAG_COUNTER(rq_num_wrfe, 0x20),
2076         DIAG_COUNTER(sq_num_wrfe, 0x24),
2077         DIAG_COUNTER(sq_num_mwbe, 0x2C),
2078         DIAG_COUNTER(sq_num_bre, 0x34),
2079         DIAG_COUNTER(sq_num_rire, 0x44),
2080         DIAG_COUNTER(rq_num_rire, 0x48),
2081         DIAG_COUNTER(sq_num_rae, 0x4C),
2082         DIAG_COUNTER(rq_num_rae, 0x50),
2083         DIAG_COUNTER(sq_num_roe, 0x54),
2084         DIAG_COUNTER(sq_num_tree, 0x5C),
2085         DIAG_COUNTER(sq_num_rree, 0x64),
2086         DIAG_COUNTER(rq_num_rnr, 0x68),
2087         DIAG_COUNTER(sq_num_rnr, 0x6C),
2088         DIAG_COUNTER(rq_num_oos, 0x100),
2089         DIAG_COUNTER(sq_num_oos, 0x104),
2090 };
2091
2092 static const struct diag_counter diag_ext[] = {
2093         DIAG_COUNTER(rq_num_dup, 0x130),
2094         DIAG_COUNTER(sq_num_to, 0x134),
2095 };
2096
2097 static const struct diag_counter diag_device_only[] = {
2098         DIAG_COUNTER(num_cqovf, 0x1A0),
2099         DIAG_COUNTER(rq_num_udsdprd, 0x118),
2100 };
2101
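/*
 * mlx4_ib_alloc_hw_stats() - allocate the rdma_hw_stats structure for the
 * device-wide (port_num == 0) or per-port counter set prepared by
 * mlx4_ib_alloc_diag_counters(); returns NULL if that set was not built.
 */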
2102 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2103                                                     u32 port_num)
2104 {
2105         struct mlx4_ib_dev *dev = to_mdev(ibdev);
2106         struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2107
2108         if (!diag[!!port_num].name)
2109                 return NULL;
2110
2111         return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
2112                                           diag[!!port_num].num_counters,
2113                                           RDMA_HW_STATS_DEFAULT_LIFESPAN);
2114 }
2115
2116 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2117                                 struct rdma_hw_stats *stats,
2118                                 u32 port, int index)
2119 {
2120         struct mlx4_ib_dev *dev = to_mdev(ibdev);
2121         struct mlx4_ib_diag_counters *diag = dev->diag_counters;
2122         u32 hw_value[ARRAY_SIZE(diag_device_only) +
2123                 ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
2124         int ret;
2125         int i;
2126
2127         ret = mlx4_query_diag_counters(dev->dev,
2128                                        MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
2129                                        diag[!!port].offset, hw_value,
2130                                        diag[!!port].num_counters, port);
2131
2132         if (ret)
2133                 return ret;
2134
2135         for (i = 0; i < diag[!!port].num_counters; i++)
2136                 stats->value[i] = hw_value[i];
2137
2138         return diag[!!port].num_counters;
2139 }
2140
2141 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2142                                          const char ***name,
2143                                          u32 **offset,
2144                                          u32 *num,
2145                                          bool port)
2146 {
2147         u32 num_counters;
2148
2149         num_counters = ARRAY_SIZE(diag_basic);
2150
2151         if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2152                 num_counters += ARRAY_SIZE(diag_ext);
2153
2154         if (!port)
2155                 num_counters += ARRAY_SIZE(diag_device_only);
2156
2157         *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
2158         if (!*name)
2159                 return -ENOMEM;
2160
2161         *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
2162         if (!*offset)
2163                 goto err_name;
2164
2165         *num = num_counters;
2166
2167         return 0;
2168
2169 err_name:
2170         kfree(*name);
2171         return -ENOMEM;
2172 }
2173
2174 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2175                                        const char **name,
2176                                        u32 *offset,
2177                                        bool port)
2178 {
2179         int i;
2180         int j;
2181
2182         for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
2183                 name[i] = diag_basic[i].name;
2184                 offset[i] = diag_basic[i].offset;
2185         }
2186
2187         if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2188                 for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
2189                         name[j] = diag_ext[i].name;
2190                         offset[j] = diag_ext[i].offset;
2191                 }
2192         }
2193
2194         if (!port) {
2195                 for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
2196                         name[j] = diag_device_only[i].name;
2197                         offset[j] = diag_device_only[i].offset;
2198                 }
2199         }
2200 }
2201
2202 static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
2203         .alloc_hw_stats = mlx4_ib_alloc_hw_stats,
2204         .get_hw_stats = mlx4_ib_get_hw_stats,
2205 };
2206
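/*
 * mlx4_ib_alloc_diag_counters() - skipped on slave functions; otherwise
 * build the name/offset tables for the device-wide counters (and the
 * per-port ones when MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT is set) and plug
 * the hw_stats ops into the ib_device.
 */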
2207 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2208 {
2209         struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2210         int i;
2211         int ret;
2212         bool per_port = !!(ibdev->dev->caps.flags2 &
2213                 MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
2214
2215         if (mlx4_is_slave(ibdev->dev))
2216                 return 0;
2217
2218         for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2219                 /* i == 1 means we are building port counters */
2220                 if (i && !per_port)
2221                         continue;
2222
2223                 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2224                                                     &diag[i].offset,
2225                                                     &diag[i].num_counters, i);
2226                 if (ret)
2227                         goto err_alloc;
2228
2229                 mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2230                                            diag[i].offset, i);
2231         }
2232
2233         ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
2234
2235         return 0;
2236
2237 err_alloc:
2238         if (i) {
2239                 kfree(diag[i - 1].name);
2240                 kfree(diag[i - 1].offset);
2241         }
2242
2243         return ret;
2244 }
2245
2246 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2247 {
2248         int i;
2249
2250         for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
2251                 kfree(ibdev->diag_counters[i].offset);
2252                 kfree(ibdev->diag_counters[i].name);
2253         }
2254 }
2255
2256 #define MLX4_IB_INVALID_MAC     ((u64)-1)
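/*
 * mlx4_ib_update_qps() - called when the net_device MAC for @port changes.
 * Caches the new MAC for the port and, on multi-function (SR-IOV) devices,
 * re-registers the source MAC used by that port's proxy QP1, releasing the
 * previously registered one.
 */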
2257 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2258                                struct net_device *dev,
2259                                int port)
2260 {
2261         u64 new_smac = 0;
2262         u64 release_mac = MLX4_IB_INVALID_MAC;
2263         struct mlx4_ib_qp *qp;
2264
2265         new_smac = mlx4_mac_to_u64(dev->dev_addr);
2266         atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2267
2268         /* no need to update QP1 or register a MAC in non-SRIOV */
2269         if (!mlx4_is_mfunc(ibdev->dev))
2270                 return;
2271
2272         mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2273         qp = ibdev->qp1_proxy[port - 1];
2274         if (qp) {
2275                 int new_smac_index;
2276                 u64 old_smac;
2277                 struct mlx4_update_qp_params update_params;
2278
2279                 mutex_lock(&qp->mutex);
2280                 old_smac = qp->pri.smac;
2281                 if (new_smac == old_smac)
2282                         goto unlock;
2283
2284                 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2285
2286                 if (new_smac_index < 0)
2287                         goto unlock;
2288
2289                 update_params.smac_index = new_smac_index;
2290                 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2291                                    &update_params)) {
2292                         release_mac = new_smac;
2293                         goto unlock;
2294                 }
2295                 /* if old port was zero, no mac was yet registered for this QP */
2296                 if (qp->pri.smac_port)
2297                         release_mac = old_smac;
2298                 qp->pri.smac = new_smac;
2299                 qp->pri.smac_port = port;
2300                 qp->pri.smac_index = new_smac_index;
2301         }
2302
2303 unlock:
2304         if (release_mac != MLX4_IB_INVALID_MAC)
2305                 mlx4_unregister_mac(ibdev->dev, port, release_mac);
2306         if (qp)
2307                 mutex_unlock(&qp->mutex);
2308         mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2309 }
2310
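/*
 * mlx4_ib_scan_netdevs() - runs under RTNL from the netdevice notifier:
 * refresh the cached ETH net_device for each IBoE port, dispatch
 * IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR when a netdev UP/DOWN event
 * changed the cached port state, and call mlx4_ib_update_qps() for the
 * port on CHANGEADDR/REGISTER/UP/CHANGE events.
 */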
2311 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2312                                  struct net_device *dev,
2313                                  unsigned long event)
2314
2315 {
2316         struct mlx4_ib_iboe *iboe;
2317         int update_qps_port = -1;
2318         int port;
2319
2320         ASSERT_RTNL();
2321
2322         iboe = &ibdev->iboe;
2323
2324         spin_lock_bh(&iboe->lock);
2325         mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2326
2327                 iboe->netdevs[port - 1] =
2328                         mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2329
2330                 if (dev == iboe->netdevs[port - 1] &&
2331                     (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
2332                      event == NETDEV_UP || event == NETDEV_CHANGE))
2333                         update_qps_port = port;
2334
2335                 if (dev == iboe->netdevs[port - 1] &&
2336                     (event == NETDEV_UP || event == NETDEV_DOWN)) {
2337                         enum ib_port_state port_state;
2338                         struct ib_event ibev = { };
2339
2340                         if (ib_get_cached_port_state(&ibdev->ib_dev, port,
2341                                                      &port_state))
2342                                 continue;
2343
2344                         if (event == NETDEV_UP &&
2345                             (port_state != IB_PORT_ACTIVE ||
2346                              iboe->last_port_state[port - 1] != IB_PORT_DOWN))
2347                                 continue;
2348                         if (event == NETDEV_DOWN &&
2349                             (port_state != IB_PORT_DOWN ||
2350                              iboe->last_port_state[port - 1] != IB_PORT_ACTIVE))
2351                                 continue;
2352                         iboe->last_port_state[port - 1] = port_state;
2353
2354                         ibev.device = &ibdev->ib_dev;
2355                         ibev.element.port_num = port;
2356                         ibev.event = event == NETDEV_UP ? IB_EVENT_PORT_ACTIVE :
2357                                                           IB_EVENT_PORT_ERR;
2358                         ib_dispatch_event(&ibev);
2359                 }
2360
2361         }
2362         spin_unlock_bh(&iboe->lock);
2363
2364         if (update_qps_port > 0)
2365                 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
2366 }
2367
2368 static int mlx4_ib_netdev_event(struct notifier_block *this,
2369                                 unsigned long event, void *ptr)
2370 {
2371         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2372         struct mlx4_ib_dev *ibdev;
2373
2374         if (!net_eq(dev_net(dev), &init_net))
2375                 return NOTIFY_DONE;
2376
2377         ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2378         mlx4_ib_scan_netdevs(ibdev, dev, event);
2379
2380         return NOTIFY_DONE;
2381 }
2382
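/*
 * init_pkeys() - on the PF of a multi-function device, program the
 * virtual-to-physical pkey mapping for every slave and port (identity for
 * the master and for index 0, the last table entry otherwise) and
 * initialise the physical pkey cache.
 */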
2383 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2384 {
2385         int port;
2386         int slave;
2387         int i;
2388
2389         if (mlx4_is_master(ibdev->dev)) {
2390                 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2391                      ++slave) {
2392                         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2393                                 for (i = 0;
2394                                      i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2395                                      ++i) {
2396                                         ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2397                                         /* master has the identity virt2phys pkey mapping */
2398                                                 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2399                                                         ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2400                                         mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2401                                                              ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2402                                 }
2403                         }
2404                 }
2405                 /* initialize pkey cache */
2406                 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2407                         for (i = 0;
2408                              i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2409                              ++i)
2410                                 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2411                                         (i) ? 0 : 0xFFFF;
2412                 }
2413         }
2414 }
2415
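/*
 * mlx4_ib_alloc_eqs() - request a dedicated EQ for each per-port completion
 * vector (skipping vectors shared between ports), remember the assignments
 * in eq_table and advertise the number actually obtained through
 * num_comp_vectors.
 */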
2416 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2417 {
2418         int i, j, eq = 0, total_eqs = 0;
2419
2420         ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2421                                   sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2422         if (!ibdev->eq_table)
2423                 return;
2424
2425         for (i = 1; i <= dev->caps.num_ports; i++) {
2426                 for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
2427                      j++, total_eqs++) {
2428                         if (i > 1 &&  mlx4_is_eq_shared(dev, total_eqs))
2429                                 continue;
2430                         ibdev->eq_table[eq] = total_eqs;
2431                         if (!mlx4_assign_eq(dev, i,
2432                                             &ibdev->eq_table[eq]))
2433                                 eq++;
2434                         else
2435                                 ibdev->eq_table[eq] = -1;
2436                 }
2437         }
2438
2439         for (i = eq; i < dev->caps.num_comp_vectors;
2440              ibdev->eq_table[i++] = -1)
2441                 ;
2442
2443         /* Advertise the new number of EQs to clients */
2444         ibdev->ib_dev.num_comp_vectors = eq;
2445 }
2446
2447 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2448 {
2449         int i;
2450         int total_eqs = ibdev->ib_dev.num_comp_vectors;
2451
2452         /* no eqs were allocated */
2453         if (!ibdev->eq_table)
2454                 return;
2455
2456         /* Reset the advertised EQ number */
2457         ibdev->ib_dev.num_comp_vectors = 0;
2458
2459         for (i = 0; i < total_eqs; i++)
2460                 mlx4_release_eq(dev, ibdev->eq_table[i]);
2461
2462         kfree(ibdev->eq_table);
2463         ibdev->eq_table = NULL;
2464 }
2465
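/*
 * mlx4_port_immutable() - fill in the per-port immutable data: core
 * capability flags for IB or RoCE (v1/v2) plus raw packet support, the
 * maximum MAD size, and the pkey/gid table lengths from ib_query_port().
 */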
2466 static int mlx4_port_immutable(struct ib_device *ibdev, u32 port_num,
2467                                struct ib_port_immutable *immutable)
2468 {
2469         struct ib_port_attr attr;
2470         struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2471         int err;
2472
2473         if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2474                 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
2475                 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2476         } else {
2477                 if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)
2478                         immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
2479                 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
2480                         immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
2481                                 RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
2482                 immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET;
2483                 if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE |
2484                     RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP))
2485                         immutable->max_mad_size = IB_MGMT_MAD_SIZE;
2486         }
2487
2488         err = ib_query_port(ibdev, port_num, &attr);
2489         if (err)
2490                 return err;
2491
2492         immutable->pkey_tbl_len = attr.pkey_tbl_len;
2493         immutable->gid_tbl_len = attr.gid_tbl_len;
2494
2495         return 0;
2496 }
2497
2498 static void get_fw_ver_str(struct ib_device *device, char *str)
2499 {
2500         struct mlx4_ib_dev *dev =
2501                 container_of(device, struct mlx4_ib_dev, ib_dev);
2502         snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
2503                  (int) (dev->dev->caps.fw_ver >> 32),
2504                  (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
2505                  (int) dev->dev->caps.fw_ver & 0xffff);
2506 }
2507
2508 static const struct ib_device_ops mlx4_ib_dev_ops = {
2509         .owner = THIS_MODULE,
2510         .driver_id = RDMA_DRIVER_MLX4,
2511         .uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION,
2512
2513         .add_gid = mlx4_ib_add_gid,
2514         .alloc_mr = mlx4_ib_alloc_mr,
2515         .alloc_pd = mlx4_ib_alloc_pd,
2516         .alloc_ucontext = mlx4_ib_alloc_ucontext,
2517         .attach_mcast = mlx4_ib_mcg_attach,
2518         .create_ah = mlx4_ib_create_ah,
2519         .create_cq = mlx4_ib_create_cq,
2520         .create_qp = mlx4_ib_create_qp,
2521         .create_srq = mlx4_ib_create_srq,
2522         .dealloc_pd = mlx4_ib_dealloc_pd,
2523         .dealloc_ucontext = mlx4_ib_dealloc_ucontext,
2524         .del_gid = mlx4_ib_del_gid,
2525         .dereg_mr = mlx4_ib_dereg_mr,
2526         .destroy_ah = mlx4_ib_destroy_ah,
2527         .destroy_cq = mlx4_ib_destroy_cq,
2528         .destroy_qp = mlx4_ib_destroy_qp,
2529         .destroy_srq = mlx4_ib_destroy_srq,
2530         .detach_mcast = mlx4_ib_mcg_detach,
2531         .disassociate_ucontext = mlx4_ib_disassociate_ucontext,
2532         .drain_rq = mlx4_ib_drain_rq,
2533         .drain_sq = mlx4_ib_drain_sq,
2534         .get_dev_fw_str = get_fw_ver_str,
2535         .get_dma_mr = mlx4_ib_get_dma_mr,
2536         .get_link_layer = mlx4_ib_port_link_layer,
2537         .get_netdev = mlx4_ib_get_netdev,
2538         .get_port_immutable = mlx4_port_immutable,
2539         .map_mr_sg = mlx4_ib_map_mr_sg,
2540         .mmap = mlx4_ib_mmap,
2541         .modify_cq = mlx4_ib_modify_cq,
2542         .modify_device = mlx4_ib_modify_device,
2543         .modify_port = mlx4_ib_modify_port,
2544         .modify_qp = mlx4_ib_modify_qp,
2545         .modify_srq = mlx4_ib_modify_srq,
2546         .poll_cq = mlx4_ib_poll_cq,
2547         .post_recv = mlx4_ib_post_recv,
2548         .post_send = mlx4_ib_post_send,
2549         .post_srq_recv = mlx4_ib_post_srq_recv,
2550         .process_mad = mlx4_ib_process_mad,
2551         .query_ah = mlx4_ib_query_ah,
2552         .query_device = mlx4_ib_query_device,
2553         .query_gid = mlx4_ib_query_gid,
2554         .query_pkey = mlx4_ib_query_pkey,
2555         .query_port = mlx4_ib_query_port,
2556         .query_qp = mlx4_ib_query_qp,
2557         .query_srq = mlx4_ib_query_srq,
2558         .reg_user_mr = mlx4_ib_reg_user_mr,
2559         .req_notify_cq = mlx4_ib_arm_cq,
2560         .rereg_user_mr = mlx4_ib_rereg_user_mr,
2561         .resize_cq = mlx4_ib_resize_cq,
2562
2563         INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah),
2564         INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq),
2565         INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd),
2566         INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq),
2567         INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext),
2568 };
2569
2570 static const struct ib_device_ops mlx4_ib_dev_wq_ops = {
2571         .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table,
2572         .create_wq = mlx4_ib_create_wq,
2573         .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table,
2574         .destroy_wq = mlx4_ib_destroy_wq,
2575         .modify_wq = mlx4_ib_modify_wq,
2576
2577         INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table,
2578                            ib_rwq_ind_tbl),
2579 };
2580
2581 static const struct ib_device_ops mlx4_ib_dev_mw_ops = {
2582         .alloc_mw = mlx4_ib_alloc_mw,
2583         .dealloc_mw = mlx4_ib_dealloc_mw,
2584
2585         INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw),
2586 };
2587
2588 static const struct ib_device_ops mlx4_ib_dev_xrc_ops = {
2589         .alloc_xrcd = mlx4_ib_alloc_xrcd,
2590         .dealloc_xrcd = mlx4_ib_dealloc_xrcd,
2591
2592         INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd),
2593 };
2594
2595 static const struct ib_device_ops mlx4_ib_dev_fs_ops = {
2596         .create_flow = mlx4_ib_create_flow,
2597         .destroy_flow = mlx4_ib_destroy_flow,
2598 };
2599
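/*
 * mlx4_ib_add() - mlx4 interface "add" callback that brings up the IB
 * device: allocate the device structure, PD, UAR and EQs, register the
 * verbs ops matching the device capabilities, set up per-port counters,
 * the steering QP range and diagnostic counters, register with the RDMA
 * core, and finally hook up MADs, SR-IOV support and the netdev notifier.
 */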
2600 static void *mlx4_ib_add(struct mlx4_dev *dev)
2601 {
2602         struct mlx4_ib_dev *ibdev;
2603         int num_ports = 0;
2604         int i, j;
2605         int err;
2606         struct mlx4_ib_iboe *iboe;
2607         int ib_num_ports = 0;
2608         int num_req_counters;
2609         int allocated;
2610         u32 counter_index;
2611         struct counter_index *new_counter_index = NULL;
2612
2613         pr_info_once("%s", mlx4_ib_version);
2614
2615         num_ports = 0;
2616         mlx4_foreach_ib_transport_port(i, dev)
2617                 num_ports++;
2618
2619         /* No point in registering a device with no ports... */
2620         if (num_ports == 0)
2621                 return NULL;
2622
2623         ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
2624         if (!ibdev) {
2625                 dev_err(&dev->persist->pdev->dev,
2626                         "Device struct alloc failed\n");
2627                 return NULL;
2628         }
2629
2630         iboe = &ibdev->iboe;
2631
2632         if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2633                 goto err_dealloc;
2634
2635         if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2636                 goto err_pd;
2637
2638         ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2639                                  PAGE_SIZE);
2640         if (!ibdev->uar_map)
2641                 goto err_uar;
2642         MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2643
2644         ibdev->dev = dev;
2645         ibdev->bond_next_port   = 0;
2646
2647         ibdev->ib_dev.node_type         = RDMA_NODE_IB_CA;
2648         ibdev->ib_dev.local_dma_lkey    = dev->caps.reserved_lkey;
2649         ibdev->num_ports                = num_ports;
2650         ibdev->ib_dev.phys_port_cnt     = mlx4_is_bonded(dev) ?
2651                                                 1 : ibdev->num_ports;
2652         ibdev->ib_dev.num_comp_vectors  = dev->caps.num_comp_vectors;
2653         ibdev->ib_dev.dev.parent        = &dev->persist->pdev->dev;
2654
2655         ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
2656
2657         if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
2658             ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
2659             IB_LINK_LAYER_ETHERNET) ||
2660             (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
2661             IB_LINK_LAYER_ETHERNET)))
2662                 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
2663
2664         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2665             dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2666                 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
2667
2668         if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2669                 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
2670         }
2671
2672         if (check_flow_steering_support(dev)) {
2673                 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2674                 ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
2675         }
2676
2677         if (!dev->caps.userspace_caps)
2678                 ibdev->ib_dev.ops.uverbs_abi_ver =
2679                         MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2680
2681         mlx4_ib_alloc_eqs(dev, ibdev);
2682
2683         spin_lock_init(&iboe->lock);
2684
2685         if (init_node_data(ibdev))
2686                 goto err_map;
2687         mlx4_init_sl2vl_tbl(ibdev);
2688
2689         for (i = 0; i < ibdev->num_ports; ++i) {
2690                 mutex_init(&ibdev->counters_table[i].mutex);
2691                 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2692                 iboe->last_port_state[i] = IB_PORT_DOWN;
2693         }
2694
2695         num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2696         for (i = 0; i < num_req_counters; ++i) {
2697                 mutex_init(&ibdev->qp1_proxy_lock[i]);
2698                 allocated = 0;
2699                 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2700                                                 IB_LINK_LAYER_ETHERNET) {
2701                         err = mlx4_counter_alloc(ibdev->dev, &counter_index,
2702                                                  MLX4_RES_USAGE_DRIVER);
2703                         /* if we failed to allocate a new counter, use the default */
2704                         if (err)
2705                                 counter_index =
2706                                         mlx4_get_default_counter_index(dev,
2707                                                                        i + 1);
2708                         else
2709                                 allocated = 1;
2710                 } else { /* IB_LINK_LAYER_INFINIBAND use the default counter */
2711                         counter_index = mlx4_get_default_counter_index(dev,
2712                                                                        i + 1);
2713                 }
2714                 new_counter_index = kmalloc(sizeof(*new_counter_index),
2715                                             GFP_KERNEL);
2716                 if (!new_counter_index) {
2717                         if (allocated)
2718                                 mlx4_counter_free(ibdev->dev, counter_index);
2719                         goto err_counter;
2720                 }
2721                 new_counter_index->index = counter_index;
2722                 new_counter_index->allocated = allocated;
2723                 list_add_tail(&new_counter_index->list,
2724                               &ibdev->counters_table[i].counters_list);
2725                 ibdev->counters_table[i].default_counter = counter_index;
2726                 pr_info("counter index %d for port %d allocated %d\n",
2727                         counter_index, i + 1, allocated);
2728         }
2729         if (mlx4_is_bonded(dev))
2730                 for (i = 1; i < ibdev->num_ports ; ++i) {
2731                         new_counter_index =
2732                                         kmalloc(sizeof(struct counter_index),
2733                                                 GFP_KERNEL);
2734                         if (!new_counter_index)
2735                                 goto err_counter;
2736                         new_counter_index->index = counter_index;
2737                         new_counter_index->allocated = 0;
2738                         list_add_tail(&new_counter_index->list,
2739                                       &ibdev->counters_table[i].counters_list);
2740                         ibdev->counters_table[i].default_counter =
2741                                                                 counter_index;
2742                 }
2743
2744         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2745                 ib_num_ports++;
2746
2747         spin_lock_init(&ibdev->sm_lock);
2748         mutex_init(&ibdev->cap_mask_mutex);
2749         INIT_LIST_HEAD(&ibdev->qp_list);
2750         spin_lock_init(&ibdev->reset_flow_resource_lock);
2751
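             /* Device-managed flow steering: reserve a QPN range and a tracking
              * bitmap from which UC steering QPNs are handed out.
              */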
2752         if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2753             ib_num_ports) {
2754                 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2755                 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2756                                             MLX4_IB_UC_STEER_QPN_ALIGN,
2757                                             &ibdev->steer_qpn_base, 0,
2758                                             MLX4_RES_USAGE_DRIVER);
2759                 if (err)
2760                         goto err_counter;
2761
2762                 ibdev->ib_uc_qpns_bitmap =
2763                         kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
2764                                       sizeof(long),
2765                                       GFP_KERNEL);
2766                 if (!ibdev->ib_uc_qpns_bitmap)
2767                         goto err_steer_qp_release;
2768
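                     /* With DMFS for IPoIB, register the reserved QPN range with the
                      * device and start with an empty bitmap; otherwise mark the whole
                      * range as used so no steering QPNs are handed out.
                      */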
2769                 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2770                         bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2771                                     ibdev->steer_qpn_count);
2772                         err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2773                                         dev, ibdev->steer_qpn_base,
2774                                         ibdev->steer_qpn_base +
2775                                         ibdev->steer_qpn_count - 1);
2776                         if (err)
2777                                 goto err_steer_free_bitmap;
2778                 } else {
2779                         bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2780                                     ibdev->steer_qpn_count);
2781                 }
2782         }
2783
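             /* Seed the per-port MAC cache with each port's default MAC. */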
2784         for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2785                 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2786
2787         if (mlx4_ib_alloc_diag_counters(ibdev))
2788                 goto err_steer_free_bitmap;
2789
2790         rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
2791         if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
2792                                &dev->persist->pdev->dev))
2793                 goto err_diag_counters;
2794
2795         if (mlx4_ib_mad_init(ibdev))
2796                 goto err_reg;
2797
2798         if (mlx4_ib_init_sriov(ibdev))
2799                 goto err_mad;
2800
2801         if (!iboe->nb.notifier_call) {
2802                 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2803                 err = register_netdevice_notifier(&iboe->nb);
2804                 if (err) {
2805                         iboe->nb.notifier_call = NULL;
2806                         goto err_notif;
2807                 }
2808         }
2809         if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
2810                 err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT);
2811                 if (err)
2812                         goto err_notif;
2813         }
2814
2815         ibdev->ib_active = true;
2816         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2817                 devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
2818                                          &ibdev->ib_dev);
2819
2820         if (mlx4_is_mfunc(ibdev->dev))
2821                 init_pkeys(ibdev);
2822
2823         /* create paravirt contexts for any VFs which are active */
2824         if (mlx4_is_master(ibdev->dev)) {
2825                 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2826                         if (j == mlx4_master_func_num(ibdev->dev))
2827                                 continue;
2828                         if (mlx4_is_slave_active(ibdev->dev, j))
2829                                 do_slave_init(ibdev, j, 1);
2830                 }
2831         }
2832         return ibdev;
2833
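     /* Error unwind: release everything acquired so far, in reverse order. */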
2834 err_notif:
2835         if (ibdev->iboe.nb.notifier_call) {
2836                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2837                         pr_warn("failure unregistering notifier\n");
2838                 ibdev->iboe.nb.notifier_call = NULL;
2839         }
2840         flush_workqueue(wq);
2841
2842         mlx4_ib_close_sriov(ibdev);
2843
2844 err_mad:
2845         mlx4_ib_mad_cleanup(ibdev);
2846
2847 err_reg:
2848         ib_unregister_device(&ibdev->ib_dev);
2849
2850 err_diag_counters:
2851         mlx4_ib_diag_cleanup(ibdev);
2852
2853 err_steer_free_bitmap:
2854         kfree(ibdev->ib_uc_qpns_bitmap);
2855
2856 err_steer_qp_release:
2857         mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2858                               ibdev->steer_qpn_count);
2859 err_counter:
2860         for (i = 0; i < ibdev->num_ports; ++i)
2861                 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2862
2863 err_map:
2864         mlx4_ib_free_eqs(dev, ibdev);
2865         iounmap(ibdev->uar_map);
2866
2867 err_uar:
2868         mlx4_uar_free(dev, &ibdev->priv_uar);
2869
2870 err_pd:
2871         mlx4_pd_free(dev, ibdev->priv_pdn);
2872
2873 err_dealloc:
2874         ib_dealloc_device(&ibdev->ib_dev);
2875
2876         return NULL;
2877 }
2878
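     /* Grab a power-of-two, naturally aligned block of at least 'count' QPNs
      * from the reserved steering range.
      */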
2879 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2880 {
2881         int offset;
2882
2883         WARN_ON(!dev->ib_uc_qpns_bitmap);
2884
2885         offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2886                                          dev->steer_qpn_count,
2887                                          get_count_order(count));
2888         if (offset < 0)
2889                 return offset;
2890
2891         *qpn = dev->steer_qpn_base + offset;
2892         return 0;
2893 }
2894
2895 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2896 {
2897         if (!qpn ||
2898             dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2899                 return;
2900
2901         if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
2902                  qpn, dev->steer_qpn_base))
2903                 /* not supposed to be here */
2904                 return;
2905
2906         bitmap_release_region(dev->ib_uc_qpns_bitmap,
2907                               qpn - dev->steer_qpn_base,
2908                               get_count_order(count));
2909 }
2910
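     /* Attach or detach a catch-all IB (L2) steering rule for the given QP. */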
2911 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2912                          int is_attach)
2913 {
2914         int err;
2915         size_t flow_size;
2916         struct ib_flow_attr *flow = NULL;
2917         struct ib_flow_spec_ib *ib_spec;
2918
2919         if (is_attach) {
2920                 flow_size = sizeof(struct ib_flow_attr) +
2921                             sizeof(struct ib_flow_spec_ib);
2922                 flow = kzalloc(flow_size, GFP_KERNEL);
2923                 if (!flow)
2924                         return -ENOMEM;
2925                 flow->port = mqp->port;
2926                 flow->num_of_specs = 1;
2927                 flow->size = flow_size;
2928                 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2929                 ib_spec->type = IB_FLOW_SPEC_IB;
2930                 ib_spec->size = sizeof(struct ib_flow_spec_ib);
2931                 /* Add an empty rule for IB L2 */
2932                 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2933
2934                 err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
2935                                             MLX4_FS_REGULAR, &mqp->reg_id);
2936         } else {
2937                 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2938         }
2939         kfree(flow);
2940         return err;
2941 }
2942
2943 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2944 {
2945         struct mlx4_ib_dev *ibdev = ibdev_ptr;
2946         int p;
2947         int i;
2948
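             /* Tear down in roughly the reverse order of mlx4_ib_add(). */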
2949         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2950                 devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
2951         ibdev->ib_active = false;
2952         flush_workqueue(wq);
2953
2954         if (ibdev->iboe.nb.notifier_call) {
2955                 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2956                         pr_warn("failure unregistering notifier\n");
2957                 ibdev->iboe.nb.notifier_call = NULL;
2958         }
2959
2960         mlx4_ib_close_sriov(ibdev);
2961         mlx4_ib_mad_cleanup(ibdev);
2962         ib_unregister_device(&ibdev->ib_dev);
2963         mlx4_ib_diag_cleanup(ibdev);
2964
2965         mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2966                               ibdev->steer_qpn_count);
2967         kfree(ibdev->ib_uc_qpns_bitmap);
2968
2969         iounmap(ibdev->uar_map);
2970         for (p = 0; p < ibdev->num_ports; ++p)
2971                 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
2972
2973         mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2974                 mlx4_CLOSE_PORT(dev, p);
2975
2976         mlx4_ib_free_eqs(dev, ibdev);
2977
2978         mlx4_uar_free(dev, &ibdev->priv_uar);
2979         mlx4_pd_free(dev, ibdev->priv_pdn);
2980         ib_dealloc_device(&ibdev->ib_dev);
2981 }
2982
2983 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2984 {
2985         struct mlx4_ib_demux_work **dm = NULL;
2986         struct mlx4_dev *dev = ibdev->dev;
2987         int i;
2988         unsigned long flags;
2989         struct mlx4_active_ports actv_ports;
2990         unsigned int ports;
2991         unsigned int first_port;
2992
2993         if (!mlx4_is_master(dev))
2994                 return;
2995
2996         actv_ports = mlx4_get_active_ports(dev, slave);
2997         ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2998         first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2999
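             /* This can run from the mlx4 event callback, hence atomic allocations. */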
3000         dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
3001         if (!dm)
3002                 return;
3003
3004         for (i = 0; i < ports; i++) {
3005                 dm[i] = kmalloc(sizeof(*dm[i]), GFP_ATOMIC);
3006                 if (!dm[i]) {
3007                         while (--i >= 0)
3008                                 kfree(dm[i]);
3009                         goto out;
3010                 }
3011                 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
3012                 dm[i]->port = first_port + i + 1;
3013                 dm[i]->slave = slave;
3014                 dm[i]->do_init = do_init;
3015                 dm[i]->dev = ibdev;
3016         }
3017         /* initialize or tear down tunnel QPs for the slave */
3018         spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3019         if (!ibdev->sriov.is_going_down) {
3020                 for (i = 0; i < ports; i++)
3021                         queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3022                 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3023         } else {
3024                 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3025                 for (i = 0; i < ports; i++)
3026                         kfree(dm[i]);
3027         }
3028 out:
3029         kfree(dm);
3030         return;
3031 }
3032
3033 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3034 {
3035         struct mlx4_ib_qp *mqp;
3036         unsigned long flags_qp;
3037         unsigned long flags_cq;
3038         struct mlx4_ib_cq *send_mcq, *recv_mcq;
3039         struct list_head    cq_notify_list;
3040         struct mlx4_cq *mcq;
3041         unsigned long flags;
3042
3043         pr_warn("mlx4_ib_handle_catas_error started\n");
3044         INIT_LIST_HEAD(&cq_notify_list);
3045
3046         /* Go over the qp list residing on this ibdev, sync with create/destroy qp. */
3047         spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3048
3049         list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3050                 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3051                 if (mqp->sq.tail != mqp->sq.head) {
3052                         send_mcq = to_mcq(mqp->ibqp.send_cq);
3053                         spin_lock_irqsave(&send_mcq->lock, flags_cq);
3054                         if (send_mcq->mcq.comp &&
3055                             mqp->ibqp.send_cq->comp_handler) {
3056                                 if (!send_mcq->mcq.reset_notify_added) {
3057                                         send_mcq->mcq.reset_notify_added = 1;
3058                                         list_add_tail(&send_mcq->mcq.reset_notify,
3059                                                       &cq_notify_list);
3060                                 }
3061                         }
3062                         spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3063                 }
3064                 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3065                 /* Now, handle the QP's receive queue */
3066                 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3067                 /* no handling is needed for SRQ */
3068                 if (!mqp->ibqp.srq) {
3069                         if (mqp->rq.tail != mqp->rq.head) {
3070                                 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3071                                 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3072                                 if (recv_mcq->mcq.comp &&
3073                                     mqp->ibqp.recv_cq->comp_handler) {
3074                                         if (!recv_mcq->mcq.reset_notify_added) {
3075                                                 recv_mcq->mcq.reset_notify_added = 1;
3076                                                 list_add_tail(&recv_mcq->mcq.reset_notify,
3077                                                               &cq_notify_list);
3078                                         }
3079                                 }
3080                                 spin_unlock_irqrestore(&recv_mcq->lock,
3081                                                        flags_cq);
3082                         }
3083                 }
3084                 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3085         }
3086
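             /* Kick the completion handler of every CQ that may still have
              * outstanding work so consumers observe the fatal error.
              */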
3087         list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
3088                 mcq->comp(mcq);
3089         }
3090         spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3091         pr_warn("mlx4_ib_handle_catas_error ended\n");
3092 }
3093
3094 static void handle_bonded_port_state_event(struct work_struct *work)
3095 {
3096         struct ib_event_work *ew =
3097                 container_of(work, struct ib_event_work, work);
3098         struct mlx4_ib_dev *ibdev = ew->ib_dev;
3099         enum ib_port_state bonded_port_state = IB_PORT_NOP;
3100         int i;
3101         struct ib_event ibev;
3102
3103         kfree(ew);
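             /* The bond is reported ACTIVE if any slave port has carrier, DOWN otherwise. */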
3104         spin_lock_bh(&ibdev->iboe.lock);
3105         for (i = 0; i < MLX4_MAX_PORTS; ++i) {
3106                 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
3107                 enum ib_port_state curr_port_state;
3108
3109                 if (!curr_netdev)
3110                         continue;
3111
3112                 curr_port_state =
3113                         (netif_running(curr_netdev) &&
3114                          netif_carrier_ok(curr_netdev)) ?
3115                         IB_PORT_ACTIVE : IB_PORT_DOWN;
3116
3117                 bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
3118                         curr_port_state : IB_PORT_ACTIVE;
3119         }
3120         spin_unlock_bh(&ibdev->iboe.lock);
3121
3122         ibev.device = &ibdev->ib_dev;
3123         ibev.element.port_num = 1;
3124         ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
3125                 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
3126
3127         ib_dispatch_event(&ibev);
3128 }
3129
3130 void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
3131 {
3132         u64 sl2vl;
3133         int err;
3134
3135         err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
3136         if (err) {
3137                 pr_err("Unable to get current sl to vl mapping for port %d.  Using all zeroes (%d)\n",
3138                        port, err);
3139                 sl2vl = 0;
3140         }
3141         atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
3142 }
3143
3144 static void ib_sl2vl_update_work(struct work_struct *work)
3145 {
3146         struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
3147         struct mlx4_ib_dev *mdev = ew->ib_dev;
3148         int port = ew->port;
3149
3150         mlx4_ib_sl2vl_update(mdev, port);
3151
3152         kfree(ew);
3153 }
3154
3155 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3156                                      int port)
3157 {
3158         struct ib_event_work *ew;
3159
3160         ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3161         if (ew) {
3162                 INIT_WORK(&ew->work, ib_sl2vl_update_work);
3163                 ew->port = port;
3164                 ew->ib_dev = ibdev;
3165                 queue_work(wq, &ew->work);
3166         }
3167 }
3168
3169 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
3170                           enum mlx4_dev_event event, unsigned long param)
3171 {
3172         struct ib_event ibev;
3173         struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
3174         struct mlx4_eqe *eqe = NULL;
3175         struct ib_event_work *ew;
3176         int p = 0;
3177
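             /* In bonded mode, port state is evaluated across both slaves from a work item. */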
3178         if (mlx4_is_bonded(dev) &&
3179             ((event == MLX4_DEV_EVENT_PORT_UP) ||
3180             (event == MLX4_DEV_EVENT_PORT_DOWN))) {
3181                 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3182                 if (!ew)
3183                         return;
3184                 INIT_WORK(&ew->work, handle_bonded_port_state_event);
3185                 ew->ib_dev = ibdev;
3186                 queue_work(wq, &ew->work);
3187                 return;
3188         }
3189
3190         if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
3191                 eqe = (struct mlx4_eqe *)param;
3192         else
3193                 p = (int) param;
3194
3195         switch (event) {
3196         case MLX4_DEV_EVENT_PORT_UP:
3197                 if (p > ibdev->num_ports)
3198                         return;
3199                 if (!mlx4_is_slave(dev) &&
3200                     rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3201                         IB_LINK_LAYER_INFINIBAND) {
3202                         if (mlx4_is_master(dev))
3203                                 mlx4_ib_invalidate_all_guid_record(ibdev, p);
3204                         if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3205                             !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3206                                 mlx4_sched_ib_sl2vl_update_work(ibdev, p);
3207                 }
3208                 ibev.event = IB_EVENT_PORT_ACTIVE;
3209                 break;
3210
3211         case MLX4_DEV_EVENT_PORT_DOWN:
3212                 if (p > ibdev->num_ports)
3213                         return;
3214                 ibev.event = IB_EVENT_PORT_ERR;
3215                 break;
3216
3217         case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
3218                 ibdev->ib_active = false;
3219                 ibev.event = IB_EVENT_DEVICE_FATAL;
3220                 mlx4_ib_handle_catas_error(ibdev);
3221                 break;
3222
3223         case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
3224                 ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
3225                 if (!ew)
3226                         break;
3227
3228                 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
3229                 memcpy(&ew->ib_eqe, eqe, sizeof(*eqe));
3230                 ew->ib_dev = ibdev;
3231                 /* need to queue only for port owner, which uses GEN_EQE */
3232                 if (mlx4_is_master(dev))
3233                         queue_work(wq, &ew->work);
3234                 else
3235                         handle_port_mgmt_change_event(&ew->work);
3236                 return;
3237
3238         case MLX4_DEV_EVENT_SLAVE_INIT:
3239                 /* here, p is the slave id */
3240                 do_slave_init(ibdev, p, 1);
3241                 if (mlx4_is_master(dev)) {
3242                         int i;
3243
3244                         for (i = 1; i <= ibdev->num_ports; i++) {
3245                                 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3246                                         == IB_LINK_LAYER_INFINIBAND)
3247                                         mlx4_ib_slave_alias_guid_event(ibdev,
3248                                                                        p, i,
3249                                                                        1);
3250                         }
3251                 }
3252                 return;
3253
3254         case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
3255                 if (mlx4_is_master(dev)) {
3256                         int i;
3257
3258                         for (i = 1; i <= ibdev->num_ports; i++) {
3259                                 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3260                                         == IB_LINK_LAYER_INFINIBAND)
3261                                         mlx4_ib_slave_alias_guid_event(ibdev,
3262                                                                        p, i,
3263                                                                        0);
3264                         }
3265                 }
3266                 /* here, p is the slave id */
3267                 do_slave_init(ibdev, p, 0);
3268                 return;
3269
3270         default:
3271                 return;
3272         }
3273
3274         ibev.device           = ibdev_ptr;
3275         ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
3276
3277         ib_dispatch_event(&ibev);
3278 }
3279
3280 static struct mlx4_interface mlx4_ib_interface = {
3281         .add            = mlx4_ib_add,
3282         .remove         = mlx4_ib_remove,
3283         .event          = mlx4_ib_event,
3284         .protocol       = MLX4_PROT_IB_IPV6,
3285         .flags          = MLX4_INTFF_BONDING
3286 };
3287
3288 static int __init mlx4_ib_init(void)
3289 {
3290         int err;
3291
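             /* All deferred mlx4_ib work is serialized on a single ordered workqueue. */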
3292         wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
3293         if (!wq)
3294                 return -ENOMEM;
3295
3296         err = mlx4_ib_mcg_init();
3297         if (err)
3298                 goto clean_wq;
3299
3300         err = mlx4_register_interface(&mlx4_ib_interface);
3301         if (err)
3302                 goto clean_mcg;
3303
3304         return 0;
3305
3306 clean_mcg:
3307         mlx4_ib_mcg_destroy();
3308
3309 clean_wq:
3310         destroy_workqueue(wq);
3311         return err;
3312 }
3313
3314 static void __exit mlx4_ib_cleanup(void)
3315 {
3316         mlx4_unregister_interface(&mlx4_ib_interface);
3317         mlx4_ib_mcg_destroy();
3318         destroy_workqueue(wq);
3319 }
3320
3321 module_init(mlx4_ib_init);
3322 module_exit(mlx4_ib_cleanup);