/* drivers/infiniband/hw/mlx5/main.c */

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "5.0-0"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");

static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION "\n";

struct mlx5_ib_event_work {
        struct work_struct      work;
        struct mlx5_core_dev    *dev;
        void                    *context;
        enum mlx5_dev_event     event;
        unsigned long           param;
};

enum {
        MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/*
 * This mutex should be held when accessing either of the above lists
 */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);

/* We can't use an array for xlt_emergency_page because dma_map_single
 * doesn't work on kernel module memory
 */
static unsigned long xlt_emergency_page;
static struct mutex xlt_emergency_page_mutex;

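/* Return the ib_dev currently bound to a multiport info entry; the
 * multiport mutex serializes this against (un)affiliation.
 */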
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
        struct mlx5_ib_dev *dev;

        mutex_lock(&mlx5_ib_multiport_mutex);
        dev = mpi->ibdev;
        mutex_unlock(&mlx5_ib_multiport_mutex);
        return dev;
}

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
        switch (port_type_cap) {
        case MLX5_CAP_PORT_TYPE_IB:
                return IB_LINK_LAYER_INFINIBAND;
        case MLX5_CAP_PORT_TYPE_ETH:
                return IB_LINK_LAYER_ETHERNET;
        default:
                return IB_LINK_LAYER_UNSPECIFIED;
        }
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

        return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
                          u8 port_num,
                          enum ib_port_state *state)
{
        struct ib_port_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        ret = ibdev->query_port(ibdev, port_num, &attr);
        if (!ret)
                *state = attr.state;
        return ret;
}

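/* Netdev notifier: track the RoCE netdev for the port and dispatch
 * IB_EVENT_PORT_ACTIVE/IB_EVENT_PORT_ERR when the effective port state
 * changes (taking LAG upper devices into account).
 */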
static int mlx5_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        u8 port_num = roce->native_port_num;
        struct mlx5_core_dev *mdev;
        struct mlx5_ib_dev *ibdev;

        ibdev = roce->dev;
        mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
        if (!mdev)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UNREGISTER:
                write_lock(&roce->netdev_lock);
                if (ibdev->rep) {
                        struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch;
                        struct net_device *rep_ndev;

                        rep_ndev = mlx5_ib_get_rep_netdev(esw,
                                                          ibdev->rep->vport);
                        if (rep_ndev == ndev)
                                roce->netdev = (event == NETDEV_UNREGISTER) ?
                                        NULL : ndev;
                } else if (ndev->dev.parent == &mdev->pdev->dev) {
                        roce->netdev = (event == NETDEV_UNREGISTER) ?
                                NULL : ndev;
                }
                write_unlock(&roce->netdev_lock);
                break;

        case NETDEV_CHANGE:
        case NETDEV_UP:
        case NETDEV_DOWN: {
                struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
                struct net_device *upper = NULL;

                if (lag_ndev) {
                        upper = netdev_master_upper_dev_get(lag_ndev);
                        dev_put(lag_ndev);
                }

                if ((upper == ndev || (!upper && ndev == roce->netdev))
                    && ibdev->ib_active) {
                        struct ib_event ibev = { };
                        enum ib_port_state port_state;

                        if (get_port_state(&ibdev->ib_dev, port_num,
                                           &port_state))
                                goto done;

                        if (roce->last_port_state == port_state)
                                goto done;

                        roce->last_port_state = port_state;
                        ibev.device = &ibdev->ib_dev;
                        if (port_state == IB_PORT_DOWN)
                                ibev.event = IB_EVENT_PORT_ERR;
                        else if (port_state == IB_PORT_ACTIVE)
                                ibev.event = IB_EVENT_PORT_ACTIVE;
                        else
                                goto done;

                        ibev.element.port_num = port_num;
                        ib_dispatch_event(&ibev);
                }
                break;
        }

        default:
                break;
        }
done:
        mlx5_ib_put_native_port_mdev(ibdev, port_num);
        return NOTIFY_DONE;
}

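/* Return the netdev associated with the given IB port, preferring the
 * LAG RoCE netdev when LAG is active. The caller must dev_put() the
 * returned device.
 */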
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
                                             u8 port_num)
{
        struct mlx5_ib_dev *ibdev = to_mdev(device);
        struct net_device *ndev;
        struct mlx5_core_dev *mdev;

        mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
        if (!mdev)
                return NULL;

        ndev = mlx5_lag_get_roce_netdev(mdev);
        if (ndev)
                goto out;

        /* Ensure ndev does not disappear before we invoke dev_hold() */
        read_lock(&ibdev->roce[port_num - 1].netdev_lock);
        ndev = ibdev->roce[port_num - 1].netdev;
        if (ndev)
                dev_hold(ndev);
        read_unlock(&ibdev->roce[port_num - 1].netdev_lock);

out:
        mlx5_ib_put_native_port_mdev(ibdev, port_num);
        return ndev;
}

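/* Map an IB port to the mlx5_core_dev that natively owns it. On the
 * Ethernet multiport path this takes a reference on the affiliated
 * slave device, which must be released with
 * mlx5_ib_put_native_port_mdev().
 */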
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
                                                   u8 ib_port_num,
                                                   u8 *native_port_num)
{
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
                                                          ib_port_num);
        struct mlx5_core_dev *mdev = NULL;
        struct mlx5_ib_multiport_info *mpi;
        struct mlx5_ib_port *port;

        if (!mlx5_core_mp_enabled(ibdev->mdev) ||
            ll != IB_LINK_LAYER_ETHERNET) {
                if (native_port_num)
                        *native_port_num = ib_port_num;
                return ibdev->mdev;
        }

        if (native_port_num)
                *native_port_num = 1;

        port = &ibdev->port[ib_port_num - 1];
        if (!port)
                return NULL;

        spin_lock(&port->mp.mpi_lock);
        mpi = ibdev->port[ib_port_num - 1].mp.mpi;
        if (mpi && !mpi->unaffiliate) {
                mdev = mpi->mdev;
                /* If it's the master, there is no need to refcount; it
                 * will exist as long as the ib_dev exists.
                 */
                if (!mpi->is_master)
                        mpi->mdev_refcnt++;
        }
        spin_unlock(&port->mp.mpi_lock);

        return mdev;
}

void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u8 port_num)
{
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
                                                          port_num);
        struct mlx5_ib_multiport_info *mpi;
        struct mlx5_ib_port *port;

        if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
                return;

        port = &ibdev->port[port_num - 1];

        spin_lock(&port->mp.mpi_lock);
        mpi = ibdev->port[port_num - 1].mp.mpi;
        if (mpi->is_master)
                goto out;

        mpi->mdev_refcnt--;
        if (mpi->unaffiliate)
                complete(&mpi->unref_comp);
out:
        spin_unlock(&port->mp.mpi_lock);
}

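/* Translate the PTYS eth_proto_oper bitmask reported by the device into
 * the closest IB (speed, width) pair, e.g. 100GBASE-CR4 maps to 4X/EDR.
 */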
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                                    u8 *active_width)
{
        switch (eth_proto_oper) {
        case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
        case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_SDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
        case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
        case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
        case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
        case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_HDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_FDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_EDR;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

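/* Fill ib_port_attr for a RoCE port: speed/width from PTYS, state and
 * MTU from the associated netdev. Falls back to the master port when
 * the queried port is not yet affiliated.
 */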
static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
                                struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        struct mlx5_core_dev *mdev;
        struct net_device *ndev, *upper;
        enum ib_mtu ndev_ib_mtu;
        bool put_mdev = true;
        u16 qkey_viol_cntr;
        u32 eth_prot_oper;
        u8 mdev_port_num;
        int err;

        mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
        if (!mdev) {
                /* This means the port isn't affiliated yet. Get the
                 * info for the master port instead.
                 */
                put_mdev = false;
                mdev = dev->mdev;
                mdev_port_num = 1;
                port_num = 1;
        }

        /* Possible bad flows are checked before filling out props so in case
         * of an error it will still be zeroed out.
         */
        err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper,
                                             mdev_port_num);
        if (err)
                goto out;

        props->active_width     = IB_WIDTH_4X;
        props->active_speed     = IB_SPEED_QDR;

        translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
                                 &props->active_width);

        props->port_cap_flags  |= IB_PORT_CM_SUP;
        props->port_cap_flags  |= IB_PORT_IP_BASED_GIDS;

        props->gid_tbl_len      = MLX5_CAP_ROCE(dev->mdev,
                                                roce_address_table_size);
        props->max_mtu          = IB_MTU_4096;
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
        props->pkey_tbl_len     = 1;
        props->state            = IB_PORT_DOWN;
        props->phys_state       = 3;

        mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
        props->qkey_viol_cntr = qkey_viol_cntr;

        /* If this is a stub query for an unaffiliated port stop here */
        if (!put_mdev)
                goto out;

        ndev = mlx5_ib_get_netdev(device, port_num);
        if (!ndev)
                goto out;

        if (mlx5_lag_is_active(dev->mdev)) {
                rcu_read_lock();
                upper = netdev_master_upper_dev_get_rcu(ndev);
                if (upper) {
                        dev_put(ndev);
                        ndev = upper;
                        dev_hold(ndev);
                }
                rcu_read_unlock();
        }

        if (netif_running(ndev) && netif_carrier_ok(ndev)) {
                props->state      = IB_PORT_ACTIVE;
                props->phys_state = 5;
        }

        ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

        dev_put(ndev);

        props->active_mtu       = min(props->max_mtu, ndev_ib_mtu);
out:
        if (put_mdev)
                mlx5_ib_put_native_port_mdev(dev, port_num);
        return err;
}

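/* Program one RoCE GID table entry in HW, deriving the RoCE version and
 * L3 type from the GID type/address plus the MAC and optional VLAN of
 * the attached netdev (a NULL gid is used by mlx5_ib_del_gid to clear
 * an entry).
 */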
static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
                         unsigned int index, const union ib_gid *gid,
                         const struct ib_gid_attr *attr)
{
        enum ib_gid_type gid_type = IB_GID_TYPE_IB;
        u8 roce_version = 0;
        u8 roce_l3_type = 0;
        bool vlan = false;
        u8 mac[ETH_ALEN];
        u16 vlan_id = 0;

        if (gid) {
                gid_type = attr->gid_type;
                ether_addr_copy(mac, attr->ndev->dev_addr);

                if (is_vlan_dev(attr->ndev)) {
                        vlan = true;
                        vlan_id = vlan_dev_vlan_id(attr->ndev);
                }
        }

        switch (gid_type) {
        case IB_GID_TYPE_IB:
                roce_version = MLX5_ROCE_VERSION_1;
                break;
        case IB_GID_TYPE_ROCE_UDP_ENCAP:
                roce_version = MLX5_ROCE_VERSION_2;
                if (ipv6_addr_v4mapped((void *)gid))
                        roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
                else
                        roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
                break;

        default:
                mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
        }

        return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
                                      roce_l3_type, gid->raw, mac, vlan,
                                      vlan_id, port_num);
}

static int mlx5_ib_add_gid(const union ib_gid *gid,
                           const struct ib_gid_attr *attr,
                           __always_unused void **context)
{
        return set_roce_addr(to_mdev(attr->device), attr->port_num,
                             attr->index, gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
                           __always_unused void **context)
{
        return set_roce_addr(to_mdev(attr->device), attr->port_num,
                             attr->index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
                               int index)
{
        struct ib_gid_attr attr;
        union ib_gid gid;

        if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
                return 0;

        dev_put(attr.ndev);

        if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
                return 0;

        return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
                           int index, enum ib_gid_type *gid_type)
{
        struct ib_gid_attr attr;
        union ib_gid gid;
        int ret;

        ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
        if (ret)
                return ret;

        dev_put(attr.ndev);

        *gid_type = attr.gid_type;

        return 0;
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
        if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
                return !MLX5_CAP_GEN(dev->mdev, ib_virt);
        return 0;
}

enum {
        MLX5_VPORT_ACCESS_METHOD_MAD,
        MLX5_VPORT_ACCESS_METHOD_HCA,
        MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
        if (mlx5_use_mad_ifc(to_mdev(ibdev)))
                return MLX5_VPORT_ACCESS_METHOD_MAD;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET)
                return MLX5_VPORT_ACCESS_METHOD_NIC;

        return MLX5_VPORT_ACCESS_METHOD_HCA;
}

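/* Report IB_ATOMIC_HCA only when the device supports 8-byte cmp&swap
 * and fetch&add for the given QP type and can respond in host
 * endianness; otherwise report IB_ATOMIC_NONE.
 */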
static void get_atomic_caps(struct mlx5_ib_dev *dev,
                            u8 atomic_size_qp,
                            struct ib_device_attr *props)
{
        u8 tmp;
        u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
        u8 atomic_req_8B_endianness_mode =
                MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

        /* Check if HW supports 8 byte standard atomic operations and is
         * capable of responding in host endianness
         */
        tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
        if (((atomic_operations & tmp) == tmp) &&
            (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
            (atomic_req_8B_endianness_mode)) {
                props->atomic_cap = IB_ATOMIC_HCA;
        } else {
                props->atomic_cap = IB_ATOMIC_NONE;
        }
}

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
                               struct ib_device_attr *props)
{
        u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

        get_atomic_caps(dev, atomic_size_qp, props);
}

static void get_atomic_caps_dc(struct mlx5_ib_dev *dev,
                               struct ib_device_attr *props)
{
        u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);

        get_atomic_caps(dev, atomic_size_qp, props);
}

bool mlx5_ib_dc_atomic_is_supported(struct mlx5_ib_dev *dev)
{
        struct ib_device_attr props = {};

        get_atomic_caps_dc(dev, &props);
        return (props.atomic_cap == IB_ATOMIC_HCA) ? true : false;
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
                                        __be64 *sys_image_guid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_system_image_guid(ibdev,
                                                            sys_image_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *sys_image_guid = cpu_to_be64(tmp);

        return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
                                u16 *max_pkeys)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
                                                pkey_table_size));
                return 0;

        default:
                return -EINVAL;
        }
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
                                u32 *vendor_id)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

        default:
                return -EINVAL;
        }
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
                                __be64 *node_guid)
{
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_node_guid(dev, node_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *node_guid = cpu_to_be64(tmp);

        return err;
}

struct mlx5_reg_node_desc {
        u8      desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
        struct mlx5_reg_node_desc in;

        if (mlx5_use_mad_ifc(dev))
                return mlx5_query_mad_ifc_node_desc(dev, node_desc);

        memset(&in, 0, sizeof(in));

        return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
                                    sizeof(struct mlx5_reg_node_desc),
                                    MLX5_REG_NODE_DESC, 0, 0);
}

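/* ib_device_attr query entry point: gathers GUIDs, caps and limits from
 * the FW capability pages, and fills the extended mlx5 response fields
 * (TSO, RSS, CQE compression, packet pacing, ...) that fit in the
 * user-supplied buffer length.
 */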
static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props,
                                struct ib_udata *uhw)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        int err = -ENOMEM;
        int max_sq_desc;
        int max_rq_sg;
        int max_sq_sg;
        u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
        bool raw_support = !mlx5_core_mp_enabled(mdev);
        struct mlx5_ib_query_device_resp resp = {};
        size_t resp_len;
        u64 max_tso;

        resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
        if (uhw->outlen && uhw->outlen < resp_len)
                return -EINVAL;
        else
                resp.response_length = resp_len;

        if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
                return -EINVAL;

        memset(props, 0, sizeof(*props));
        err = mlx5_query_system_image_guid(ibdev,
                                           &props->sys_image_guid);
        if (err)
                return err;

        err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
        if (err)
                return err;

        err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
        if (err)
                return err;

        props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
                (fw_rev_min(dev->mdev) << 16) |
                fw_rev_sub(dev->mdev);
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN;

        if (MLX5_CAP_GEN(mdev, pkv))
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, qkv))
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        if (MLX5_CAP_GEN(mdev, imaicl)) {
                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
                                           IB_DEVICE_MEM_WINDOW_TYPE_2B;
                props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
                /* We support 'Gappy' memory registration too */
                props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
        }
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
                /* At this stage no support for signature handover */
                props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
                                      IB_PROT_T10DIF_TYPE_2 |
                                      IB_PROT_T10DIF_TYPE_3;
                props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
                                       IB_GUARD_T10DIF_CSUM;
        }
        if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
                if (MLX5_CAP_ETH(mdev, csum_cap)) {
                        /* Legacy bit to support old userspace libraries */
                        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
                        props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
                }

                if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
                        props->raw_packet_caps |=
                                IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

                if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
                        max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
                        if (max_tso) {
                                resp.tso_caps.max_tso = 1 << max_tso;
                                resp.tso_caps.supported_qpts |=
                                        1 << IB_QPT_RAW_PACKET;
                                resp.response_length += sizeof(resp.tso_caps);
                        }
                }

                if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
                        resp.rss_caps.rx_hash_function =
                                                MLX5_RX_HASH_FUNC_TOEPLITZ;
                        resp.rss_caps.rx_hash_fields_mask =
                                                MLX5_RX_HASH_SRC_IPV4 |
                                                MLX5_RX_HASH_DST_IPV4 |
                                                MLX5_RX_HASH_SRC_IPV6 |
                                                MLX5_RX_HASH_DST_IPV6 |
                                                MLX5_RX_HASH_SRC_PORT_TCP |
                                                MLX5_RX_HASH_DST_PORT_TCP |
                                                MLX5_RX_HASH_SRC_PORT_UDP |
                                                MLX5_RX_HASH_DST_PORT_UDP |
                                                MLX5_RX_HASH_INNER;
                        if (mlx5_accel_ipsec_device_caps(dev->mdev) &
                            MLX5_ACCEL_IPSEC_CAP_DEVICE)
                                resp.rss_caps.rx_hash_fields_mask |=
                                        MLX5_RX_HASH_IPSEC_SPI;
                        resp.response_length += sizeof(resp.rss_caps);
                }
        } else {
                if (field_avail(typeof(resp), tso_caps, uhw->outlen))
                        resp.response_length += sizeof(resp.tso_caps);
                if (field_avail(typeof(resp), rss_caps, uhw->outlen))
                        resp.response_length += sizeof(resp.rss_caps);
        }

        if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        }

        if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
            MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
            raw_support)
                props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

        if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
            MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
            MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
            raw_support) {
                /* Legacy bit to support old userspace libraries */
                props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
                props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
        }

        if (MLX5_CAP_DEV_MEM(mdev, memic)) {
                props->max_dm_size =
                        MLX5_CAP_DEV_MEM(mdev, max_memic_size);
        }

        if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

        if (MLX5_CAP_GEN(mdev, end_pad))
                props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

        props->vendor_part_id      = mdev->pdev->device;
        props->hw_ver              = mdev->pdev->revision;

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = ~(min_page_size - 1);
        props->max_qp              = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
        props->max_qp_wr           = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
        max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
                     sizeof(struct mlx5_wqe_data_seg);
        max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
        max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
                     sizeof(struct mlx5_wqe_raddr_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        props->max_sge = min(max_rq_sg, max_sq_sg);
        props->max_sge_rd          = MLX5_MAX_SGE_RD;
        props->max_cq              = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
        props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
        props->max_mr              = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
        props->max_pd              = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
        props->max_qp_rd_atom      = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
        props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
        props->max_srq             = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
        props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
        props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len =
                1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
        get_atomic_caps_qp(dev, props);
        props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_mcast_grp       = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
        props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
        props->max_ah = INT_MAX;
        props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
        props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (MLX5_CAP_GEN(mdev, pg))
                props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
        props->odp_caps = dev->odp_caps;
#endif

        if (MLX5_CAP_GEN(mdev, cd))
                props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

        if (!mlx5_core_is_pf(mdev))
                props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET && raw_support) {
                props->rss_caps.max_rwq_indirection_tables =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
                props->rss_caps.max_rwq_indirection_table_size =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
                props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
                props->max_wq_type_rq =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
        }

        if (MLX5_CAP_GEN(mdev, tag_matching)) {
                props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
                props->tm_caps.max_num_tags =
                        (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
                props->tm_caps.flags = IB_TM_CAP_RC;
                props->tm_caps.max_ops =
                        1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
                props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
        }

        if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
                props->cq_caps.max_cq_moderation_count =
                                                MLX5_MAX_CQ_COUNT;
                props->cq_caps.max_cq_moderation_period =
                                                MLX5_MAX_CQ_PERIOD;
        }

        if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
                resp.response_length += sizeof(resp.cqe_comp_caps);

                if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
                        resp.cqe_comp_caps.max_num =
                                MLX5_CAP_GEN(dev->mdev,
                                             cqe_compression_max_num);

                        resp.cqe_comp_caps.supported_format =
                                MLX5_IB_CQE_RES_FORMAT_HASH |
                                MLX5_IB_CQE_RES_FORMAT_CSUM;

                        if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
                                resp.cqe_comp_caps.supported_format |=
                                        MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
                }
        }

        if (field_avail(typeof(resp), packet_pacing_caps, uhw->outlen) &&
            raw_support) {
                if (MLX5_CAP_QOS(mdev, packet_pacing) &&
                    MLX5_CAP_GEN(mdev, qos)) {
                        resp.packet_pacing_caps.qp_rate_limit_max =
                                MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
                        resp.packet_pacing_caps.qp_rate_limit_min =
                                MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
                        resp.packet_pacing_caps.supported_qpts |=
                                1 << IB_QPT_RAW_PACKET;
                        if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
                            MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
                                resp.packet_pacing_caps.cap_flags |=
                                        MLX5_IB_PP_SUPPORT_BURST;
                }
                resp.response_length += sizeof(resp.packet_pacing_caps);
        }

        if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
                        uhw->outlen)) {
                if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
                        resp.mlx5_ib_support_multi_pkt_send_wqes =
                                MLX5_IB_ALLOW_MPW;

                if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
                        resp.mlx5_ib_support_multi_pkt_send_wqes |=
                                MLX5_IB_SUPPORT_EMPW;

                resp.response_length +=
                        sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
        }

        if (field_avail(typeof(resp), flags, uhw->outlen)) {
                resp.response_length += sizeof(resp.flags);

                if (MLX5_CAP_GEN(mdev, cqe_compression_128))
                        resp.flags |=
                                MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

                if (MLX5_CAP_GEN(mdev, cqe_128_always))
                        resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
        }

        if (field_avail(typeof(resp), sw_parsing_caps,
                        uhw->outlen)) {
                resp.response_length += sizeof(resp.sw_parsing_caps);
                if (MLX5_CAP_ETH(mdev, swp)) {
                        resp.sw_parsing_caps.sw_parsing_offloads |=
                                MLX5_IB_SW_PARSING;

                        if (MLX5_CAP_ETH(mdev, swp_csum))
                                resp.sw_parsing_caps.sw_parsing_offloads |=
                                        MLX5_IB_SW_PARSING_CSUM;

                        if (MLX5_CAP_ETH(mdev, swp_lso))
                                resp.sw_parsing_caps.sw_parsing_offloads |=
                                        MLX5_IB_SW_PARSING_LSO;

                        if (resp.sw_parsing_caps.sw_parsing_offloads)
                                resp.sw_parsing_caps.supported_qpts =
                                        BIT(IB_QPT_RAW_PACKET);
                }
        }

        if (field_avail(typeof(resp), striding_rq_caps, uhw->outlen) &&
            raw_support) {
                resp.response_length += sizeof(resp.striding_rq_caps);
                if (MLX5_CAP_GEN(mdev, striding_rq)) {
                        resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
                                MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
                        resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
                                MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
                        resp.striding_rq_caps.min_single_wqe_log_num_of_strides =
                                MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
                        resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
                                MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
                        resp.striding_rq_caps.supported_qpts =
                                BIT(IB_QPT_RAW_PACKET);
                }
        }

        if (field_avail(typeof(resp), tunnel_offloads_caps,
                        uhw->outlen)) {
                resp.response_length += sizeof(resp.tunnel_offloads_caps);
                if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
                if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
                if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_GRE;
                if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
                    MLX5_FLEX_PROTO_CW_MPLS_GRE)
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
                if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
                    MLX5_FLEX_PROTO_CW_MPLS_UDP)
                        resp.tunnel_offloads_caps |=
                                MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
        }

        if (uhw->outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);

                if (err)
                        return err;
        }

        return 0;
}

enum mlx5_ib_width {
        MLX5_IB_WIDTH_1X        = 1 << 0,
        MLX5_IB_WIDTH_2X        = 1 << 1,
        MLX5_IB_WIDTH_4X        = 1 << 2,
        MLX5_IB_WIDTH_8X        = 1 << 3,
        MLX5_IB_WIDTH_12X       = 1 << 4
};

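/* Convert the device's link-width bitmask into the IB verbs width enum;
 * 2X has no IB spec equivalent here and is rejected.
 */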
static int translate_active_width(struct ib_device *ibdev, u8 active_width,
                                  u8 *ib_width)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        int err = 0;

        if (active_width & MLX5_IB_WIDTH_1X) {
                *ib_width = IB_WIDTH_1X;
        } else if (active_width & MLX5_IB_WIDTH_2X) {
                mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
                            (int)active_width);
                err = -EINVAL;
        } else if (active_width & MLX5_IB_WIDTH_4X) {
                *ib_width = IB_WIDTH_4X;
        } else if (active_width & MLX5_IB_WIDTH_8X) {
                *ib_width = IB_WIDTH_8X;
        } else if (active_width & MLX5_IB_WIDTH_12X) {
                *ib_width = IB_WIDTH_12X;
        } else {
                mlx5_ib_dbg(dev, "Invalid active_width %d\n",
                            (int)active_width);
                err = -EINVAL;
        }

        return err;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
        switch (mtu) {
        case 256: return 1;
        case 512: return 2;
        case 1024: return 3;
        case 2048: return 4;
        case 4096: return 5;
        default:
                pr_warn("invalid mtu\n");
                return -1;
        }
}

enum ib_max_vl_num {
        __IB_MAX_VL_0           = 1,
        __IB_MAX_VL_0_1         = 2,
        __IB_MAX_VL_0_3         = 3,
        __IB_MAX_VL_0_7         = 4,
        __IB_MAX_VL_0_14        = 5,
};

enum mlx5_vl_hw_cap {
        MLX5_VL_HW_0    = 1,
        MLX5_VL_HW_0_1  = 2,
        MLX5_VL_HW_0_2  = 3,
        MLX5_VL_HW_0_3  = 4,
        MLX5_VL_HW_0_4  = 5,
        MLX5_VL_HW_0_5  = 6,
        MLX5_VL_HW_0_6  = 7,
        MLX5_VL_HW_0_7  = 8,
        MLX5_VL_HW_0_14 = 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
                                u8 *max_vl_num)
{
        switch (vl_hw_cap) {
        case MLX5_VL_HW_0:
                *max_vl_num = __IB_MAX_VL_0;
                break;
        case MLX5_VL_HW_0_1:
                *max_vl_num = __IB_MAX_VL_0_1;
                break;
        case MLX5_VL_HW_0_3:
                *max_vl_num = __IB_MAX_VL_0_3;
                break;
        case MLX5_VL_HW_0_7:
                *max_vl_num = __IB_MAX_VL_0_7;
                break;
        case MLX5_VL_HW_0_14:
                *max_vl_num = __IB_MAX_VL_0_14;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

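/* Query an IB (non-RoCE) port through the HCA vport context: LIDs,
 * state, cap mask, MTUs, link width/speed and VL capabilities.
 */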
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
                               struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *rep;
        u16 max_mtu;
        u16 oper_mtu;
        int err;
        u8 ib_link_width_oper;
        u8 vl_hw_cap;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep) {
                err = -ENOMEM;
                goto out;
        }

        /* props being zeroed by the caller, avoid zeroing it here */

        err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
        if (err)
                goto out;

        props->lid              = rep->lid;
        props->lmc              = rep->lmc;
        props->sm_lid           = rep->sm_lid;
        props->sm_sl            = rep->sm_sl;
        props->state            = rep->vport_state;
        props->phys_state       = rep->port_physical_state;
        props->port_cap_flags   = rep->cap_mask1;
        props->gid_tbl_len      = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
        props->pkey_tbl_len     = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
        props->bad_pkey_cntr    = rep->pkey_violation_counter;
        props->qkey_viol_cntr   = rep->qkey_violation_counter;
        props->subnet_timeout   = rep->subnet_timeout;
        props->init_type_reply  = rep->init_type_reply;
        props->grh_required     = rep->grh_required;

        err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
        if (err)
                goto out;

        err = translate_active_width(ibdev, ib_link_width_oper,
                                     &props->active_width);
        if (err)
                goto out;
        err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
        if (err)
                goto out;

        mlx5_query_port_max_mtu(mdev, &max_mtu, port);

        props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

        mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

        props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

        err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
        if (err)
                goto out;

        err = translate_max_vl_num(ibdev, vl_hw_cap,
                                   &props->max_vl_num);
out:
        kfree(rep);
        return err;
}

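/* Dispatch the port query to the MAD, HCA-vport or NIC-vport path, then
 * subtract the GIDs reserved by mlx5 core from the reported table
 * length.
 */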
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props)
{
        unsigned int count;
        int ret;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                ret = mlx5_query_mad_ifc_port(ibdev, port, props);
                break;

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                ret = mlx5_query_hca_port(ibdev, port, props);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                ret = mlx5_query_port_roce(ibdev, port, props);
                break;

        default:
                ret = -EINVAL;
        }

        if (!ret && props) {
                struct mlx5_ib_dev *dev = to_mdev(ibdev);
                struct mlx5_core_dev *mdev;
                bool put_mdev = true;

                mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
                if (!mdev) {
                        /* If the port isn't affiliated yet query the master.
                         * The master and slave will have the same values.
                         */
                        mdev = dev->mdev;
                        port = 1;
                        put_mdev = false;
                }
                count = mlx5_core_reserved_gids_count(mdev);
                if (put_mdev)
                        mlx5_ib_put_native_port_mdev(dev, port);
                props->gid_tbl_len -= count;
        }
        return ret;
}

static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
                                  struct ib_port_attr *props)
{
        int ret;

        /* Only link layer == ethernet is valid for representors */
        ret = mlx5_query_port_roce(ibdev, port, props);
        if (ret || !props)
                return ret;

        /* We don't support GIDs */
        props->gid_tbl_len = 0;

        return ret;
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

        default:
                return -EINVAL;
        }
}

static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u8 port,
                                   u16 index, u16 *pkey)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev;
        bool put_mdev = true;
        u8 mdev_port_num;
        int err;

        mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
        if (!mdev) {
                /* The port isn't affiliated yet, get the PKey from the master
                 * port. For RoCE the PKey tables will be the same.
                 */
                put_mdev = false;
                mdev = dev->mdev;
                mdev_port_num = 1;
        }

        err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
                                        index, pkey);
        if (put_mdev)
                mlx5_ib_put_native_port_mdev(dev, port);

        return err;
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                              u16 *pkey)
{
        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
        default:
                return -EINVAL;
        }
}

1404 static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
1405                                  struct ib_device_modify *props)
1406 {
1407         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1408         struct mlx5_reg_node_desc in;
1409         struct mlx5_reg_node_desc out;
1410         int err;
1411
1412         if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
1413                 return -EOPNOTSUPP;
1414
1415         if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
1416                 return 0;
1417
1418         /*
1419          * If possible, pass the node description to FW so it can generate a
1420          * "Node Description Changed" trap (trap 144). If the cmd fails, just ignore it.
1421          */
1422         memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1423         err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1424                                    sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
1425         if (err)
1426                 return err;
1427
1428         memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
1429
1430         return err;
1431 }
1432
1433 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u8 port_num, u32 mask,
1434                                 u32 value)
1435 {
1436         struct mlx5_hca_vport_context ctx = {};
1437         struct mlx5_core_dev *mdev;
1438         u8 mdev_port_num;
1439         int err;
1440
1441         mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1442         if (!mdev)
1443                 return -ENODEV;
1444
1445         err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1446         if (err)
1447                 goto out;
1448
1449         if (~ctx.cap_mask1_perm & mask) {
1450                 mlx5_ib_warn(dev, "trying to change bitmask 0x%X, but only changes to 0x%X are supported\n",
1451                              mask, ctx.cap_mask1_perm);
1452                 err = -EINVAL;
1453                 goto out;
1454         }
1455
1456         ctx.cap_mask1 = value;
1457         ctx.cap_mask1_perm = mask;
1458         err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1459                                                  0, &ctx);
1460
1461 out:
1462         mlx5_ib_put_native_port_mdev(dev, port_num);
1463
1464         return err;
1465 }
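
/*
 * Usage sketch (illustration only, not called by the driver): to set
 * capability bit B and clear bit C atomically, a caller passes the union
 * of the bits it wants to touch as @mask and their new state as @value:
 *
 *         set_port_caps_atomic(dev, port_num, B | C, B);
 *
 * Firmware applies only the bits flagged in cap_mask1_perm, so unrelated
 * capability bits are left untouched.
 */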
1466
1467 static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1468                                struct ib_port_modify *props)
1469 {
1470         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1471         struct ib_port_attr attr;
1472         u32 tmp;
1473         int err;
1474         u32 change_mask;
1475         u32 value;
1476         bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
1477                       IB_LINK_LAYER_INFINIBAND);
1478
1479         /* The CM layer calls ib_modify_port() regardless of the link layer. For
1480          * Ethernet ports, qkey violation counters and port capabilities are meaningless.
1481          */
1482         if (!is_ib)
1483                 return 0;
1484
1485         if (MLX5_CAP_GEN(dev->mdev, ib_virt)) { /* is_ib is guaranteed true here */
1486                 change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
1487                 value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
1488                 return set_port_caps_atomic(dev, port, change_mask, value);
1489         }
1490
1491         mutex_lock(&dev->cap_mask_mutex);
1492
1493         err = ib_query_port(ibdev, port, &attr);
1494         if (err)
1495                 goto out;
1496
1497         tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
1498                 ~props->clr_port_cap_mask;
1499
1500         err = mlx5_set_port_caps(dev->mdev, port, tmp);
1501
1502 out:
1503         mutex_unlock(&dev->cap_mask_mutex);
1504         return err;
1505 }
1506
1507 static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
1508 {
1509         mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
1510                     caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
1511 }
1512
1513 static u16 calc_dynamic_bfregs(int uars_per_sys_page)
1514 {
1515         /* A large system page without 4k UAR support might limit the dynamic size */
1516         if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
1517                 return MLX5_MIN_DYN_BFREGS;
1518
1519         return MLX5_MAX_DYN_BFREGS;
1520 }
1521
1522 static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
1523                              struct mlx5_ib_alloc_ucontext_req_v2 *req,
1524                              struct mlx5_bfreg_info *bfregi)
1525 {
1526         int uars_per_sys_page;
1527         int bfregs_per_sys_page;
1528         int ref_bfregs = req->total_num_bfregs;
1529
1530         if (req->total_num_bfregs == 0)
1531                 return -EINVAL;
1532
1533         BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
1534         BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
1535
1536         if (req->total_num_bfregs > MLX5_MAX_BFREGS)
1537                 return -ENOMEM;
1538
1539         uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
1540         bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
1541         /* This holds the required static allocation requested by the user */
1542         req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
1543         if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
1544                 return -EINVAL;
1545
1546         bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
1547         bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
1548         bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
1549         bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
1550
1551         mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
1552                     MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1553                     lib_uar_4k ? "yes" : "no", ref_bfregs,
1554                     req->total_num_bfregs, bfregi->total_num_bfregs,
1555                     bfregi->num_sys_pages);
1556
1557         return 0;
1558 }
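
/*
 * Worked example (assuming MLX5_NON_FP_BFREGS_PER_UAR == 2, a 4K system
 * page and 4K UARs, so bfregs_per_sys_page == 2): a request for 5 bfregs
 * is aligned up to total_num_bfregs == 6, which needs
 * num_static_sys_pages == 3; the dynamic bfregs (up to
 * MLX5_MAX_DYN_BFREGS, aligned the same way) are then appended after the
 * static ones in total_num_bfregs and num_sys_pages.
 */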
1559
1560 static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1561 {
1562         struct mlx5_bfreg_info *bfregi;
1563         int err;
1564         int i;
1565
1566         bfregi = &context->bfregi;
1567         for (i = 0; i < bfregi->num_static_sys_pages; i++) {
1568                 err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
1569                 if (err)
1570                         goto error;
1571
1572                 mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
1573         }
1574
1575         for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
1576                 bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
1577
1578         return 0;
1579
1580 error:
1581         for (--i; i >= 0; i--)
1582                 if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
1583                         mlx5_ib_warn(dev, "failed to free uar %d\n", i);
1584
1585         return err;
1586 }
1587
1588 static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
1589 {
1590         struct mlx5_bfreg_info *bfregi;
1591         int err;
1592         int i;
1593
1594         bfregi = &context->bfregi;
1595         for (i = 0; i < bfregi->num_sys_pages; i++) {
1596                 if (i < bfregi->num_static_sys_pages ||
1597                     bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) {
1598                         err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
1599                         if (err) {
1600                                 mlx5_ib_warn(dev, "failed to free uar %d, err=%d\n", i, err);
1601                                 return err;
1602                         }
1603                 }
1604         }
1605
1606         return 0;
1607 }
1608
1609 static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
1610 {
1611         int err;
1612
1613         err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
1614         if (err)
1615                 return err;
1616
1617         if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1618             (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1619              !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1620                 return err;
1621
1622         mutex_lock(&dev->lb_mutex);
1623         dev->user_td++;
1624
1625         if (dev->user_td == 2)
1626                 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1627
1628         mutex_unlock(&dev->lb_mutex);
1629         return err;
1630 }
1631
1632 static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
1633 {
1634         mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
1635
1636         if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1637             (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1638              !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1639                 return;
1640
1641         mutex_lock(&dev->lb_mutex);
1642         dev->user_td--;
1643
1644         if (dev->user_td < 2)
1645                 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1646
1647         mutex_unlock(&dev->lb_mutex);
1648 }
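
/*
 * Note on the user_td threshold above: local loopback traffic only
 * becomes possible once at least two transport domains exist on the
 * device, so loopback is enabled when the second TD is allocated and
 * disabled again once fewer than two remain.
 */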
1649
1650 static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
1651                                                   struct ib_udata *udata)
1652 {
1653         struct mlx5_ib_dev *dev = to_mdev(ibdev);
1654         struct mlx5_ib_alloc_ucontext_req_v2 req = {};
1655         struct mlx5_ib_alloc_ucontext_resp resp = {};
1656         struct mlx5_core_dev *mdev = dev->mdev;
1657         struct mlx5_ib_ucontext *context;
1658         struct mlx5_bfreg_info *bfregi;
1659         int ver;
1660         int err;
1661         size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
1662                                      max_cqe_version);
1663         bool lib_uar_4k;
1664
1665         if (!dev->ib_active)
1666                 return ERR_PTR(-EAGAIN);
1667
1668         if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
1669                 ver = 0;
1670         else if (udata->inlen >= min_req_v2)
1671                 ver = 2;
1672         else
1673                 return ERR_PTR(-EINVAL);
1674
1675         err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
1676         if (err)
1677                 return ERR_PTR(err);
1678
1679         if (req.flags)
1680                 return ERR_PTR(-EINVAL);
1681
1682         if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
1683                 return ERR_PTR(-EOPNOTSUPP);
1684
1685         req.total_num_bfregs = ALIGN(req.total_num_bfregs,
1686                                     MLX5_NON_FP_BFREGS_PER_UAR);
1687         if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
1688                 return ERR_PTR(-EINVAL);
1689
1690         resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1691         if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
1692                 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
1693         resp.cache_line_size = cache_line_size();
1694         resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1695         resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1696         resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1697         resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1698         resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1699         resp.cqe_version = min_t(__u8,
1700                                  (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1701                                  req.max_cqe_version);
1702         resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1703                                 MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
1704         resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1705                                         MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
1706         resp.response_length = min(offsetof(typeof(resp), response_length) +
1707                                    sizeof(resp.response_length), udata->outlen);
1708
1709         if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) {
1710                 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_EGRESS))
1711                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM;
1712                 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA)
1713                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA;
1714                 if (MLX5_CAP_FLOWTABLE(dev->mdev, flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
1715                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING;
1716                 if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN)
1717                         resp.flow_action_flags |= MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN;
1718                 /* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD is currently always 0 */
1719         }
1720
1721         context = kzalloc(sizeof(*context), GFP_KERNEL);
1722         if (!context)
1723                 return ERR_PTR(-ENOMEM);
1724
1725         lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
1726         bfregi = &context->bfregi;
1727
1728         /* updates req->total_num_bfregs */
1729         err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
1730         if (err)
1731                 goto out_ctx;
1732
1733         mutex_init(&bfregi->lock);
1734         bfregi->lib_uar_4k = lib_uar_4k;
1735         bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
1736                                 GFP_KERNEL);
1737         if (!bfregi->count) {
1738                 err = -ENOMEM;
1739                 goto out_ctx;
1740         }
1741
1742         bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
1743                                     sizeof(*bfregi->sys_pages),
1744                                     GFP_KERNEL);
1745         if (!bfregi->sys_pages) {
1746                 err = -ENOMEM;
1747                 goto out_count;
1748         }
1749
1750         err = allocate_uars(dev, context);
1751         if (err)
1752                 goto out_sys_pages;
1753
1754 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1755         context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
1756 #endif
1757
1758         if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
1759                 err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
1760                 if (err)
1761                         goto out_uars;
1762         }
1763
1764         INIT_LIST_HEAD(&context->vma_private_list);
1765         mutex_init(&context->vma_private_list_mutex);
1766         INIT_LIST_HEAD(&context->db_page_list);
1767         mutex_init(&context->db_page_mutex);
1768
1769         resp.tot_bfregs = req.total_num_bfregs;
1770         resp.num_ports = dev->num_ports;
1771
1772         if (field_avail(typeof(resp), cqe_version, udata->outlen))
1773                 resp.response_length += sizeof(resp.cqe_version);
1774
1775         if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
1776                 resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
1777                                       MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
1778                 resp.response_length += sizeof(resp.cmds_supp_uhw);
1779         }
1780
1781         if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
1782                 if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
1783                         mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
1784                         resp.eth_min_inline++;
1785                 }
1786                 resp.response_length += sizeof(resp.eth_min_inline);
1787         }
1788
1789         if (field_avail(typeof(resp), clock_info_versions, udata->outlen)) {
1790                 if (mdev->clock_info)
1791                         resp.clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
1792                 resp.response_length += sizeof(resp.clock_info_versions);
1793         }
1794
1795         /*
1796          * We don't want to expose information from the PCI BAR that is located
1797          * after 4096 bytes, so if the arch only supports larger pages, let's
1798          * pretend we don't support reading the HCA's core clock. This is also
1799          * enforced by the mmap function.
1800          */
1801         if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
1802                 if (PAGE_SIZE <= 4096) {
1803                         resp.comp_mask |=
1804                                 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
1805                         resp.hca_core_clock_offset =
1806                                 offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
1807                 }
1808                 resp.response_length += sizeof(resp.hca_core_clock_offset);
1809         }
1810
1811         if (field_avail(typeof(resp), log_uar_size, udata->outlen))
1812                 resp.response_length += sizeof(resp.log_uar_size);
1813
1814         if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
1815                 resp.response_length += sizeof(resp.num_uars_per_page);
1816
1817         if (field_avail(typeof(resp), num_dyn_bfregs, udata->outlen)) {
1818                 resp.num_dyn_bfregs = bfregi->num_dyn_bfregs;
1819                 resp.response_length += sizeof(resp.num_dyn_bfregs);
1820         }
1821
1822         err = ib_copy_to_udata(udata, &resp, resp.response_length);
1823         if (err)
1824                 goto out_td;
1825
1826         bfregi->ver = ver;
1827         bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
1828         context->cqe_version = resp.cqe_version;
1829         context->lib_caps = req.lib_caps;
1830         print_lib_caps(dev, context->lib_caps);
1831
1832         return &context->ibucontext;
1833
1834 out_td:
1835         if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1836                 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1837
1838 out_uars:
1839         deallocate_uars(dev, context);
1840
1841 out_sys_pages:
1842         kfree(bfregi->sys_pages);
1843
1844 out_count:
1845         kfree(bfregi->count);
1846
1847 out_ctx:
1848         kfree(context);
1849
1850         return ERR_PTR(err);
1851 }
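
/*
 * Note on the field_avail()/response_length pattern used above: the
 * response struct only ever grows at its tail, so each optional field is
 * filled in only when the caller's buffer (udata->outlen) is large
 * enough to hold it, and response_length reports how many bytes were
 * actually written. This keeps newer kernels compatible with older
 * userspace libraries and vice versa.
 */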
1852
1853 static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
1854 {
1855         struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1856         struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
1857         struct mlx5_bfreg_info *bfregi;
1858
1859         bfregi = &context->bfregi;
1860         if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1861                 mlx5_ib_dealloc_transport_domain(dev, context->tdn);
1862
1863         deallocate_uars(dev, context);
1864         kfree(bfregi->sys_pages);
1865         kfree(bfregi->count);
1866         kfree(context);
1867
1868         return 0;
1869 }
1870
1871 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
1872                                  int uar_idx)
1873 {
1874         int fw_uars_per_page;
1875
1876         fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
1877
1878         return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
1879 }
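
/*
 * Example (values depend on PAGE_SIZE): with 4K system pages and fw
 * 4k-uar support, fw_uars_per_page == MLX5_UARS_IN_PAGE == 1 and each
 * UAR index gets its own pfn in BAR 0; with 64K pages,
 * fw_uars_per_page == 16 and sixteen consecutive UAR indices share a
 * single pfn.
 */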
1880
1881 static int get_command(unsigned long offset)
1882 {
1883         return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
1884 }
1885
1886 static int get_arg(unsigned long offset)
1887 {
1888         return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
1889 }
1890
1891 static int get_index(unsigned long offset)
1892 {
1893         return get_arg(offset);
1894 }
1895
1896 /* The index resides in an extra byte to enable index values larger than 255 */
1897 static int get_extended_index(unsigned long offset)
1898 {
1899         return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
1900 }
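
/*
 * Illustrative self-check (assumes MLX5_IB_MMAP_CMD_SHIFT == 8 and
 * MLX5_IB_MMAP_CMD_MASK == 0xff as defined in mlx5_ib.h); never called
 * by the driver, kept only to document the offset encoding:
 */
static __maybe_unused void mlx5_ib_mmap_offset_example(void)
{
        unsigned long offset = 0x030205;

        WARN_ON(get_command(offset) != 0x02);         /* bits 8..15    */
        WARN_ON(get_index(offset) != 0x05);           /* bits 0..7     */
        WARN_ON(get_extended_index(offset) != 0x305); /* + bits 16..23 */
}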
1901
1902 static void mlx5_ib_vma_open(struct vm_area_struct *area)
1903 {
1904         /* vma_open is called when a new VMA is created on top of our VMA.  This
1905          * is done through either the mremap flow or split_vma (usually due to
1906          * mlock, madvise, munmap, etc.). We do not support cloning the VMA, as
1907          * this VMA is strongly hardware related.  Therefore we set the vm_ops
1908          * of the newly created/cloned VMA to NULL, to prevent it from calling
1909          * us again and trying to do incorrect actions.  We assume that the
1910          * original VMA size is exactly a single page, and therefore no
1911          * "splitting" operations will happen to it.
1912          */
1913         area->vm_ops = NULL;
1914 }
1915
1916 static void mlx5_ib_vma_close(struct vm_area_struct *area)
1917 {
1918         struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
1919
1920         /* It's guaranteed that all VMAs opened on a FD are closed before the
1921          * file itself is closed, therefore no sync is needed with the regular
1922          * closing flow (e.g. mlx5_ib_dealloc_ucontext).
1923          * However, a sync is needed with accessing the vma as part of
1924          * mlx5_ib_disassociate_ucontext.
1925          * The close operation is usually called under mm->mmap_sem, except
1926          * when the process is exiting.
1927          * The exiting case is handled explicitly as part of
1928          * mlx5_ib_disassociate_ucontext.
1929          */
1930         mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
1931
1932         /* Set the vma context pointer to NULL in the mlx5_ib driver's
1933          * private data to protect against a race with
1934          * mlx5_ib_disassociate_ucontext().
1935          */
1936         mlx5_ib_vma_priv_data->vma = NULL;
1937         mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
1938         list_del(&mlx5_ib_vma_priv_data->list);
1939         mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
1940         kfree(mlx5_ib_vma_priv_data);
1941 }
1942
1943 static const struct vm_operations_struct mlx5_ib_vm_ops = {
1944         .open = mlx5_ib_vma_open,
1945         .close = mlx5_ib_vma_close
1946 };
1947
1948 static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
1949                                 struct mlx5_ib_ucontext *ctx)
1950 {
1951         struct mlx5_ib_vma_private_data *vma_prv;
1952         struct list_head *vma_head = &ctx->vma_private_list;
1953
1954         vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
1955         if (!vma_prv)
1956                 return -ENOMEM;
1957
1958         vma_prv->vma = vma;
1959         vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
1960         vma->vm_private_data = vma_prv;
1961         vma->vm_ops = &mlx5_ib_vm_ops;
1962
1963         mutex_lock(&ctx->vma_private_list_mutex);
1964         list_add(&vma_prv->list, vma_head);
1965         mutex_unlock(&ctx->vma_private_list_mutex);
1966
1967         return 0;
1968 }
1969
1970 static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
1971 {
1972         struct vm_area_struct *vma;
1973         struct mlx5_ib_vma_private_data *vma_private, *n;
1974         struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
1975
1976         mutex_lock(&context->vma_private_list_mutex);
1977         list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
1978                                  list) {
1979                 vma = vma_private->vma;
1980                 zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
1981                 /* context going to be destroyed, should
1982                  * not access ops any more.
1983                  */
1984                 vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
1985                 vma->vm_ops = NULL;
1986                 list_del(&vma_private->list);
1987                 kfree(vma_private);
1988         }
1989         mutex_unlock(&context->vma_private_list_mutex);
1990 }
1991
1992 static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
1993 {
1994         switch (cmd) {
1995         case MLX5_IB_MMAP_WC_PAGE:
1996                 return "WC";
1997         case MLX5_IB_MMAP_REGULAR_PAGE:
1998                 return "best effort WC";
1999         case MLX5_IB_MMAP_NC_PAGE:
2000                 return "NC";
2001         case MLX5_IB_MMAP_DEVICE_MEM:
2002                 return "Device Memory";
2003         default:
2004                 return NULL;
2005         }
2006 }
2007
2008 static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
2009                                         struct vm_area_struct *vma,
2010                                         struct mlx5_ib_ucontext *context)
2011 {
2012         phys_addr_t pfn;
2013         int err;
2014
2015         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2016                 return -EINVAL;
2017
2018         if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
2019                 return -EOPNOTSUPP;
2020
2021         if (vma->vm_flags & VM_WRITE)
2022                 return -EPERM;
2023
2024         if (!dev->mdev->clock_info_page)
2025                 return -EOPNOTSUPP;
2026
2027         pfn = page_to_pfn(dev->mdev->clock_info_page);
2028         err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
2029                               vma->vm_page_prot);
2030         if (err)
2031                 return err;
2032
2033         return mlx5_ib_set_vma_data(vma, context);
2034 }
2035
2036 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
2037                     struct vm_area_struct *vma,
2038                     struct mlx5_ib_ucontext *context)
2039 {
2040         struct mlx5_bfreg_info *bfregi = &context->bfregi;
2041         int err;
2042         unsigned long idx;
2043         phys_addr_t pfn, pa;
2044         pgprot_t prot;
2045         u32 bfreg_dyn_idx = 0;
2046         u32 uar_index;
2047         int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
2048         int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
2049                                 bfregi->num_static_sys_pages;
2050
2051         if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2052                 return -EINVAL;
2053
2054         if (dyn_uar)
2055                 idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
2056         else
2057                 idx = get_index(vma->vm_pgoff);
2058
2059         if (idx >= max_valid_idx) {
2060                 mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
2061                              idx, max_valid_idx);
2062                 return -EINVAL;
2063         }
2064
2065         switch (cmd) {
2066         case MLX5_IB_MMAP_WC_PAGE:
2067         case MLX5_IB_MMAP_ALLOC_WC:
2068 /* Some architectures don't support WC memory */
2069 #if defined(CONFIG_X86)
2070                 if (!pat_enabled())
2071                         return -EPERM;
2072 #elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
2073                 return -EPERM;
2074 #endif
2075         /* fall through */
2076         case MLX5_IB_MMAP_REGULAR_PAGE:
2077                 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
2078                 prot = pgprot_writecombine(vma->vm_page_prot);
2079                 break;
2080         case MLX5_IB_MMAP_NC_PAGE:
2081                 prot = pgprot_noncached(vma->vm_page_prot);
2082                 break;
2083         default:
2084                 return -EINVAL;
2085         }
2086
2087         if (dyn_uar) {
2088                 int uars_per_page;
2089
2090                 uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
2091                 bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
2092                 if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
2093                         mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
2094                                      bfreg_dyn_idx, bfregi->total_num_bfregs);
2095                         return -EINVAL;
2096                 }
2097
2098                 mutex_lock(&bfregi->lock);
2099                 /* Fail if the UAR is already allocated; the first bfreg
2100                  * index of each page holds its count.
2101                  */
2102                 if (bfregi->count[bfreg_dyn_idx]) {
2103                         mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
2104                         mutex_unlock(&bfregi->lock);
2105                         return -EINVAL;
2106                 }
2107
2108                 bfregi->count[bfreg_dyn_idx]++;
2109                 mutex_unlock(&bfregi->lock);
2110
2111                 err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
2112                 if (err) {
2113                         mlx5_ib_warn(dev, "UAR alloc failed\n");
2114                         goto free_bfreg;
2115                 }
2116         } else {
2117                 uar_index = bfregi->sys_pages[idx];
2118         }
2119
2120         pfn = uar_index2pfn(dev, uar_index);
2121         mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
2122
2123         vma->vm_page_prot = prot;
2124         err = io_remap_pfn_range(vma, vma->vm_start, pfn,
2125                                  PAGE_SIZE, vma->vm_page_prot);
2126         if (err) {
2127                 mlx5_ib_err(dev,
2128                             "io_remap_pfn_range failed with error=%d, mmap_cmd=%s\n",
2129                             err, mmap_cmd2str(cmd));
2130                 err = -EAGAIN;
2131                 goto err;
2132         }
2133
2134         pa = pfn << PAGE_SHIFT;
2135
2136         err = mlx5_ib_set_vma_data(vma, context);
2137         if (err)
2138                 goto err;
2139
2140         if (dyn_uar)
2141                 bfregi->sys_pages[idx] = uar_index;
2142         return 0;
2143
2144 err:
2145         if (!dyn_uar)
2146                 return err;
2147
2148         mlx5_cmd_free_uar(dev->mdev, uar_index); /* free the UAR just allocated, not the mmap index */
2149
2150 free_bfreg:
2151         mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
2152
2153         return err;
2154 }
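
/*
 * Summary of the dynamic-UAR (MLX5_IB_MMAP_ALLOC_WC) path above: reserve
 * the bfreg slot under bfregi->lock, allocate a fresh UAR from firmware,
 * remap it into the vma, and only then publish the UAR index in
 * sys_pages[]; on any failure both the UAR and the bfreg count are
 * rolled back.
 */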
2155
2156 static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
2157 {
2158         struct mlx5_ib_ucontext *mctx = to_mucontext(context);
2159         struct mlx5_ib_dev *dev = to_mdev(context->device);
2160         u16 page_idx = get_extended_index(vma->vm_pgoff);
2161         size_t map_size = vma->vm_end - vma->vm_start;
2162         u32 npages = map_size >> PAGE_SHIFT;
2163         phys_addr_t pfn;
2164         pgprot_t prot;
2165
2166         if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
2167             page_idx + npages)
2168                 return -EINVAL;
2169
2170         pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
2171               MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
2172               PAGE_SHIFT) +
2173               page_idx;
2174         prot = pgprot_writecombine(vma->vm_page_prot);
2175         vma->vm_page_prot = prot;
2176
2177         if (io_remap_pfn_range(vma, vma->vm_start, pfn, map_size,
2178                                vma->vm_page_prot))
2179                 return -EAGAIN;
2180
2181         return mlx5_ib_set_vma_data(vma, mctx);
2182 }
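
/*
 * The find_next_zero_bit() check above verifies that every page in the
 * requested range was handed to this context by mlx5_ib_alloc_dm(),
 * which sets the corresponding bits in dm_pages; a hole in the bitmap
 * means the caller is trying to map device memory it does not own.
 */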
2183
2184 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
2185 {
2186         struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
2187         struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
2188         unsigned long command;
2189         phys_addr_t pfn;
2190
2191         command = get_command(vma->vm_pgoff);
2192         switch (command) {
2193         case MLX5_IB_MMAP_WC_PAGE:
2194         case MLX5_IB_MMAP_NC_PAGE:
2195         case MLX5_IB_MMAP_REGULAR_PAGE:
2196         case MLX5_IB_MMAP_ALLOC_WC:
2197                 return uar_mmap(dev, command, vma, context);
2198
2199         case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
2200                 return -ENOSYS;
2201
2202         case MLX5_IB_MMAP_CORE_CLOCK:
2203                 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
2204                         return -EINVAL;
2205
2206                 if (vma->vm_flags & VM_WRITE)
2207                         return -EPERM;
2208
2209                 /* Don't expose to user-space information it shouldn't have */
2210                 if (PAGE_SIZE > 4096)
2211                         return -EOPNOTSUPP;
2212
2213                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2214                 pfn = (dev->mdev->iseg_base +
2215                        offsetof(struct mlx5_init_seg, internal_timer_h)) >>
2216                         PAGE_SHIFT;
2217                 if (io_remap_pfn_range(vma, vma->vm_start, pfn,
2218                                        PAGE_SIZE, vma->vm_page_prot))
2219                         return -EAGAIN;
2220                 break;
2221         case MLX5_IB_MMAP_CLOCK_INFO:
2222                 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
2223
2224         case MLX5_IB_MMAP_DEVICE_MEM:
2225                 return dm_mmap(ibcontext, vma);
2226
2227         default:
2228                 return -EINVAL;
2229         }
2230
2231         return 0;
2232 }
2233
2234 struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
2235                                struct ib_ucontext *context,
2236                                struct ib_dm_alloc_attr *attr,
2237                                struct uverbs_attr_bundle *attrs)
2238 {
2239         u64 act_size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
2240         struct mlx5_memic *memic = &to_mdev(ibdev)->memic;
2241         phys_addr_t memic_addr;
2242         struct mlx5_ib_dm *dm;
2243         u64 start_offset;
2244         u32 page_idx;
2245         int err;
2246
2247         dm = kzalloc(sizeof(*dm), GFP_KERNEL);
2248         if (!dm)
2249                 return ERR_PTR(-ENOMEM);
2250
2251         mlx5_ib_dbg(to_mdev(ibdev), "alloc_memic req: user_length=0x%llx act_length=0x%llx log_alignment=%d\n",
2252                     attr->length, act_size, attr->alignment);
2253
2254         err = mlx5_cmd_alloc_memic(memic, &memic_addr,
2255                                    act_size, attr->alignment);
2256         if (err)
2257                 goto err_free;
2258
2259         start_offset = memic_addr & ~PAGE_MASK;
2260         page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) -
2261                     MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
2262                     PAGE_SHIFT;
2263
2264         err = uverbs_copy_to(attrs,
2265                              MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
2266                              &start_offset, sizeof(start_offset));
2267         if (err)
2268                 goto err_dealloc;
2269
2270         err = uverbs_copy_to(attrs,
2271                              MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
2272                              &page_idx, sizeof(page_idx));
2273         if (err)
2274                 goto err_dealloc;
2275
2276         bitmap_set(to_mucontext(context)->dm_pages, page_idx,
2277                    DIV_ROUND_UP(act_size, PAGE_SIZE));
2278
2279         dm->dev_addr = memic_addr;
2280
2281         return &dm->ibdm;
2282
2283 err_dealloc:
2284         mlx5_cmd_dealloc_memic(memic, memic_addr,
2285                                act_size);
2286 err_free:
2287         kfree(dm);
2288         return ERR_PTR(err);
2289 }
2290
2291 int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
2292 {
2293         struct mlx5_memic *memic = &to_mdev(ibdm->device)->memic;
2294         struct mlx5_ib_dm *dm = to_mdm(ibdm);
2295         u64 act_size = roundup(dm->ibdm.length, MLX5_MEMIC_BASE_SIZE);
2296         u32 page_idx;
2297         int ret;
2298
2299         ret = mlx5_cmd_dealloc_memic(memic, dm->dev_addr, act_size);
2300         if (ret)
2301                 return ret;
2302
2303         page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
2304                     MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
2305                     PAGE_SHIFT;
2306         bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages,
2307                      page_idx,
2308                      DIV_ROUND_UP(act_size, PAGE_SIZE));
2309
2310         kfree(dm);
2311
2312         return 0;
2313 }
2314
2315 static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
2316                                       struct ib_ucontext *context,
2317                                       struct ib_udata *udata)
2318 {
2319         struct mlx5_ib_alloc_pd_resp resp;
2320         struct mlx5_ib_pd *pd;
2321         int err;
2322
2323         pd = kmalloc(sizeof(*pd), GFP_KERNEL);
2324         if (!pd)
2325                 return ERR_PTR(-ENOMEM);
2326
2327         err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
2328         if (err) {
2329                 kfree(pd);
2330                 return ERR_PTR(err);
2331         }
2332
2333         if (context) {
2334                 resp.pdn = pd->pdn;
2335                 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
2336                         mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
2337                         kfree(pd);
2338                         return ERR_PTR(-EFAULT);
2339                 }
2340         }
2341
2342         return &pd->ibpd;
2343 }
2344
2345 static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
2346 {
2347         struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2348         struct mlx5_ib_pd *mpd = to_mpd(pd);
2349
2350         mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
2351         kfree(mpd);
2352
2353         return 0;
2354 }
2355
2356 enum {
2357         MATCH_CRITERIA_ENABLE_OUTER_BIT,
2358         MATCH_CRITERIA_ENABLE_MISC_BIT,
2359         MATCH_CRITERIA_ENABLE_INNER_BIT,
2360         MATCH_CRITERIA_ENABLE_MISC2_BIT
2361 };
2362
2363 #define HEADER_IS_ZERO(match_criteria, headers)                            \
2364         !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
2365                     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))       \
2366
2367 static u8 get_match_criteria_enable(u32 *match_criteria)
2368 {
2369         u8 match_criteria_enable;
2370
2371         match_criteria_enable =
2372                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2373                 MATCH_CRITERIA_ENABLE_OUTER_BIT;
2374         match_criteria_enable |=
2375                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2376                 MATCH_CRITERIA_ENABLE_MISC_BIT;
2377         match_criteria_enable |=
2378                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2379                 MATCH_CRITERIA_ENABLE_INNER_BIT;
2380         match_criteria_enable |=
2381                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2382                 MATCH_CRITERIA_ENABLE_MISC2_BIT;
2383
2384         return match_criteria_enable;
2385 }
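
/*
 * Example: a rule that matches on outer L2-L4 headers plus misc
 * parameters (e.g. an IPv6 flow label) yields
 * match_criteria_enable == BIT(MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 * BIT(MATCH_CRITERIA_ENABLE_MISC_BIT) == 0x3.
 */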
2386
2387 static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
2388 {
2389         MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
2390         MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
2391 }
2392
2393 static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
2394                            bool inner)
2395 {
2396         if (inner) {
2397                 MLX5_SET(fte_match_set_misc,
2398                          misc_c, inner_ipv6_flow_label, mask);
2399                 MLX5_SET(fte_match_set_misc,
2400                          misc_v, inner_ipv6_flow_label, val);
2401         } else {
2402                 MLX5_SET(fte_match_set_misc,
2403                          misc_c, outer_ipv6_flow_label, mask);
2404                 MLX5_SET(fte_match_set_misc,
2405                          misc_v, outer_ipv6_flow_label, val);
2406         }
2407 }
2408
2409 static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
2410 {
2411         MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
2412         MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
2413         MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
2414         MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
2415 }
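
/*
 * The ToS/traffic-class octet is split by the device into ip_ecn (the
 * low two bits) and ip_dscp (the upper six bits), hence the >> 2 above;
 * e.g. tos == 0xb8 programs ip_dscp == 0x2e (DSCP 46, EF) with
 * ip_ecn == 0.
 */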
2416
2417 static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
2418 {
2419         if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
2420             !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
2421                 return -EOPNOTSUPP;
2422
2423         if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
2424             !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
2425                 return -EOPNOTSUPP;
2426
2427         if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
2428             !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
2429                 return -EOPNOTSUPP;
2430
2431         if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
2432             !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
2433                 return -EOPNOTSUPP;
2434
2435         return 0;
2436 }
2437
2438 #define LAST_ETH_FIELD vlan_tag
2439 #define LAST_IB_FIELD sl
2440 #define LAST_IPV4_FIELD tos
2441 #define LAST_IPV6_FIELD traffic_class
2442 #define LAST_TCP_UDP_FIELD src_port
2443 #define LAST_TUNNEL_FIELD tunnel_id
2444 #define LAST_FLOW_TAG_FIELD tag_id
2445 #define LAST_DROP_FIELD size
2446 #define LAST_COUNTERS_FIELD counters
2447
2448 /* @field is the last supported field; any non-zero byte after it means the spec is unsupported */
2449 #define FIELDS_NOT_SUPPORTED(filter, field)\
2450         memchr_inv((void *)&filter.field  +\
2451                    sizeof(filter.field), 0,\
2452                    sizeof(filter) -\
2453                    offsetof(typeof(filter), field) -\
2454                    sizeof(filter.field))
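
/*
 * Worked example: for an IPv4 spec, LAST_IPV4_FIELD is tos, so
 * FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD) scans every
 * byte of the mask after the tos member; a non-zero byte there means
 * userspace asked to match a field this driver cannot handle, and the
 * spec is rejected with -EOPNOTSUPP.
 */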
2455
2456 static int parse_flow_flow_action(const union ib_flow_spec *ib_spec,
2457                                   const struct ib_flow_attr *flow_attr,
2458                                   struct mlx5_flow_act *action)
2459 {
2460         struct mlx5_ib_flow_action *maction = to_mflow_act(ib_spec->action.act);
2461
2462         switch (maction->ib_action.type) {
2463         case IB_FLOW_ACTION_ESP:
2464                 /* Currently only AES_GCM keymat is supported by the driver */
2465                 action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
2466                 action->action |= flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS ?
2467                         MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
2468                         MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
2469                 return 0;
2470         default:
2471                 return -EOPNOTSUPP;
2472         }
2473 }
2474
2475 static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
2476                            u32 *match_v, const union ib_flow_spec *ib_spec,
2477                            const struct ib_flow_attr *flow_attr,
2478                            struct mlx5_flow_act *action, u32 prev_type)
2479 {
2480         void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
2481                                            misc_parameters);
2482         void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
2483                                            misc_parameters);
2484         void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
2485                                             misc_parameters_2);
2486         void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
2487                                             misc_parameters_2);
2488         void *headers_c;
2489         void *headers_v;
2490         int match_ipv;
2491         int ret;
2492
2493         if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2494                 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2495                                          inner_headers);
2496                 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2497                                          inner_headers);
2498                 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2499                                         ft_field_support.inner_ip_version);
2500         } else {
2501                 headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
2502                                          outer_headers);
2503                 headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
2504                                          outer_headers);
2505                 match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2506                                         ft_field_support.outer_ip_version);
2507         }
2508
2509         switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2510         case IB_FLOW_SPEC_ETH:
2511                 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
2512                         return -EOPNOTSUPP;
2513
2514                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2515                                              dmac_47_16),
2516                                 ib_spec->eth.mask.dst_mac);
2517                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2518                                              dmac_47_16),
2519                                 ib_spec->eth.val.dst_mac);
2520
2521                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2522                                              smac_47_16),
2523                                 ib_spec->eth.mask.src_mac);
2524                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2525                                              smac_47_16),
2526                                 ib_spec->eth.val.src_mac);
2527
2528                 if (ib_spec->eth.mask.vlan_tag) {
2529                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2530                                  cvlan_tag, 1);
2531                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2532                                  cvlan_tag, 1);
2533
2534                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2535                                  first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
2536                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2537                                  first_vid, ntohs(ib_spec->eth.val.vlan_tag));
2538
2539                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2540                                  first_cfi,
2541                                  ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
2542                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2543                                  first_cfi,
2544                                  ntohs(ib_spec->eth.val.vlan_tag) >> 12);
2545
2546                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2547                                  first_prio,
2548                                  ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
2549                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2550                                  first_prio,
2551                                  ntohs(ib_spec->eth.val.vlan_tag) >> 13);
2552                 }
2553                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2554                          ethertype, ntohs(ib_spec->eth.mask.ether_type));
2555                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2556                          ethertype, ntohs(ib_spec->eth.val.ether_type));
2557                 break;
2558         case IB_FLOW_SPEC_IPV4:
2559                 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
2560                         return -EOPNOTSUPP;
2561
2562                 if (match_ipv) {
2563                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2564                                  ip_version, 0xf);
2565                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2566                                  ip_version, MLX5_FS_IPV4_VERSION);
2567                 } else {
2568                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2569                                  ethertype, 0xffff);
2570                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2571                                  ethertype, ETH_P_IP);
2572                 }
2573
2574                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2575                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
2576                        &ib_spec->ipv4.mask.src_ip,
2577                        sizeof(ib_spec->ipv4.mask.src_ip));
2578                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2579                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
2580                        &ib_spec->ipv4.val.src_ip,
2581                        sizeof(ib_spec->ipv4.val.src_ip));
2582                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2583                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2584                        &ib_spec->ipv4.mask.dst_ip,
2585                        sizeof(ib_spec->ipv4.mask.dst_ip));
2586                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2587                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2588                        &ib_spec->ipv4.val.dst_ip,
2589                        sizeof(ib_spec->ipv4.val.dst_ip));
2590
2591                 set_tos(headers_c, headers_v,
2592                         ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
2593
2594                 set_proto(headers_c, headers_v,
2595                           ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
2596                 break;
2597         case IB_FLOW_SPEC_IPV6:
2598                 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
2599                         return -EOPNOTSUPP;
2600
2601                 if (match_ipv) {
2602                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2603                                  ip_version, 0xf);
2604                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2605                                  ip_version, MLX5_FS_IPV6_VERSION);
2606                 } else {
2607                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2608                                  ethertype, 0xffff);
2609                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2610                                  ethertype, ETH_P_IPV6);
2611                 }
2612
2613                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2614                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
2615                        &ib_spec->ipv6.mask.src_ip,
2616                        sizeof(ib_spec->ipv6.mask.src_ip));
2617                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2618                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
2619                        &ib_spec->ipv6.val.src_ip,
2620                        sizeof(ib_spec->ipv6.val.src_ip));
2621                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2622                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2623                        &ib_spec->ipv6.mask.dst_ip,
2624                        sizeof(ib_spec->ipv6.mask.dst_ip));
2625                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2626                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2627                        &ib_spec->ipv6.val.dst_ip,
2628                        sizeof(ib_spec->ipv6.val.dst_ip));
2629
2630                 set_tos(headers_c, headers_v,
2631                         ib_spec->ipv6.mask.traffic_class,
2632                         ib_spec->ipv6.val.traffic_class);
2633
2634                 set_proto(headers_c, headers_v,
2635                           ib_spec->ipv6.mask.next_hdr,
2636                           ib_spec->ipv6.val.next_hdr);
2637
2638                 set_flow_label(misc_params_c, misc_params_v,
2639                                ntohl(ib_spec->ipv6.mask.flow_label),
2640                                ntohl(ib_spec->ipv6.val.flow_label),
2641                                ib_spec->type & IB_FLOW_SPEC_INNER);
2642                 break;
2643         case IB_FLOW_SPEC_ESP:
2644                 if (ib_spec->esp.mask.seq)
2645                         return -EOPNOTSUPP;
2646
2647                 MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi,
2648                          ntohl(ib_spec->esp.mask.spi));
2649                 MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
2650                          ntohl(ib_spec->esp.val.spi));
2651                 break;
2652         case IB_FLOW_SPEC_TCP:
2653                 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2654                                          LAST_TCP_UDP_FIELD))
2655                         return -EOPNOTSUPP;
2656
2657                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2658                          0xff);
2659                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2660                          IPPROTO_TCP);
2661
2662                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
2663                          ntohs(ib_spec->tcp_udp.mask.src_port));
2664                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2665                          ntohs(ib_spec->tcp_udp.val.src_port));
2666
2667                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
2668                          ntohs(ib_spec->tcp_udp.mask.dst_port));
2669                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2670                          ntohs(ib_spec->tcp_udp.val.dst_port));
2671                 break;
2672         case IB_FLOW_SPEC_UDP:
2673                 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
2674                                          LAST_TCP_UDP_FIELD))
2675                         return -EOPNOTSUPP;
2676
2677                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2678                          0xff);
2679                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2680                          IPPROTO_UDP);
2681
2682                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
2683                          ntohs(ib_spec->tcp_udp.mask.src_port));
2684                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2685                          ntohs(ib_spec->tcp_udp.val.src_port));
2686
2687                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
2688                          ntohs(ib_spec->tcp_udp.mask.dst_port));
2689                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2690                          ntohs(ib_spec->tcp_udp.val.dst_port));
2691                 break;
2692         case IB_FLOW_SPEC_GRE:
2693                 if (ib_spec->gre.mask.c_ks_res0_ver)
2694                         return -EOPNOTSUPP;
2695
2696                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2697                          0xff);
2698                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2699                          IPPROTO_GRE);
2700
2701                 MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
2702                          0xffff);
2703                 MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
2704                          ntohs(ib_spec->gre.val.protocol));
2705
2706                 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
2707                                     gre_key_h),
2708                        &ib_spec->gre.mask.key,
2709                        sizeof(ib_spec->gre.mask.key));
2710                 memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
2711                                     gre_key_h),
2712                        &ib_spec->gre.val.key,
2713                        sizeof(ib_spec->gre.val.key));
2714                 break;
2715         case IB_FLOW_SPEC_MPLS:
2716                 switch (prev_type) {
2717                 case IB_FLOW_SPEC_UDP:
2718                         if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2719                                                    ft_field_support.outer_first_mpls_over_udp),
2720                                                    &ib_spec->mpls.mask.tag))
2721                                 return -EOPNOTSUPP;
2722
2723                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2724                                             outer_first_mpls_over_udp),
2725                                &ib_spec->mpls.val.tag,
2726                                sizeof(ib_spec->mpls.val.tag));
2727                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2728                                             outer_first_mpls_over_udp),
2729                                &ib_spec->mpls.mask.tag,
2730                                sizeof(ib_spec->mpls.mask.tag));
2731                         break;
2732                 case IB_FLOW_SPEC_GRE:
2733                         if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2734                                                    ft_field_support.outer_first_mpls_over_gre),
2735                                                    &ib_spec->mpls.mask.tag))
2736                                 return -EOPNOTSUPP;
2737
2738                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2739                                             outer_first_mpls_over_gre),
2740                                &ib_spec->mpls.val.tag,
2741                                sizeof(ib_spec->mpls.val.tag));
2742                         memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2743                                             outer_first_mpls_over_gre),
2744                                &ib_spec->mpls.mask.tag,
2745                                sizeof(ib_spec->mpls.mask.tag));
2746                         break;
2747                 default:
2748                         if (ib_spec->type & IB_FLOW_SPEC_INNER) {
2749                                 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2750                                                            ft_field_support.inner_first_mpls),
2751                                                            &ib_spec->mpls.mask.tag))
2752                                         return -EOPNOTSUPP;
2753
2754                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2755                                                     inner_first_mpls),
2756                                        &ib_spec->mpls.val.tag,
2757                                        sizeof(ib_spec->mpls.val.tag));
2758                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2759                                                     inner_first_mpls),
2760                                        &ib_spec->mpls.mask.tag,
2761                                        sizeof(ib_spec->mpls.mask.tag));
2762                         } else {
2763                                 if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2764                                                            ft_field_support.outer_first_mpls),
2765                                                            &ib_spec->mpls.mask.tag))
2766                                         return -EOPNOTSUPP;
2767
2768                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
2769                                                     outer_first_mpls),
2770                                        &ib_spec->mpls.val.tag,
2771                                        sizeof(ib_spec->mpls.val.tag));
2772                                 memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
2773                                                     outer_first_mpls),
2774                                        &ib_spec->mpls.mask.tag,
2775                                        sizeof(ib_spec->mpls.mask.tag));
2776                         }
2777                 }
2778                 break;
2779         case IB_FLOW_SPEC_VXLAN_TUNNEL:
2780                 if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
2781                                          LAST_TUNNEL_FIELD))
2782                         return -EOPNOTSUPP;
2783
2784                 MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
2785                          ntohl(ib_spec->tunnel.mask.tunnel_id));
2786                 MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
2787                          ntohl(ib_spec->tunnel.val.tunnel_id));
2788                 break;
2789         case IB_FLOW_SPEC_ACTION_TAG:
2790                 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
2791                                          LAST_FLOW_TAG_FIELD))
2792                         return -EOPNOTSUPP;
2793                 if (ib_spec->flow_tag.tag_id >= BIT(24))
2794                         return -EINVAL;
2795
2796                 action->flow_tag = ib_spec->flow_tag.tag_id;
2797                 action->has_flow_tag = true;
2798                 break;
2799         case IB_FLOW_SPEC_ACTION_DROP:
2800                 if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
2801                                          LAST_DROP_FIELD))
2802                         return -EOPNOTSUPP;
2803                 action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
2804                 break;
2805         case IB_FLOW_SPEC_ACTION_HANDLE:
2806                 ret = parse_flow_flow_action(ib_spec, flow_attr, action);
2807                 if (ret)
2808                         return ret;
2809                 break;
2810         case IB_FLOW_SPEC_ACTION_COUNT:
2811                 if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
2812                                          LAST_COUNTERS_FIELD))
2813                         return -EOPNOTSUPP;
2814
2815                 /* for now, only one counters spec per flow is supported */
2816                 if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
2817                         return -EINVAL;
2818
2819                 action->counters = ib_spec->flow_count.counters;
2820                 action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
2821                 break;
2822         default:
2823                 return -EINVAL;
2824         }
2825
2826         return 0;
2827 }
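
/*
 * Editorial sketch: every matcher in parse_flow_attr() is written as a
 * criteria/value pair -- the "_c" buffer holds the mask of bits that must
 * match and the "_v" buffer holds the values they must take.  A
 * hypothetical helper matching an exact TCP destination port:
 */
static inline void example_match_tcp_dport(void *headers_c, void *headers_v,
                                           u16 dport)
{
        /* the IP protocol must be TCP ... */
        MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
        /* ... and all 16 bits of the destination port must equal dport */
        MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport, 0xffff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport, dport);
}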
2828
2829 /* If a flow could catch both multicast and unicast packets,
2830  * it must not be placed in the multicast flow steering table, since such
2831  * a rule could steal packets that belong to other multicast flows.
2832  */
2833 static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
2834 {
2835         union ib_flow_spec *flow_spec;
2836
2837         if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
2838             ib_attr->num_of_specs < 1)
2839                 return false;
2840
2841         flow_spec = (union ib_flow_spec *)(ib_attr + 1);
2842         if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
2843                 struct ib_flow_spec_ipv4 *ipv4_spec;
2844
2845                 ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
2846                 if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
2847                         return true;
2848
2849                 return false;
2850         }
2851
2852         if (flow_spec->type == IB_FLOW_SPEC_ETH) {
2853                 struct ib_flow_spec_eth *eth_spec;
2854
2855                 eth_spec = (struct ib_flow_spec_eth *)flow_spec;
2856                 return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
2857                        is_multicast_ether_addr(eth_spec->val.dst_mac);
2858         }
2859
2860         return false;
2861 }
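
/*
 * Editorial sketch: both multicast tests above reduce to simple bit
 * checks -- ipv4_is_multicast() is true for 224.0.0.0/4 (top nibble
 * 1110b) and is_multicast_ether_addr() tests the I/G bit of the first
 * octet.  Expressed on raw bytes (the example_* helpers are hypothetical):
 */
static inline bool example_ipv4_is_multicast(const u8 *dst_ip)
{
        return (dst_ip[0] & 0xf0) == 0xe0;      /* 224.0.0.0 - 239.255.255.255 */
}

static inline bool example_is_multicast_mac(const u8 *dst_mac)
{
        return dst_mac[0] & 0x01;               /* group (I/G) bit set */
}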
2862
2863 enum valid_spec {
2864         VALID_SPEC_INVALID,
2865         VALID_SPEC_VALID,
2866         VALID_SPEC_NA,
2867 };
2868
2869 static enum valid_spec
2870 is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
2871                      const struct mlx5_flow_spec *spec,
2872                      const struct mlx5_flow_act *flow_act,
2873                      bool egress)
2874 {
2875         const u32 *match_c = spec->match_criteria;
2876         bool is_crypto =
2877                 (flow_act->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
2878                                      MLX5_FLOW_CONTEXT_ACTION_DECRYPT));
2879         bool is_ipsec = mlx5_fs_is_ipsec_flow(match_c);
2880         bool is_drop = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_DROP;
2881
2882         /*
2883          * Currently only crypto actions are supported in egress; once regular
2884          * egress rules are supported, this should always return VALID_SPEC_NA.
2885          */
2886         if (!is_crypto)
2887                 return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA;
2888
2889         return is_crypto && is_ipsec &&
2890                 (!egress || (!is_drop && !flow_act->has_flow_tag)) ?
2891                 VALID_SPEC_VALID : VALID_SPEC_INVALID;
2892 }
2893
2894 static bool is_valid_spec(struct mlx5_core_dev *mdev,
2895                           const struct mlx5_flow_spec *spec,
2896                           const struct mlx5_flow_act *flow_act,
2897                           bool egress)
2898 {
2899         /* We currently only support IPsec egress flows */
2900         return is_valid_esp_aes_gcm(mdev, spec, flow_act, egress) != VALID_SPEC_INVALID;
2901 }
2902
2903 static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
2904                                const struct ib_flow_attr *flow_attr,
2905                                bool check_inner)
2906 {
2907         union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
2908         int match_ipv = check_inner ?
2909                         MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2910                                         ft_field_support.inner_ip_version) :
2911                         MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2912                                         ft_field_support.outer_ip_version);
2913         int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
2914         bool ipv4_spec_valid, ipv6_spec_valid;
2915         unsigned int ip_spec_type = 0;
2916         bool has_ethertype = false;
2917         unsigned int spec_index;
2918         bool mask_valid = true;
2919         u16 eth_type = 0;
2920         bool type_valid;
2921
2922         /* Validate that ethertype is correct */
2923         for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
2924                 if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
2925                     ib_spec->eth.mask.ether_type) {
2926                         mask_valid = (ib_spec->eth.mask.ether_type ==
2927                                       htons(0xffff));
2928                         has_ethertype = true;
2929                         eth_type = ntohs(ib_spec->eth.val.ether_type);
2930                 } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
2931                            (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
2932                         ip_spec_type = ib_spec->type;
2933                 }
2934                 ib_spec = (void *)ib_spec + ib_spec->size;
2935         }
2936
2937         type_valid = (!has_ethertype) || (!ip_spec_type);
2938         if (!type_valid && mask_valid) {
2939                 ipv4_spec_valid = (eth_type == ETH_P_IP) &&
2940                         (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
2941                 ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
2942                         (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
2943
2944                 type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
2945                              (((eth_type == ETH_P_MPLS_UC) ||
2946                                (eth_type == ETH_P_MPLS_MC)) && match_ipv);
2947         }
2948
2949         return type_valid;
2950 }
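
/*
 * Worked example (editorial): an outer ETH spec carrying ether_type 0x0800
 * (ETH_P_IP) with mask 0xffff is only consistent with an outer
 * IB_FLOW_SPEC_IPV4 spec; pairing it with an IPV6 spec is rejected.  The
 * MPLS ethertypes 0x8847/0x8848 are accepted only when the device can also
 * match on ip_version (match_ipv).
 */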
2951
2952 static bool is_valid_attr(struct mlx5_core_dev *mdev,
2953                           const struct ib_flow_attr *flow_attr)
2954 {
2955         return is_valid_ethertype(mdev, flow_attr, false) &&
2956                is_valid_ethertype(mdev, flow_attr, true);
2957 }
2958
2959 static void put_flow_table(struct mlx5_ib_dev *dev,
2960                            struct mlx5_ib_flow_prio *prio, bool ft_added)
2961 {
2962         prio->refcount -= !!ft_added;
2963         if (!prio->refcount) {
2964                 mlx5_destroy_flow_table(prio->flow_table);
2965                 prio->flow_table = NULL;
2966         }
2967 }
2968
2969 static void counters_clear_description(struct ib_counters *counters)
2970 {
2971         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
2972
2973         mutex_lock(&mcounters->mcntrs_mutex);
2974         kfree(mcounters->counters_data);
2975         mcounters->counters_data = NULL;
2976         mcounters->cntrs_max_index = 0;
2977         mutex_unlock(&mcounters->mcntrs_mutex);
2978 }
2979
2980 static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
2981 {
2982         struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
2983         struct mlx5_ib_flow_handler *handler = container_of(flow_id,
2984                                                           struct mlx5_ib_flow_handler,
2985                                                           ibflow);
2986         struct mlx5_ib_flow_handler *iter, *tmp;
2987
2988         mutex_lock(&dev->flow_db->lock);
2989
2990         list_for_each_entry_safe(iter, tmp, &handler->list, list) {
2991                 mlx5_del_flow_rules(iter->rule);
2992                 put_flow_table(dev, iter->prio, true);
2993                 list_del(&iter->list);
2994                 kfree(iter);
2995         }
2996
2997         mlx5_del_flow_rules(handler->rule);
2998         put_flow_table(dev, handler->prio, true);
2999         if (handler->ibcounters &&
3000             atomic_read(&handler->ibcounters->usecnt) == 1)
3001                 counters_clear_description(handler->ibcounters);
3002
3003         mutex_unlock(&dev->flow_db->lock);
3004         kfree(handler);
3005
3006         return 0;
3007 }
3008
3009 static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
3010 {
3011         priority *= 2;
3012         if (!dont_trap)
3013                 priority++;
3014         return priority;
3015 }
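
/*
 * Worked example (editorial): each IB priority owns a pair of core
 * priorities, and don't-trap rules take the even, higher-precedence slot:
 *
 *      ib_prio_to_core_prio(0, true)  -> 0
 *      ib_prio_to_core_prio(0, false) -> 1
 *      ib_prio_to_core_prio(1, true)  -> 2
 *      ib_prio_to_core_prio(1, false) -> 3
 */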
3016
3017 enum flow_table_type {
3018         MLX5_IB_FT_RX,
3019         MLX5_IB_FT_TX
3020 };
3021
3022 #define MLX5_FS_MAX_TYPES        6
3023 #define MLX5_FS_MAX_ENTRIES      BIT(16)
3024 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
3025                                                 struct ib_flow_attr *flow_attr,
3026                                                 enum flow_table_type ft_type)
3027 {
3028         bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
3029         struct mlx5_flow_namespace *ns = NULL;
3030         struct mlx5_ib_flow_prio *prio;
3031         struct mlx5_flow_table *ft;
3032         int max_table_size;
3033         int num_entries;
3034         int num_groups;
3035         int priority;
3036         int err = 0;
3037
3038         max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3039                                                        log_max_ft_size));
3040         if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3041                 if (ft_type == MLX5_IB_FT_TX)
3042                         priority = 0;
3043                 else if (flow_is_multicast_only(flow_attr) &&
3044                          !dont_trap)
3045                         priority = MLX5_IB_FLOW_MCAST_PRIO;
3046                 else
3047                         priority = ib_prio_to_core_prio(flow_attr->priority,
3048                                                         dont_trap);
3049                 ns = mlx5_get_flow_namespace(dev->mdev,
3050                                              ft_type == MLX5_IB_FT_TX ?
3051                                              MLX5_FLOW_NAMESPACE_EGRESS :
3052                                              MLX5_FLOW_NAMESPACE_BYPASS);
3053                 num_entries = MLX5_FS_MAX_ENTRIES;
3054                 num_groups = MLX5_FS_MAX_TYPES;
3055                 prio = &dev->flow_db->prios[priority];
3056         } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3057                    flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3058                 ns = mlx5_get_flow_namespace(dev->mdev,
3059                                              MLX5_FLOW_NAMESPACE_LEFTOVERS);
3060                 build_leftovers_ft_param(&priority,
3061                                          &num_entries,
3062                                          &num_groups);
3063                 prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
3064         } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3065                 if (!MLX5_CAP_FLOWTABLE(dev->mdev,
3066                                         allow_sniffer_and_nic_rx_shared_tir))
3067                         return ERR_PTR(-ENOTSUPP);
3068
3069                 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
3070                                              MLX5_FLOW_NAMESPACE_SNIFFER_RX :
3071                                              MLX5_FLOW_NAMESPACE_SNIFFER_TX);
3072
3073                 prio = &dev->flow_db->sniffer[ft_type];
3074                 priority = 0;
3075                 num_entries = 1;
3076                 num_groups = 1;
3077         }
3078
3079         if (!ns)
3080                 return ERR_PTR(-ENOTSUPP);
3081
3082         if (num_entries > max_table_size)
3083                 return ERR_PTR(-ENOMEM);
3084
3085         ft = prio->flow_table;
3086         if (!ft) {
3087                 ft = mlx5_create_auto_grouped_flow_table(ns, priority,
3088                                                          num_entries,
3089                                                          num_groups,
3090                                                          0, 0);
3091
3092                 if (!IS_ERR(ft)) {
3093                         prio->refcount = 0;
3094                         prio->flow_table = ft;
3095                 } else {
3096                         err = PTR_ERR(ft);
3097                 }
3098         }
3099
3100         return err ? ERR_PTR(err) : prio;
3101 }
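
/*
 * Editorial summary of the selection above:
 *
 *      flow_attr->type                 namespace       priority
 *      NORMAL, TX                      EGRESS          0
 *      NORMAL, multicast-only RX       BYPASS          MLX5_IB_FLOW_MCAST_PRIO
 *      NORMAL, other RX                BYPASS          2 * prio (+1 if trapping)
 *      ALL_DEFAULT / MC_DEFAULT        LEFTOVERS       MLX5_IB_FLOW_LEFTOVERS_PRIO
 *      SNIFFER                         SNIFFER_RX/TX   0
 */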
3102
3103 static void set_underlay_qp(struct mlx5_ib_dev *dev,
3104                             struct mlx5_flow_spec *spec,
3105                             u32 underlay_qpn)
3106 {
3107         void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
3108                                            spec->match_criteria,
3109                                            misc_parameters);
3110         void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3111                                            misc_parameters);
3112
3113         if (underlay_qpn &&
3114             MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
3115                                       ft_field_support.bth_dst_qp)) {
3116                 MLX5_SET(fte_match_set_misc,
3117                          misc_params_v, bth_dst_qp, underlay_qpn);
3118                 MLX5_SET(fte_match_set_misc,
3119                          misc_params_c, bth_dst_qp, 0xffffff);
3120         }
3121 }
3122
3123 static int read_flow_counters(struct ib_device *ibdev,
3124                               struct mlx5_read_counters_attr *read_attr)
3125 {
3126         struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
3127         struct mlx5_ib_dev *dev = to_mdev(ibdev);
3128
3129         return mlx5_fc_query(dev->mdev, fc,
3130                              &read_attr->out[IB_COUNTER_PACKETS],
3131                              &read_attr->out[IB_COUNTER_BYTES]);
3132 }
3133
3134 /* flow counters currently expose two counters: packets and bytes */
3135 #define FLOW_COUNTERS_NUM 2
3136 static int counters_set_description(struct ib_counters *counters,
3137                                     enum mlx5_ib_counters_type counters_type,
3138                                     struct mlx5_ib_flow_counters_desc *desc_data,
3139                                     u32 ncounters)
3140 {
3141         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
3142         u32 cntrs_max_index = 0;
3143         int i;
3144
3145         if (counters_type != MLX5_IB_COUNTERS_FLOW)
3146                 return -EINVAL;
3147
3148         /* init the fields for the object */
3149         mcounters->type = counters_type;
3150         mcounters->read_counters = read_flow_counters;
3151         mcounters->counters_num = FLOW_COUNTERS_NUM;
3152         mcounters->ncounters = ncounters;
3153         /* each counter entry has both a description and an index */
3154         for (i = 0; i < ncounters; i++) {
3155                 if (desc_data[i].description > IB_COUNTER_BYTES)
3156                         return -EINVAL;
3157
3158                 if (cntrs_max_index <= desc_data[i].index)
3159                         cntrs_max_index = desc_data[i].index + 1;
3160         }
3161
3162         mutex_lock(&mcounters->mcntrs_mutex);
3163         mcounters->counters_data = desc_data;
3164         mcounters->cntrs_max_index = cntrs_max_index;
3165         mutex_unlock(&mcounters->mcntrs_mutex);
3166
3167         return 0;
3168 }
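
/*
 * Hypothetical example (editorial): a description array binding the
 * packets counter to user slot 0 and the bytes counter to user slot 1;
 * with this input, counters_set_description() ends up with
 * cntrs_max_index == 2.
 *
 *      struct mlx5_ib_flow_counters_desc desc[] = {
 *              { .description = IB_COUNTER_PACKETS, .index = 0 },
 *              { .description = IB_COUNTER_BYTES,   .index = 1 },
 *      };
 */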
3169
3170 #define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
3171 static int flow_counters_set_data(struct ib_counters *ibcounters,
3172                                   struct mlx5_ib_create_flow *ucmd)
3173 {
3174         struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
3175         struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
3176         struct mlx5_ib_flow_counters_desc *desc_data = NULL;
3177         bool hw_hndl = false;
3178         int ret = 0;
3179
3180         if (ucmd && ucmd->ncounters_data != 0) {
3181                 cntrs_data = ucmd->data;
3182                 if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
3183                         return -EINVAL;
3184
3185                 desc_data = kcalloc(cntrs_data->ncounters,
3186                                     sizeof(*desc_data),
3187                                     GFP_KERNEL);
3188                 if (!desc_data)
3189                         return -ENOMEM;
3190
3191                 if (copy_from_user(desc_data,
3192                                    u64_to_user_ptr(cntrs_data->counters_data),
3193                                    sizeof(*desc_data) * cntrs_data->ncounters)) {
3194                         ret = -EFAULT;
3195                         goto free;
3196                 }
3197         }
3198
3199         if (!mcounters->hw_cntrs_hndl) {
3200                 mcounters->hw_cntrs_hndl = mlx5_fc_create(
3201                         to_mdev(ibcounters->device)->mdev, false);
3202                 if (IS_ERR(mcounters->hw_cntrs_hndl)) {
3203                         ret = PTR_ERR(mcounters->hw_cntrs_hndl);
3204                         goto free;
3205                 }
3206                 hw_hndl = true;
3207         }
3208
3209         if (desc_data) {
3210                 /* counters already bound to at least one flow */
3211                 if (mcounters->cntrs_max_index) {
3212                         ret = -EINVAL;
3213                         goto free_hndl;
3214                 }
3215
3216                 ret = counters_set_description(ibcounters,
3217                                                MLX5_IB_COUNTERS_FLOW,
3218                                                desc_data,
3219                                                cntrs_data->ncounters);
3220                 if (ret)
3221                         goto free_hndl;
3222
3223         } else if (!mcounters->cntrs_max_index) {
3224                 /* counters not bound yet, must have udata passed */
3225                 ret = -EINVAL;
3226                 goto free_hndl;
3227         }
3228
3229         return 0;
3230
3231 free_hndl:
3232         if (hw_hndl) {
3233                 mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
3234                                 mcounters->hw_cntrs_hndl);
3235                 mcounters->hw_cntrs_hndl = NULL;
3236         }
3237 free:
3238         kfree(desc_data);
3239         return ret;
3240 }
3241
3242 static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
3243                                                       struct mlx5_ib_flow_prio *ft_prio,
3244                                                       const struct ib_flow_attr *flow_attr,
3245                                                       struct mlx5_flow_destination *dst,
3246                                                       u32 underlay_qpn,
3247                                                       struct mlx5_ib_create_flow *ucmd)
3248 {
3249         struct mlx5_flow_table  *ft = ft_prio->flow_table;
3250         struct mlx5_ib_flow_handler *handler;
3251         struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
3252         struct mlx5_flow_spec *spec;
3253         struct mlx5_flow_destination dest_arr[2] = {};
3254         struct mlx5_flow_destination *rule_dst = dest_arr;
3255         const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
3256         unsigned int spec_index;
3257         u32 prev_type = 0;
3258         int err = 0;
3259         int dest_num = 0;
3260         bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3261
3262         if (!is_valid_attr(dev->mdev, flow_attr))
3263                 return ERR_PTR(-EINVAL);
3264
3265         spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
3266         handler = kzalloc(sizeof(*handler), GFP_KERNEL);
3267         if (!handler || !spec) {
3268                 err = -ENOMEM;
3269                 goto free;
3270         }
3271
3272         INIT_LIST_HEAD(&handler->list);
3273         if (dst) {
3274                 memcpy(&dest_arr[0], dst, sizeof(*dst));
3275                 dest_num++;
3276         }
3277
3278         for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
3279                 err = parse_flow_attr(dev->mdev, spec->match_criteria,
3280                                       spec->match_value,
3281                                       ib_flow, flow_attr, &flow_act,
3282                                       prev_type);
3283                 if (err < 0)
3284                         goto free;
3285
3286                 prev_type = ((union ib_flow_spec *)ib_flow)->type;
3287                 ib_flow += ((union ib_flow_spec *)ib_flow)->size;
3288         }
3289
3290         if (!flow_is_multicast_only(flow_attr))
3291                 set_underlay_qp(dev, spec, underlay_qpn);
3292
3293         if (dev->rep) {
3294                 void *misc;
3295
3296                 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
3297                                     misc_parameters);
3298                 MLX5_SET(fte_match_set_misc, misc, source_port,
3299                          dev->rep->vport);
3300                 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
3301                                     misc_parameters);
3302                 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
3303         }
3304
3305         spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
3306
3307         if (is_egress &&
3308             !is_valid_spec(dev->mdev, spec, &flow_act, is_egress)) {
3309                 err = -EINVAL;
3310                 goto free;
3311         }
3312
3313         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3314                 err = flow_counters_set_data(flow_act.counters, ucmd);
3315                 if (err)
3316                         goto free;
3317
3318                 handler->ibcounters = flow_act.counters;
3319                 dest_arr[dest_num].type =
3320                         MLX5_FLOW_DESTINATION_TYPE_COUNTER;
3321                 dest_arr[dest_num].counter =
3322                         to_mcounters(flow_act.counters)->hw_cntrs_hndl;
3323                 dest_num++;
3324         }
3325
3326         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
3327                 if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
3328                         rule_dst = NULL;
3329                         dest_num = 0;
3330                 }
3331         } else {
3332                 if (is_egress)
3333                         flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
3334                 else
3335                         flow_act.action |=
3336                                 dest_num ?  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
3337                                         MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
3338         }
3339
3340         if (flow_act.has_flow_tag &&
3341             (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3342              flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3343                 mlx5_ib_warn(dev, "Flow tag %u and attribute type %x are not allowed in leftovers\n",
3344                              flow_act.flow_tag, flow_attr->type);
3345                 err = -EINVAL;
3346                 goto free;
3347         }
3348         handler->rule = mlx5_add_flow_rules(ft, spec,
3349                                             &flow_act,
3350                                             rule_dst, dest_num);
3351
3352         if (IS_ERR(handler->rule)) {
3353                 err = PTR_ERR(handler->rule);
3354                 goto free;
3355         }
3356
3357         ft_prio->refcount++;
3358         handler->prio = ft_prio;
3359
3360         ft_prio->flow_table = ft;
3361 free:
3362         if (err && handler) {
3363                 if (handler->ibcounters &&
3364                     atomic_read(&handler->ibcounters->usecnt) == 1)
3365                         counters_clear_description(handler->ibcounters);
3366                 kfree(handler);
3367         }
3368         kvfree(spec);
3369         return err ? ERR_PTR(err) : handler;
3370 }
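
/*
 * Editorial summary of the action resolution above:
 *
 *      DROP without COUNT      -> no destination (rule_dst = NULL)
 *      DROP with COUNT         -> counter destination only
 *      egress, no DROP         -> ALLOW
 *      ingress with dest       -> FWD_DEST
 *      ingress without dest    -> FWD_NEXT_PRIO
 */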
3371
3372 static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
3373                                                      struct mlx5_ib_flow_prio *ft_prio,
3374                                                      const struct ib_flow_attr *flow_attr,
3375                                                      struct mlx5_flow_destination *dst)
3376 {
3377         return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
3378 }
3379
3380 static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
3381                                                           struct mlx5_ib_flow_prio *ft_prio,
3382                                                           struct ib_flow_attr *flow_attr,
3383                                                           struct mlx5_flow_destination *dst)
3384 {
3385         struct mlx5_ib_flow_handler *handler_dst = NULL;
3386         struct mlx5_ib_flow_handler *handler = NULL;
3387
3388         handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
3389         if (!IS_ERR(handler)) {
3390                 handler_dst = create_flow_rule(dev, ft_prio,
3391                                                flow_attr, dst);
3392                 if (IS_ERR(handler_dst)) {
3393                         mlx5_del_flow_rules(handler->rule);
3394                         ft_prio->refcount--;
3395                         kfree(handler);
3396                         handler = handler_dst;
3397                 } else {
3398                         list_add(&handler_dst->list, &handler->list);
3399                 }
3400         }
3401
3402         return handler;
3403 }
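
/*
 * Editorial note: a don't-trap attribute is realized above as two chained
 * rules in the same table -- one with no destination, so matching packets
 * continue to the next priority, and one forwarding to the requested
 * destination; both sit on handler->list and are torn down together.
 */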
3404 enum {
3405         LEFTOVERS_MC,
3406         LEFTOVERS_UC,
3407 };
3408
3409 static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
3410                                                           struct mlx5_ib_flow_prio *ft_prio,
3411                                                           struct ib_flow_attr *flow_attr,
3412                                                           struct mlx5_flow_destination *dst)
3413 {
3414         struct mlx5_ib_flow_handler *handler_ucast = NULL;
3415         struct mlx5_ib_flow_handler *handler = NULL;
3416
3417         static struct {
3418                 struct ib_flow_attr     flow_attr;
3419                 struct ib_flow_spec_eth eth_flow;
3420         } leftovers_specs[] = {
3421                 [LEFTOVERS_MC] = {
3422                         .flow_attr = {
3423                                 .num_of_specs = 1,
3424                                 .size = sizeof(leftovers_specs[0])
3425                         },
3426                         .eth_flow = {
3427                                 .type = IB_FLOW_SPEC_ETH,
3428                                 .size = sizeof(struct ib_flow_spec_eth),
3429                                 .mask = {.dst_mac = {0x1} },
3430                                 .val =  {.dst_mac = {0x1} }
3431                         }
3432                 },
3433                 [LEFTOVERS_UC] = {
3434                         .flow_attr = {
3435                                 .num_of_specs = 1,
3436                                 .size = sizeof(leftovers_specs[0])
3437                         },
3438                         .eth_flow = {
3439                                 .type = IB_FLOW_SPEC_ETH,
3440                                 .size = sizeof(struct ib_flow_spec_eth),
3441                                 .mask = {.dst_mac = {0x1} },
3442                                 .val = {.dst_mac = {} }
3443                         }
3444                 }
3445         };
3446
3447         handler = create_flow_rule(dev, ft_prio,
3448                                    &leftovers_specs[LEFTOVERS_MC].flow_attr,
3449                                    dst);
3450         if (!IS_ERR(handler) &&
3451             flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
3452                 handler_ucast = create_flow_rule(dev, ft_prio,
3453                                                  &leftovers_specs[LEFTOVERS_UC].flow_attr,
3454                                                  dst);
3455                 if (IS_ERR(handler_ucast)) {
3456                         mlx5_del_flow_rules(handler->rule);
3457                         ft_prio->refcount--;
3458                         kfree(handler);
3459                         handler = handler_ucast;
3460                 } else {
3461                         list_add(&handler_ucast->list, &handler->list);
3462                 }
3463         }
3464
3465         return handler;
3466 }
3467
3468 static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
3469                                                         struct mlx5_ib_flow_prio *ft_rx,
3470                                                         struct mlx5_ib_flow_prio *ft_tx,
3471                                                         struct mlx5_flow_destination *dst)
3472 {
3473         struct mlx5_ib_flow_handler *handler_rx;
3474         struct mlx5_ib_flow_handler *handler_tx;
3475         int err;
3476         static const struct ib_flow_attr flow_attr  = {
3477                 .num_of_specs = 0,
3478                 .size = sizeof(flow_attr)
3479         };
3480
3481         handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
3482         if (IS_ERR(handler_rx)) {
3483                 err = PTR_ERR(handler_rx);
3484                 goto err;
3485         }
3486
3487         handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
3488         if (IS_ERR(handler_tx)) {
3489                 err = PTR_ERR(handler_tx);
3490                 goto err_tx;
3491         }
3492
3493         list_add(&handler_tx->list, &handler_rx->list);
3494
3495         return handler_rx;
3496
3497 err_tx:
3498         mlx5_del_flow_rules(handler_rx->rule);
3499         ft_rx->refcount--;
3500         kfree(handler_rx);
3501 err:
3502         return ERR_PTR(err);
3503 }
3504
3505 static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
3506                                            struct ib_flow_attr *flow_attr,
3507                                            int domain,
3508                                            struct ib_udata *udata)
3509 {
3510         struct mlx5_ib_dev *dev = to_mdev(qp->device);
3511         struct mlx5_ib_qp *mqp = to_mqp(qp);
3512         struct mlx5_ib_flow_handler *handler = NULL;
3513         struct mlx5_flow_destination *dst = NULL;
3514         struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
3515         struct mlx5_ib_flow_prio *ft_prio;
3516         bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
3517         struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
3518         size_t min_ucmd_sz, required_ucmd_sz;
3519         int err;
3520         int underlay_qpn;
3521
3522         if (udata && udata->inlen) {
3523                 min_ucmd_sz = offsetof(typeof(ucmd_hdr), reserved) +
3524                                 sizeof(ucmd_hdr.reserved);
3525                 if (udata->inlen < min_ucmd_sz)
3526                         return ERR_PTR(-EOPNOTSUPP);
3527
3528                 err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
3529                 if (err)
3530                         return ERR_PTR(err);
3531
3532                 /* currently only one counters data section is supported */
3533                 if (ucmd_hdr.ncounters_data > 1)
3534                         return ERR_PTR(-EINVAL);
3535
3536                 required_ucmd_sz = min_ucmd_sz +
3537                         sizeof(struct mlx5_ib_flow_counters_data) *
3538                         ucmd_hdr.ncounters_data;
3539                 if (udata->inlen > required_ucmd_sz &&
3540                     !ib_is_udata_cleared(udata, required_ucmd_sz,
3541                                          udata->inlen - required_ucmd_sz))
3542                         return ERR_PTR(-EOPNOTSUPP);
3543
3544                 ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
3545                 if (!ucmd)
3546                         return ERR_PTR(-ENOMEM);
3547
3548                 err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
3549                 if (err)
3550                         goto free_ucmd;
3551         }
3552
3553         if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
3554                 err = -ENOMEM;
3555                 goto free_ucmd;
3556         }
3557
3558         if (domain != IB_FLOW_DOMAIN_USER ||
3559             flow_attr->port > dev->num_ports ||
3560             (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
3561                                   IB_FLOW_ATTR_FLAGS_EGRESS))) {
3562                 err = -EINVAL;
3563                 goto free_ucmd;
3564         }
3565
3566         if (is_egress &&
3567             (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3568              flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
3569                 err = -EINVAL;
3570                 goto free_ucmd;
3571         }
3572
3573         dst = kzalloc(sizeof(*dst), GFP_KERNEL);
3574         if (!dst) {
3575                 err = -ENOMEM;
3576                 goto free_ucmd;
3577         }
3578
3579         mutex_lock(&dev->flow_db->lock);
3580
3581         ft_prio = get_flow_table(dev, flow_attr,
3582                                  is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
3583         if (IS_ERR(ft_prio)) {
3584                 err = PTR_ERR(ft_prio);
3585                 goto unlock;
3586         }
3587         if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3588                 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
3589                 if (IS_ERR(ft_prio_tx)) {
3590                         err = PTR_ERR(ft_prio_tx);
3591                         ft_prio_tx = NULL;
3592                         goto destroy_ft;
3593                 }
3594         }
3595
3596         if (is_egress) {
3597                 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
3598         } else {
3599                 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
3600                 if (mqp->flags & MLX5_IB_QP_RSS)
3601                         dst->tir_num = mqp->rss_qp.tirn;
3602                 else
3603                         dst->tir_num = mqp->raw_packet_qp.rq.tirn;
3604         }
3605
3606         if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3607                 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) {
3608                         handler = create_dont_trap_rule(dev, ft_prio,
3609                                                         flow_attr, dst);
3610                 } else {
3611                         underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
3612                                         mqp->underlay_qpn : 0;
3613                         handler = _create_flow_rule(dev, ft_prio, flow_attr,
3614                                                     dst, underlay_qpn, ucmd);
3615                 }
3616         } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
3617                    flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
3618                 handler = create_leftovers_rule(dev, ft_prio, flow_attr,
3619                                                 dst);
3620         } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
3621                 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
3622         } else {
3623                 err = -EINVAL;
3624                 goto destroy_ft;
3625         }
3626
3627         if (IS_ERR(handler)) {
3628                 err = PTR_ERR(handler);
3629                 handler = NULL;
3630                 goto destroy_ft;
3631         }
3632
3633         mutex_unlock(&dev->flow_db->lock);
3634         kfree(dst);
3635         kfree(ucmd);
3636
3637         return &handler->ibflow;
3638
3639 destroy_ft:
3640         put_flow_table(dev, ft_prio, false);
3641         if (ft_prio_tx)
3642                 put_flow_table(dev, ft_prio_tx, false);
3643 unlock:
3644         mutex_unlock(&dev->flow_db->lock);
3645         kfree(dst);
3646 free_ucmd:
3647         kfree(ucmd);
3648         return ERR_PTR(err);
3649 }
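
/*
 * Userspace-side sketch (editorial, not part of this file): creating a
 * NORMAL steering rule through plain libibverbs, which reaches the kernel
 * via mlx5_ib_create_flow() above.  Assumes an already-created "qp"; the
 * example_steer_dmac name is hypothetical.
 *
 *      #include <string.h>
 *      #include <infiniband/verbs.h>
 *
 *      static struct ibv_flow *example_steer_dmac(struct ibv_qp *qp,
 *                                                 const uint8_t dmac[6])
 *      {
 *              struct {
 *                      struct ibv_flow_attr     attr;
 *                      struct ibv_flow_spec_eth eth;
 *              } rule = {
 *                      .attr = {
 *                              .type         = IBV_FLOW_ATTR_NORMAL,
 *                              .size         = sizeof(rule),
 *                              .num_of_specs = 1,
 *                              .port         = 1,
 *                      },
 *                      .eth = {
 *                              .type = IBV_FLOW_SPEC_ETH,
 *                              .size = sizeof(rule.eth),
 *                      },
 *              };
 *
 *              memcpy(rule.eth.val.dst_mac, dmac, 6);
 *              memset(rule.eth.mask.dst_mac, 0xff, 6);
 *
 *              return ibv_create_flow(qp, &rule.attr);
 *      }
 */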
3650
3651 static u32 mlx5_ib_flow_action_flags_to_accel_xfrm_flags(u32 mlx5_flags)
3652 {
3653         u32 flags = 0;
3654
3655         if (mlx5_flags & MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA)
3656                 flags |= MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA;
3657
3658         return flags;
3659 }
3660
3661 #define MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED      MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA
3662 static struct ib_flow_action *
3663 mlx5_ib_create_flow_action_esp(struct ib_device *device,
3664                                const struct ib_flow_action_attrs_esp *attr,
3665                                struct uverbs_attr_bundle *attrs)
3666 {
3667         struct mlx5_ib_dev *mdev = to_mdev(device);
3668         struct ib_uverbs_flow_action_esp_keymat_aes_gcm *aes_gcm;
3669         struct mlx5_accel_esp_xfrm_attrs accel_attrs = {};
3670         struct mlx5_ib_flow_action *action;
3671         u64 action_flags;
3672         u64 flags;
3673         int err = 0;
3674
3675         if (IS_UVERBS_COPY_ERR(uverbs_copy_from(&action_flags, attrs,
3676                                                 MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS)))
3677                 return ERR_PTR(-EFAULT);
3678
3679         if (action_flags >= (MLX5_FLOW_ACTION_ESP_CREATE_LAST_SUPPORTED << 1))
3680                 return ERR_PTR(-EOPNOTSUPP);
3681
3682         flags = mlx5_ib_flow_action_flags_to_accel_xfrm_flags(action_flags);
3683
3684         /* We currently only support a subset of the standard features. Only a
3685          * keymat of type AES_GCM, with icv_len == 16, iv_algo == SEQ and esn
3686          * (with overlap). Full offload mode isn't supported.
3687          */
3688         if (!attr->keymat || attr->replay || attr->encap ||
3689             attr->spi || attr->seq || attr->tfc_pad ||
3690             attr->hard_limit_pkts ||
3691             (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
3692                              IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)))
3693                 return ERR_PTR(-EOPNOTSUPP);
3694
3695         if (attr->keymat->protocol !=
3696             IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM)
3697                 return ERR_PTR(-EOPNOTSUPP);
3698
3699         aes_gcm = &attr->keymat->keymat.aes_gcm;
3700
3701         if (aes_gcm->icv_len != 16 ||
3702             aes_gcm->iv_algo != IB_UVERBS_FLOW_ACTION_IV_ALGO_SEQ)
3703                 return ERR_PTR(-EOPNOTSUPP);
3704
3705         action = kmalloc(sizeof(*action), GFP_KERNEL);
3706         if (!action)
3707                 return ERR_PTR(-ENOMEM);
3708
3709         action->esp_aes_gcm.ib_flags = attr->flags;
3710         memcpy(&accel_attrs.keymat.aes_gcm.aes_key, &aes_gcm->aes_key,
3711                sizeof(accel_attrs.keymat.aes_gcm.aes_key));
3712         accel_attrs.keymat.aes_gcm.key_len = aes_gcm->key_len * 8;
3713         memcpy(&accel_attrs.keymat.aes_gcm.salt, &aes_gcm->salt,
3714                sizeof(accel_attrs.keymat.aes_gcm.salt));
3715         memcpy(&accel_attrs.keymat.aes_gcm.seq_iv, &aes_gcm->iv,
3716                sizeof(accel_attrs.keymat.aes_gcm.seq_iv));
3717         accel_attrs.keymat.aes_gcm.icv_len = aes_gcm->icv_len * 8;
3718         accel_attrs.keymat.aes_gcm.iv_algo = MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ;
3719         accel_attrs.keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
3720
3721         accel_attrs.esn = attr->esn;
3722         if (attr->flags & IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED)
3723                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
3724         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
3725                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
3726
3727         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ENCRYPT)
3728                 accel_attrs.action |= MLX5_ACCEL_ESP_ACTION_ENCRYPT;
3729
3730         action->esp_aes_gcm.ctx =
3731                 mlx5_accel_esp_create_xfrm(mdev->mdev, &accel_attrs, flags);
3732         if (IS_ERR(action->esp_aes_gcm.ctx)) {
3733                 err = PTR_ERR(action->esp_aes_gcm.ctx);
3734                 goto err_parse;
3735         }
3736
3737         action->esp_aes_gcm.ib_flags = attr->flags;
3738
3739         return &action->ib_action;
3740
3741 err_parse:
3742         kfree(action);
3743         return ERR_PTR(err);
3744 }
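
/*
 * Editorial summary: the only ESP actions accepted above are AES-GCM
 * keymats with a 16-byte ICV and a sequence-number IV, optionally
 * ESN-triggered and/or flagged for encryption; replay protection,
 * encapsulation, spi/seq/tfc_pad and hard packet limits are all rejected
 * with -EOPNOTSUPP.
 */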
3745
3746 static int
3747 mlx5_ib_modify_flow_action_esp(struct ib_flow_action *action,
3748                                const struct ib_flow_action_attrs_esp *attr,
3749                                struct uverbs_attr_bundle *attrs)
3750 {
3751         struct mlx5_ib_flow_action *maction = to_mflow_act(action);
3752         struct mlx5_accel_esp_xfrm_attrs accel_attrs;
3753         int err = 0;
3754
3755         if (attr->keymat || attr->replay || attr->encap ||
3756             attr->spi || attr->seq || attr->tfc_pad ||
3757             attr->hard_limit_pkts ||
3758             (attr->flags & ~(IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
3759                              IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS |
3760                              IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)))
3761                 return -EOPNOTSUPP;
3762
3763         /* Only the ESN value or the MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP can
3764          * be modified.
3765          */
3766         if (!(maction->esp_aes_gcm.ib_flags &
3767               IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED) &&
3768             attr->flags & (IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED |
3769                            IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW))
3770                 return -EINVAL;
3771
3772         memcpy(&accel_attrs, &maction->esp_aes_gcm.ctx->attrs,
3773                sizeof(accel_attrs));
3774
3775         accel_attrs.esn = attr->esn;
3776         if (attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW)
3777                 accel_attrs.flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
3778         else
3779                 accel_attrs.flags &= ~MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
3780
3781         err = mlx5_accel_esp_modify_xfrm(maction->esp_aes_gcm.ctx,
3782                                          &accel_attrs);
3783         if (err)
3784                 return err;
3785
3786         maction->esp_aes_gcm.ib_flags &=
3787                 ~IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
3788         maction->esp_aes_gcm.ib_flags |=
3789                 attr->flags & IB_UVERBS_FLOW_ACTION_ESP_FLAGS_ESN_NEW_WINDOW;
3790
3791         return 0;
3792 }
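
/*
 * Editorial note: a typical modify here only rolls the ESN state -- the
 * caller supplies the new esn value plus ESN_NEW_WINDOW to flip the
 * replay-window half during ESN overlap, while every other xfrm attribute
 * is copied over unchanged from the existing context.
 */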
3793
3794 static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
3795 {
3796         struct mlx5_ib_flow_action *maction = to_mflow_act(action);
3797
3798         switch (action->type) {
3799         case IB_FLOW_ACTION_ESP:
3800                 /*
3801                  * We only support aes_gcm for now, so we implicitly know this is
3802                  * the underlying crypto.
3803                  */
3804                 mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
3805                 break;
3806         default:
3807                 WARN_ON(true);
3808                 break;
3809         }
3810
3811         kfree(maction);
3812         return 0;
3813 }
3814
3815 static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
3816 {
3817         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3818         struct mlx5_ib_qp *mqp = to_mqp(ibqp);
3819         int err;
3820
3821         if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
3822                 mlx5_ib_dbg(dev, "Attaching a multicast group to underlay QP is not supported\n");
3823                 return -EOPNOTSUPP;
3824         }
3825
3826         err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
3827         if (err)
3828                 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
3829                              ibqp->qp_num, gid->raw);
3830
3831         return err;
3832 }
3833
3834 static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
3835 {
3836         struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
3837         int err;
3838
3839         err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
3840         if (err)
3841                 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
3842                              ibqp->qp_num, gid->raw);
3843
3844         return err;
3845 }
3846
3847 static int init_node_data(struct mlx5_ib_dev *dev)
3848 {
3849         int err;
3850
3851         err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
3852         if (err)
3853                 return err;
3854
3855         dev->mdev->rev_id = dev->mdev->pdev->revision;
3856
3857         return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
3858 }
3859
3860 static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
3861                              char *buf)
3862 {
3863         struct mlx5_ib_dev *dev =
3864                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
3865
3866         return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
3867 }
3868
3869 static ssize_t show_reg_pages(struct device *device,
3870                               struct device_attribute *attr, char *buf)
3871 {
3872         struct mlx5_ib_dev *dev =
3873                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
3874
3875         return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
3876 }
3877
3878 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
3879                         char *buf)
3880 {
3881         struct mlx5_ib_dev *dev =
3882                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
3883         return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
3884 }
3885
3886 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
3887                         char *buf)
3888 {
3889         struct mlx5_ib_dev *dev =
3890                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
3891         return sprintf(buf, "%x\n", dev->mdev->rev_id);
3892 }
3893
3894 static ssize_t show_board(struct device *device, struct device_attribute *attr,
3895                           char *buf)
3896 {
3897         struct mlx5_ib_dev *dev =
3898                 container_of(device, struct mlx5_ib_dev, ib_dev.dev);
3899         return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
3900                        dev->mdev->board_id);
3901 }
3902
3903 static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
3904 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
3905 static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
3906 static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
3907 static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
3908
3909 static struct device_attribute *mlx5_class_attributes[] = {
3910         &dev_attr_hw_rev,
3911         &dev_attr_hca_type,
3912         &dev_attr_board_id,
3913         &dev_attr_fw_pages,
3914         &dev_attr_reg_pages,
3915 };
3916
3917 static void pkey_change_handler(struct work_struct *work)
3918 {
3919         struct mlx5_ib_port_resources *ports =
3920                 container_of(work, struct mlx5_ib_port_resources,
3921                              pkey_change_work);
3922
3923         mutex_lock(&ports->devr->mutex);
3924         mlx5_ib_gsi_pkey_change(ports->gsi);
3925         mutex_unlock(&ports->devr->mutex);
3926 }
3927
3928 static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
3929 {
3930         struct mlx5_ib_qp *mqp;
3931         struct mlx5_ib_cq *send_mcq, *recv_mcq;
3932         struct mlx5_core_cq *mcq;
3933         struct list_head cq_armed_list;
3934         unsigned long flags_qp;
3935         unsigned long flags_cq;
3936         unsigned long flags;
3937
3938         INIT_LIST_HEAD(&cq_armed_list);
3939
3940         /* Go over the qp list residing on that ibdev, synced with create/destroy qp. */
3941         spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3942         list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3943                 spin_lock_irqsave(&mqp->sq.lock, flags_qp);
3944                 if (mqp->sq.tail != mqp->sq.head) {
3945                         send_mcq = to_mcq(mqp->ibqp.send_cq);
3946                         spin_lock_irqsave(&send_mcq->lock, flags_cq);
3947                         if (send_mcq->mcq.comp &&
3948                             mqp->ibqp.send_cq->comp_handler) {
3949                                 if (!send_mcq->mcq.reset_notify_added) {
3950                                         send_mcq->mcq.reset_notify_added = 1;
3951                                         list_add_tail(&send_mcq->mcq.reset_notify,
3952                                                       &cq_armed_list);
3953                                 }
3954                         }
3955                         spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
3956                 }
3957                 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
3958                 spin_lock_irqsave(&mqp->rq.lock, flags_qp);
3959                 /* no handling is needed for SRQ */
3960                 if (!mqp->ibqp.srq) {
3961                         if (mqp->rq.tail != mqp->rq.head) {
3962                                 recv_mcq = to_mcq(mqp->ibqp.recv_cq);
3963                                 spin_lock_irqsave(&recv_mcq->lock, flags_cq);
3964                                 if (recv_mcq->mcq.comp &&
3965                                     mqp->ibqp.recv_cq->comp_handler) {
3966                                         if (!recv_mcq->mcq.reset_notify_added) {
3967                                                 recv_mcq->mcq.reset_notify_added = 1;
3968                                                 list_add_tail(&recv_mcq->mcq.reset_notify,
3969                                                               &cq_armed_list);
3970                                         }
3971                                 }
3972                                 spin_unlock_irqrestore(&recv_mcq->lock,
3973                                                        flags_cq);
3974                         }
3975                 }
3976                 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
3977         }
        /* At this point, taking and releasing the locks above has
         * synchronized with all in-flight post-send operations; now arm
         * all the CQs that were involved.
         */
3981         list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
3982                 mcq->comp(mcq);
3983         }
3984         spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3985 }
3986
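/* Work handler for MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: re-program the
 * delay-drop timeout in the device with the currently configured value,
 * deactivating the feature if the command fails.
 */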
3987 static void delay_drop_handler(struct work_struct *work)
3988 {
3989         int err;
3990         struct mlx5_ib_delay_drop *delay_drop =
3991                 container_of(work, struct mlx5_ib_delay_drop,
3992                              delay_drop_work);
3993
3994         atomic_inc(&delay_drop->events_cnt);
3995
3996         mutex_lock(&delay_drop->lock);
3997         err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
3998                                        delay_drop->timeout);
3999         if (err) {
4000                 mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
4001                              delay_drop->timeout);
4002                 delay_drop->activate = false;
4003         }
4004         mutex_unlock(&delay_drop->lock);
4005 }
4006
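/* Workqueue handler that translates mlx5 core events into IB events
 * and dispatches them to registered clients once the device is active.
 */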
4007 static void mlx5_ib_handle_event(struct work_struct *_work)
4008 {
4009         struct mlx5_ib_event_work *work =
4010                 container_of(_work, struct mlx5_ib_event_work, work);
4011         struct mlx5_ib_dev *ibdev;
4012         struct ib_event ibev;
4013         bool fatal = false;
4014         u8 port = (u8)work->param;
4015
4016         if (mlx5_core_is_mp_slave(work->dev)) {
4017                 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
4018                 if (!ibdev)
4019                         goto out;
4020         } else {
4021                 ibdev = work->context;
4022         }
4023
4024         switch (work->event) {
4025         case MLX5_DEV_EVENT_SYS_ERROR:
4026                 ibev.event = IB_EVENT_DEVICE_FATAL;
4027                 mlx5_ib_handle_internal_error(ibdev);
4028                 fatal = true;
4029                 break;
4030
4031         case MLX5_DEV_EVENT_PORT_UP:
4032         case MLX5_DEV_EVENT_PORT_DOWN:
4033         case MLX5_DEV_EVENT_PORT_INITIALIZED:
4034                 /* In RoCE, port up/down events are handled in
4035                  * mlx5_netdev_event().
4036                  */
4037                 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
4038                         IB_LINK_LAYER_ETHERNET)
4039                         goto out;
4040
4041                 ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
4042                              IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
4043                 break;
4044
4045         case MLX5_DEV_EVENT_LID_CHANGE:
4046                 ibev.event = IB_EVENT_LID_CHANGE;
4047                 break;
4048
4049         case MLX5_DEV_EVENT_PKEY_CHANGE:
4050                 ibev.event = IB_EVENT_PKEY_CHANGE;
4051                 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
4052                 break;
4053
4054         case MLX5_DEV_EVENT_GUID_CHANGE:
4055                 ibev.event = IB_EVENT_GID_CHANGE;
4056                 break;
4057
4058         case MLX5_DEV_EVENT_CLIENT_REREG:
4059                 ibev.event = IB_EVENT_CLIENT_REREGISTER;
4060                 break;
4061         case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
4062                 schedule_work(&ibdev->delay_drop.delay_drop_work);
4063                 goto out;
4064         default:
4065                 goto out;
4066         }
4067
4068         ibev.device           = &ibdev->ib_dev;
4069         ibev.element.port_num = port;
4070
4071         if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
                mlx5_ib_warn(ibdev, "event on invalid port %d\n", port);
4073                 goto out;
4074         }
4075
4076         if (ibdev->ib_active)
4077                 ib_dispatch_event(&ibev);
4078
4079         if (fatal)
4080                 ibdev->ib_active = false;
4081 out:
4082         kfree(work);
4083 }
4084
4085 static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
4086                           enum mlx5_dev_event event, unsigned long param)
4087 {
4088         struct mlx5_ib_event_work *work;
4089
4090         work = kmalloc(sizeof(*work), GFP_ATOMIC);
4091         if (!work)
4092                 return;
4093
4094         INIT_WORK(&work->work, mlx5_ib_handle_event);
4095         work->dev = dev;
4096         work->param = param;
4097         work->context = context;
4098         work->event = event;
4099
4100         queue_work(mlx5_ib_event_wq, &work->work);
4101 }
4102
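/* Determine, per port, whether the device can send/receive SMI MADs:
 * only IB ports can, and with IB virtualization enabled the answer is
 * taken from the vport context.
 */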
4103 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
4104 {
4105         struct mlx5_hca_vport_context vport_ctx;
4106         int err;
4107         int port;
4108
4109         for (port = 1; port <= dev->num_ports; port++) {
4110                 dev->mdev->port_caps[port - 1].has_smi = false;
4111                 if (MLX5_CAP_GEN(dev->mdev, port_type) ==
4112                     MLX5_CAP_PORT_TYPE_IB) {
4113                         if (MLX5_CAP_GEN(dev->mdev, ib_virt)) {
4114                                 err = mlx5_query_hca_vport_context(dev->mdev, 0,
4115                                                                    port, 0,
4116                                                                    &vport_ctx);
4117                                 if (err) {
4118                                         mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
4119                                                     port, err);
4120                                         return err;
4121                                 }
4122                                 dev->mdev->port_caps[port - 1].has_smi =
4123                                         vport_ctx.has_smi;
4124                         } else {
4125                                 dev->mdev->port_caps[port - 1].has_smi = true;
4126                         }
4127                 }
4128         }
4129         return 0;
4130 }
4131
4132 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
4133 {
4134         int port;
4135
4136         for (port = 1; port <= dev->num_ports; port++)
4137                 mlx5_query_ext_port_caps(dev, port);
4138 }
4139
4140 static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
4141 {
4142         struct ib_device_attr *dprops = NULL;
4143         struct ib_port_attr *pprops = NULL;
4144         int err = -ENOMEM;
4145         struct ib_udata uhw = {.inlen = 0, .outlen = 0};
4146
4147         pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
4148         if (!pprops)
4149                 goto out;
4150
4151         dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
4152         if (!dprops)
4153                 goto out;
4154
4155         err = set_has_smi_cap(dev);
4156         if (err)
4157                 goto out;
4158
4159         err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
4160         if (err) {
4161                 mlx5_ib_warn(dev, "query_device failed %d\n", err);
4162                 goto out;
4163         }
4164
4165         memset(pprops, 0, sizeof(*pprops));
4166         err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
4167         if (err) {
4168                 mlx5_ib_warn(dev, "query_port %d failed %d\n",
4169                              port, err);
4170                 goto out;
4171         }
4172
4173         dev->mdev->port_caps[port - 1].pkey_table_len =
4174                                         dprops->max_pkeys;
4175         dev->mdev->port_caps[port - 1].gid_table_len =
4176                                         pprops->gid_tbl_len;
4177         mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
4178                     port, dprops->max_pkeys, pprops->gid_tbl_len);
4179
4180 out:
4181         kfree(pprops);
4182         kfree(dprops);
4183
4184         return err;
4185 }
4186
4187 static void destroy_umrc_res(struct mlx5_ib_dev *dev)
4188 {
4189         int err;
4190
4191         err = mlx5_mr_cache_cleanup(dev);
4192         if (err)
4193                 mlx5_ib_warn(dev, "mr cache cleanup failed\n");
4194
4195         if (dev->umrc.qp)
4196                 mlx5_ib_destroy_qp(dev->umrc.qp);
4197         if (dev->umrc.cq)
4198                 ib_free_cq(dev->umrc.cq);
4199         if (dev->umrc.pd)
4200                 ib_dealloc_pd(dev->umrc.pd);
4201 }
4202
4203 enum {
4204         MAX_UMR_WR = 128,
4205 };
4206
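/* Set up the resources used to post UMR work requests: a dedicated PD,
 * CQ and a driver-internal QP of type MLX5_IB_QPT_REG_UMR, transitioned
 * manually through INIT -> RTR -> RTS.  The semaphore bounds the number
 * of outstanding UMR WRs to MAX_UMR_WR.
 */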
4207 static int create_umr_res(struct mlx5_ib_dev *dev)
4208 {
4209         struct ib_qp_init_attr *init_attr = NULL;
4210         struct ib_qp_attr *attr = NULL;
4211         struct ib_pd *pd;
4212         struct ib_cq *cq;
4213         struct ib_qp *qp;
4214         int ret;
4215
4216         attr = kzalloc(sizeof(*attr), GFP_KERNEL);
4217         init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
4218         if (!attr || !init_attr) {
4219                 ret = -ENOMEM;
4220                 goto error_0;
4221         }
4222
4223         pd = ib_alloc_pd(&dev->ib_dev, 0);
4224         if (IS_ERR(pd)) {
4225                 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
4226                 ret = PTR_ERR(pd);
4227                 goto error_0;
4228         }
4229
4230         cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
4231         if (IS_ERR(cq)) {
4232                 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
4233                 ret = PTR_ERR(cq);
4234                 goto error_2;
4235         }
4236
4237         init_attr->send_cq = cq;
4238         init_attr->recv_cq = cq;
4239         init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
4240         init_attr->cap.max_send_wr = MAX_UMR_WR;
4241         init_attr->cap.max_send_sge = 1;
4242         init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
4243         init_attr->port_num = 1;
4244         qp = mlx5_ib_create_qp(pd, init_attr, NULL);
4245         if (IS_ERR(qp)) {
4246                 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
4247                 ret = PTR_ERR(qp);
4248                 goto error_3;
4249         }
4250         qp->device     = &dev->ib_dev;
4251         qp->real_qp    = qp;
4252         qp->uobject    = NULL;
4253         qp->qp_type    = MLX5_IB_QPT_REG_UMR;
4254         qp->send_cq    = init_attr->send_cq;
4255         qp->recv_cq    = init_attr->recv_cq;
4256
4257         attr->qp_state = IB_QPS_INIT;
4258         attr->port_num = 1;
4259         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
4260                                 IB_QP_PORT, NULL);
4261         if (ret) {
4262                 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
4263                 goto error_4;
4264         }
4265
4266         memset(attr, 0, sizeof(*attr));
4267         attr->qp_state = IB_QPS_RTR;
4268         attr->path_mtu = IB_MTU_256;
4269
4270         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4271         if (ret) {
4272                 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
4273                 goto error_4;
4274         }
4275
4276         memset(attr, 0, sizeof(*attr));
4277         attr->qp_state = IB_QPS_RTS;
4278         ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
4279         if (ret) {
4280                 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
4281                 goto error_4;
4282         }
4283
4284         dev->umrc.qp = qp;
4285         dev->umrc.cq = cq;
4286         dev->umrc.pd = pd;
4287
4288         sema_init(&dev->umrc.sem, MAX_UMR_WR);
4289         ret = mlx5_mr_cache_init(dev);
4290         if (ret) {
4291                 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
4292                 goto error_4;
4293         }
4294
4295         kfree(attr);
4296         kfree(init_attr);
4297
4298         return 0;
4299
4300 error_4:
4301         mlx5_ib_destroy_qp(qp);
4302         dev->umrc.qp = NULL;
4303
4304 error_3:
4305         ib_free_cq(cq);
4306         dev->umrc.cq = NULL;
4307
4308 error_2:
4309         ib_dealloc_pd(pd);
4310         dev->umrc.pd = NULL;
4311
4312 error_0:
4313         kfree(attr);
4314         kfree(init_attr);
4315         return ret;
4316 }
4317
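/* Map the reported UMR fence capability onto a fence mode; anything
 * unrecognized falls back to strong ordering.
 */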
4318 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
4319 {
4320         switch (umr_fence_cap) {
4321         case MLX5_CAP_UMR_FENCE_NONE:
4322                 return MLX5_FENCE_MODE_NONE;
4323         case MLX5_CAP_UMR_FENCE_SMALL:
4324                 return MLX5_FENCE_MODE_INITIATOR_SMALL;
4325         default:
4326                 return MLX5_FENCE_MODE_STRONG_ORDERING;
4327         }
4328 }
4329
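/* Create the device-wide verbs resources (PD, CQ, two XRCDs and two
 * SRQs) used internally by the driver, and initialize the per-port
 * P_Key change work used by the GSI code.
 */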
4330 static int create_dev_resources(struct mlx5_ib_resources *devr)
4331 {
4332         struct ib_srq_init_attr attr;
4333         struct mlx5_ib_dev *dev;
4334         struct ib_cq_init_attr cq_attr = {.cqe = 1};
4335         int port;
4336         int ret = 0;
4337
4338         dev = container_of(devr, struct mlx5_ib_dev, devr);
4339
4340         mutex_init(&devr->mutex);
4341
4342         devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
4343         if (IS_ERR(devr->p0)) {
4344                 ret = PTR_ERR(devr->p0);
4345                 goto error0;
4346         }
4347         devr->p0->device  = &dev->ib_dev;
4348         devr->p0->uobject = NULL;
4349         atomic_set(&devr->p0->usecnt, 0);
4350
4351         devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
4352         if (IS_ERR(devr->c0)) {
4353                 ret = PTR_ERR(devr->c0);
4354                 goto error1;
4355         }
4356         devr->c0->device        = &dev->ib_dev;
4357         devr->c0->uobject       = NULL;
4358         devr->c0->comp_handler  = NULL;
4359         devr->c0->event_handler = NULL;
4360         devr->c0->cq_context    = NULL;
4361         atomic_set(&devr->c0->usecnt, 0);
4362
4363         devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
4364         if (IS_ERR(devr->x0)) {
4365                 ret = PTR_ERR(devr->x0);
4366                 goto error2;
4367         }
4368         devr->x0->device = &dev->ib_dev;
4369         devr->x0->inode = NULL;
4370         atomic_set(&devr->x0->usecnt, 0);
4371         mutex_init(&devr->x0->tgt_qp_mutex);
4372         INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
4373
4374         devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
4375         if (IS_ERR(devr->x1)) {
4376                 ret = PTR_ERR(devr->x1);
4377                 goto error3;
4378         }
4379         devr->x1->device = &dev->ib_dev;
4380         devr->x1->inode = NULL;
4381         atomic_set(&devr->x1->usecnt, 0);
4382         mutex_init(&devr->x1->tgt_qp_mutex);
4383         INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
4384
4385         memset(&attr, 0, sizeof(attr));
4386         attr.attr.max_sge = 1;
4387         attr.attr.max_wr = 1;
4388         attr.srq_type = IB_SRQT_XRC;
4389         attr.ext.cq = devr->c0;
4390         attr.ext.xrc.xrcd = devr->x0;
4391
4392         devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
4393         if (IS_ERR(devr->s0)) {
4394                 ret = PTR_ERR(devr->s0);
4395                 goto error4;
4396         }
4397         devr->s0->device        = &dev->ib_dev;
4398         devr->s0->pd            = devr->p0;
4399         devr->s0->uobject       = NULL;
4400         devr->s0->event_handler = NULL;
4401         devr->s0->srq_context   = NULL;
4402         devr->s0->srq_type      = IB_SRQT_XRC;
4403         devr->s0->ext.xrc.xrcd  = devr->x0;
4404         devr->s0->ext.cq        = devr->c0;
4405         atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
4406         atomic_inc(&devr->s0->ext.cq->usecnt);
4407         atomic_inc(&devr->p0->usecnt);
4408         atomic_set(&devr->s0->usecnt, 0);
4409
4410         memset(&attr, 0, sizeof(attr));
4411         attr.attr.max_sge = 1;
4412         attr.attr.max_wr = 1;
4413         attr.srq_type = IB_SRQT_BASIC;
4414         devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
4415         if (IS_ERR(devr->s1)) {
4416                 ret = PTR_ERR(devr->s1);
4417                 goto error5;
4418         }
4419         devr->s1->device        = &dev->ib_dev;
4420         devr->s1->pd            = devr->p0;
4421         devr->s1->uobject       = NULL;
4422         devr->s1->event_handler = NULL;
4423         devr->s1->srq_context   = NULL;
4424         devr->s1->srq_type      = IB_SRQT_BASIC;
4425         devr->s1->ext.cq        = devr->c0;
4426         atomic_inc(&devr->p0->usecnt);
4427         atomic_set(&devr->s1->usecnt, 0);
4428
4429         for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
4430                 INIT_WORK(&devr->ports[port].pkey_change_work,
4431                           pkey_change_handler);
4432                 devr->ports[port].devr = devr;
4433         }
4434
4435         return 0;
4436
4437 error5:
4438         mlx5_ib_destroy_srq(devr->s0);
4439 error4:
4440         mlx5_ib_dealloc_xrcd(devr->x1);
4441 error3:
4442         mlx5_ib_dealloc_xrcd(devr->x0);
4443 error2:
4444         mlx5_ib_destroy_cq(devr->c0);
4445 error1:
4446         mlx5_ib_dealloc_pd(devr->p0);
4447 error0:
4448         return ret;
4449 }
4450
4451 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
4452 {
4453         struct mlx5_ib_dev *dev =
4454                 container_of(devr, struct mlx5_ib_dev, devr);
4455         int port;
4456
4457         mlx5_ib_destroy_srq(devr->s1);
4458         mlx5_ib_destroy_srq(devr->s0);
4459         mlx5_ib_dealloc_xrcd(devr->x0);
4460         mlx5_ib_dealloc_xrcd(devr->x1);
4461         mlx5_ib_destroy_cq(devr->c0);
4462         mlx5_ib_dealloc_pd(devr->p0);
4463
        /* Make sure no P_Key change work items are still executing */
4465         for (port = 0; port < dev->num_ports; ++port)
4466                 cancel_work_sync(&devr->ports[port].pkey_change_work);
4467 }
4468
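/* Compute the RDMA core port capabilities: IB link layer ports report
 * plain IB; Ethernet ports report raw packet support (unless multiport
 * is enabled) and, when both IPv4 and IPv6 RoCE L3 types are supported,
 * RoCE v1/v2 according to roce_version.
 */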
4469 static u32 get_core_cap_flags(struct ib_device *ibdev)
4470 {
4471         struct mlx5_ib_dev *dev = to_mdev(ibdev);
4472         enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
4473         u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
4474         u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
4475         bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
4476         u32 ret = 0;
4477
4478         if (ll == IB_LINK_LAYER_INFINIBAND)
4479                 return RDMA_CORE_PORT_IBA_IB;
4480
4481         if (raw_support)
4482                 ret = RDMA_CORE_PORT_RAW_PACKET;
4483
4484         if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
4485                 return ret;
4486
4487         if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
4488                 return ret;
4489
4490         if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
4491                 ret |= RDMA_CORE_PORT_IBA_ROCE;
4492
4493         if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
4494                 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
4495
4496         return ret;
4497 }
4498
4499 static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
4500                                struct ib_port_immutable *immutable)
4501 {
4502         struct ib_port_attr attr;
4503         struct mlx5_ib_dev *dev = to_mdev(ibdev);
4504         enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
4505         int err;
4506
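        /* core_cap_flags is set before ib_query_port() as well, since the
         * query path may consult the immutable flags (e.g. through the
         * rdma_protocol_*() helpers) while they are still being built.
         */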
4507         immutable->core_cap_flags = get_core_cap_flags(ibdev);
4508
4509         err = ib_query_port(ibdev, port_num, &attr);
4510         if (err)
4511                 return err;
4512
4513         immutable->pkey_tbl_len = attr.pkey_tbl_len;
4514         immutable->gid_tbl_len = attr.gid_tbl_len;
4515         immutable->core_cap_flags = get_core_cap_flags(ibdev);
4516         if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
4517                 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
4518
4519         return 0;
4520 }
4521
4522 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
4523                                    struct ib_port_immutable *immutable)
4524 {
4525         struct ib_port_attr attr;
4526         int err;
4527
4528         immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
4529
4530         err = ib_query_port(ibdev, port_num, &attr);
4531         if (err)
4532                 return err;
4533
4534         immutable->pkey_tbl_len = attr.pkey_tbl_len;
4535         immutable->gid_tbl_len = attr.gid_tbl_len;
4536         immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
4537
4538         return 0;
4539 }
4540
4541 static void get_dev_fw_str(struct ib_device *ibdev, char *str)
4542 {
4543         struct mlx5_ib_dev *dev =
4544                 container_of(ibdev, struct mlx5_ib_dev, ib_dev);
4545         snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
4546                  fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
4547                  fw_rev_sub(dev->mdev));
4548 }
4549
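/* When hardware LAG is active, create the vport LAG configuration and
 * the LAG demux flow table used to demultiplex traffic between ports.
 */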
4550 static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
4551 {
4552         struct mlx5_core_dev *mdev = dev->mdev;
4553         struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
4554                                                                  MLX5_FLOW_NAMESPACE_LAG);
4555         struct mlx5_flow_table *ft;
4556         int err;
4557
4558         if (!ns || !mlx5_lag_is_active(mdev))
4559                 return 0;
4560
4561         err = mlx5_cmd_create_vport_lag(mdev);
4562         if (err)
4563                 return err;
4564
4565         ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
4566         if (IS_ERR(ft)) {
4567                 err = PTR_ERR(ft);
4568                 goto err_destroy_vport_lag;
4569         }
4570
4571         dev->flow_db->lag_demux_ft = ft;
4572         return 0;
4573
4574 err_destroy_vport_lag:
4575         mlx5_cmd_destroy_vport_lag(mdev);
4576         return err;
4577 }
4578
4579 static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
4580 {
4581         struct mlx5_core_dev *mdev = dev->mdev;
4582
4583         if (dev->flow_db->lag_demux_ft) {
4584                 mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
4585                 dev->flow_db->lag_demux_ft = NULL;
4586
4587                 mlx5_cmd_destroy_vport_lag(mdev);
4588         }
4589 }
4590
4591 static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
4592 {
4593         int err;
4594
4595         dev->roce[port_num].nb.notifier_call = mlx5_netdev_event;
4596         err = register_netdevice_notifier(&dev->roce[port_num].nb);
4597         if (err) {
4598                 dev->roce[port_num].nb.notifier_call = NULL;
4599                 return err;
4600         }
4601
4602         return 0;
4603 }
4604
4605 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
4606 {
4607         if (dev->roce[port_num].nb.notifier_call) {
4608                 unregister_netdevice_notifier(&dev->roce[port_num].nb);
4609                 dev->roce[port_num].nb.notifier_call = NULL;
4610         }
4611 }
4612
4613 static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
4614 {
4615         int err;
4616
4617         if (MLX5_CAP_GEN(dev->mdev, roce)) {
4618                 err = mlx5_nic_vport_enable_roce(dev->mdev);
4619                 if (err)
4620                         return err;
4621         }
4622
4623         err = mlx5_eth_lag_init(dev);
4624         if (err)
4625                 goto err_disable_roce;
4626
4627         return 0;
4628
4629 err_disable_roce:
4630         if (MLX5_CAP_GEN(dev->mdev, roce))
4631                 mlx5_nic_vport_disable_roce(dev->mdev);
4632
4633         return err;
4634 }
4635
4636 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
4637 {
4638         mlx5_eth_lag_cleanup(dev);
4639         if (MLX5_CAP_GEN(dev->mdev, roce))
4640                 mlx5_nic_vport_disable_roce(dev->mdev);
4641 }
4642
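/* Describes one hardware counter exposed through the rdma hw_stats
 * interface: its user-visible name and the byte offset of its value in
 * the firmware query output.
 */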
4643 struct mlx5_ib_counter {
4644         const char *name;
4645         size_t offset;
4646 };
4647
4648 #define INIT_Q_COUNTER(_name)           \
4649         { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
4650
4651 static const struct mlx5_ib_counter basic_q_cnts[] = {
4652         INIT_Q_COUNTER(rx_write_requests),
4653         INIT_Q_COUNTER(rx_read_requests),
4654         INIT_Q_COUNTER(rx_atomic_requests),
4655         INIT_Q_COUNTER(out_of_buffer),
4656 };
4657
4658 static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
4659         INIT_Q_COUNTER(out_of_sequence),
4660 };
4661
4662 static const struct mlx5_ib_counter retrans_q_cnts[] = {
4663         INIT_Q_COUNTER(duplicate_request),
4664         INIT_Q_COUNTER(rnr_nak_retry_err),
4665         INIT_Q_COUNTER(packet_seq_err),
4666         INIT_Q_COUNTER(implied_nak_seq_err),
4667         INIT_Q_COUNTER(local_ack_timeout_err),
4668 };
4669
4670 #define INIT_CONG_COUNTER(_name)                \
4671         { .name = #_name, .offset =     \
4672                 MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
4673
4674 static const struct mlx5_ib_counter cong_cnts[] = {
4675         INIT_CONG_COUNTER(rp_cnp_ignored),
4676         INIT_CONG_COUNTER(rp_cnp_handled),
4677         INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
4678         INIT_CONG_COUNTER(np_cnp_sent),
4679 };
4680
4681 static const struct mlx5_ib_counter extended_err_cnts[] = {
4682         INIT_Q_COUNTER(resp_local_length_error),
4683         INIT_Q_COUNTER(resp_cqe_error),
4684         INIT_Q_COUNTER(req_cqe_error),
4685         INIT_Q_COUNTER(req_remote_invalid_request),
4686         INIT_Q_COUNTER(req_remote_access_errors),
4687         INIT_Q_COUNTER(resp_remote_access_errors),
4688         INIT_Q_COUNTER(resp_cqe_flush_error),
4689         INIT_Q_COUNTER(req_cqe_flush_error),
4690 };
4691
4692 static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
4693 {
4694         int i;
4695
4696         for (i = 0; i < dev->num_ports; i++) {
4697                 if (dev->port[i].cnts.set_id)
4698                         mlx5_core_dealloc_q_counter(dev->mdev,
4699                                                     dev->port[i].cnts.set_id);
4700                 kfree(dev->port[i].cnts.names);
4701                 kfree(dev->port[i].cnts.offsets);
4702         }
4703 }
4704
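/* Count the queue/congestion counters this device supports and allocate
 * the parallel name/offset arrays; num_q_counters and num_cong_counters
 * are recorded separately since they are queried by different commands.
 */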
4705 static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
4706                                     struct mlx5_ib_counters *cnts)
4707 {
4708         u32 num_counters;
4709
4710         num_counters = ARRAY_SIZE(basic_q_cnts);
4711
4712         if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
4713                 num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
4714
4715         if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
4716                 num_counters += ARRAY_SIZE(retrans_q_cnts);
4717
4718         if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
4719                 num_counters += ARRAY_SIZE(extended_err_cnts);
4720
4721         cnts->num_q_counters = num_counters;
4722
4723         if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
4724                 cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
4725                 num_counters += ARRAY_SIZE(cong_cnts);
4726         }
4727
        cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
4729         if (!cnts->names)
4730                 return -ENOMEM;
4731
        cnts->offsets = kcalloc(num_counters,
                                sizeof(*cnts->offsets), GFP_KERNEL);
4734         if (!cnts->offsets)
4735                 goto err_names;
4736
4737         return 0;
4738
4739 err_names:
4740         kfree(cnts->names);
4741         cnts->names = NULL;
4742         return -ENOMEM;
4743 }
4744
4745 static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
4746                                   const char **names,
4747                                   size_t *offsets)
4748 {
4749         int i;
4750         int j = 0;
4751
4752         for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
4753                 names[j] = basic_q_cnts[i].name;
4754                 offsets[j] = basic_q_cnts[i].offset;
4755         }
4756
4757         if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
4758                 for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
4759                         names[j] = out_of_seq_q_cnts[i].name;
4760                         offsets[j] = out_of_seq_q_cnts[i].offset;
4761                 }
4762         }
4763
4764         if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
4765                 for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
4766                         names[j] = retrans_q_cnts[i].name;
4767                         offsets[j] = retrans_q_cnts[i].offset;
4768                 }
4769         }
4770
4771         if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
4772                 for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
4773                         names[j] = extended_err_cnts[i].name;
4774                         offsets[j] = extended_err_cnts[i].offset;
4775                 }
4776         }
4777
4778         if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
4779                 for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
4780                         names[j] = cong_cnts[i].name;
4781                         offsets[j] = cong_cnts[i].offset;
4782                 }
4783         }
4784 }
4785
4786 static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
4787 {
4788         int err = 0;
4789         int i;
4790
4791         for (i = 0; i < dev->num_ports; i++) {
4792                 err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
4793                 if (err)
4794                         goto err_alloc;
4795
4796                 mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
4797                                       dev->port[i].cnts.offsets);
4798
4799                 err = mlx5_core_alloc_q_counter(dev->mdev,
4800                                                 &dev->port[i].cnts.set_id);
4801                 if (err) {
4802                         mlx5_ib_warn(dev,
4803                                      "couldn't allocate queue counter for port %d, err %d\n",
4804                                      i + 1, err);
4805                         goto err_alloc;
4806                 }
4807                 dev->port[i].cnts.set_id_valid = true;
4808         }
4809
4810         return 0;
4811
4812 err_alloc:
4813         mlx5_ib_dealloc_counters(dev);
4814         return err;
4815 }
4816
4817 static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
4818                                                     u8 port_num)
4819 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_port *port;

        /* We support only per-port stats */
        if (port_num == 0)
                return NULL;

        port = &dev->port[port_num - 1];
4826
4827         return rdma_alloc_hw_stats_struct(port->cnts.names,
4828                                           port->cnts.num_q_counters +
4829                                           port->cnts.num_cong_counters,
4830                                           RDMA_HW_STATS_DEFAULT_LIFESPAN);
4831 }
4832
4833 static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
4834                                     struct mlx5_ib_port *port,
4835                                     struct rdma_hw_stats *stats)
4836 {
4837         int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
4838         void *out;
4839         __be32 val;
4840         int ret, i;
4841
4842         out = kvzalloc(outlen, GFP_KERNEL);
4843         if (!out)
4844                 return -ENOMEM;
4845
4846         ret = mlx5_core_query_q_counter(mdev,
4847                                         port->cnts.set_id, 0,
4848                                         out, outlen);
4849         if (ret)
4850                 goto free;
4851
4852         for (i = 0; i < port->cnts.num_q_counters; i++) {
4853                 val = *(__be32 *)(out + port->cnts.offsets[i]);
4854                 stats->value[i] = (u64)be32_to_cpu(val);
4855         }
4856
4857 free:
4858         kvfree(out);
4859         return ret;
4860 }
4861
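/* .get_hw_stats callback: queue counters are per IB device and are read
 * from the master mdev, while congestion counters are read from the
 * mdev that owns the native port, which may differ on multiport setups.
 */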
4862 static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
4863                                 struct rdma_hw_stats *stats,
4864                                 u8 port_num, int index)
4865 {
4866         struct mlx5_ib_dev *dev = to_mdev(ibdev);
4867         struct mlx5_ib_port *port = &dev->port[port_num - 1];
4868         struct mlx5_core_dev *mdev;
4869         int ret, num_counters;
4870         u8 mdev_port_num;
4871
4872         if (!stats)
4873                 return -EINVAL;
4874
4875         num_counters = port->cnts.num_q_counters + port->cnts.num_cong_counters;
4876
4877         /* q_counters are per IB device, query the master mdev */
4878         ret = mlx5_ib_query_q_counters(dev->mdev, port, stats);
4879         if (ret)
4880                 return ret;
4881
4882         if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
4883                 mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
4884                                                     &mdev_port_num);
4885                 if (!mdev) {
                        /* If the port isn't affiliated yet it is in the
                         * down state, and all of its counters are still
                         * zero, so there is no need to read them from
                         * the HCA.
                         */
4890                         goto done;
4891                 }
4892                 ret = mlx5_lag_query_cong_counters(dev->mdev,
4893                                                    stats->value +
4894                                                    port->cnts.num_q_counters,
4895                                                    port->cnts.num_cong_counters,
4896                                                    port->cnts.offsets +
4897                                                    port->cnts.num_q_counters);
4898
4899                 mlx5_ib_put_native_port_mdev(dev, port_num);
4900                 if (ret)
4901                         return ret;
4902         }
4903
4904 done:
4905         return num_counters;
4906 }
4907
static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
{
        mlx5_rdma_netdev_free(netdev);
}
4912
4913 static struct net_device*
4914 mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
4915                           u8 port_num,
4916                           enum rdma_netdev_t type,
4917                           const char *name,
4918                           unsigned char name_assign_type,
4919                           void (*setup)(struct net_device *))
4920 {
4921         struct net_device *netdev;
4922         struct rdma_netdev *rn;
4923
4924         if (type != RDMA_NETDEV_IPOIB)
4925                 return ERR_PTR(-EOPNOTSUPP);
4926
4927         netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
4928                                         name, setup);
4929         if (likely(!IS_ERR_OR_NULL(netdev))) {
4930                 rn = netdev_priv(netdev);
4931                 rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;
4932         }
4933         return netdev;
4934 }
4935
4936 static void delay_drop_debugfs_cleanup(struct mlx5_ib_dev *dev)
4937 {
4938         if (!dev->delay_drop.dbg)
4939                 return;
4940         debugfs_remove_recursive(dev->delay_drop.dbg->dir_debugfs);
4941         kfree(dev->delay_drop.dbg);
4942         dev->delay_drop.dbg = NULL;
4943 }
4944
4945 static void cancel_delay_drop(struct mlx5_ib_dev *dev)
4946 {
4947         if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
4948                 return;
4949
4950         cancel_work_sync(&dev->delay_drop.delay_drop_work);
4951         delay_drop_debugfs_cleanup(dev);
4952 }
4953
4954 static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
4955                                        size_t count, loff_t *pos)
4956 {
4957         struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
4958         char lbuf[20];
4959         int len;
4960
4961         len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
4962         return simple_read_from_buffer(buf, count, pos, lbuf, len);
4963 }
4964
4965 static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
4966                                         size_t count, loff_t *pos)
4967 {
4968         struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
4969         u32 timeout;
4970         u32 var;
4971
4972         if (kstrtouint_from_user(buf, count, 0, &var))
4973                 return -EFAULT;
4974
4975         timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
4976                         1000);
4977         if (timeout != var)
4978                 mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
4979                             timeout);
4980
4981         delay_drop->timeout = timeout;
4982
4983         return count;
4984 }
4985
4986 static const struct file_operations fops_delay_drop_timeout = {
4987         .owner  = THIS_MODULE,
4988         .open   = simple_open,
4989         .write  = delay_drop_timeout_write,
4990         .read   = delay_drop_timeout_read,
4991 };
4992
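/* Expose the delay-drop state under <mlx5 debugfs root>/delay_drop:
 * read-only event and RQ counters plus a read/write timeout value in
 * microseconds (rounded to 100 usec granularity on write).
 */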
4993 static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
4994 {
4995         struct mlx5_ib_dbg_delay_drop *dbg;
4996
4997         if (!mlx5_debugfs_root)
4998                 return 0;
4999
5000         dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
5001         if (!dbg)
5002                 return -ENOMEM;
5003
5004         dev->delay_drop.dbg = dbg;
5005
5006         dbg->dir_debugfs =
5007                 debugfs_create_dir("delay_drop",
5008                                    dev->mdev->priv.dbg_root);
5009         if (!dbg->dir_debugfs)
5010                 goto out_debugfs;
5011
5012         dbg->events_cnt_debugfs =
5013                 debugfs_create_atomic_t("num_timeout_events", 0400,
5014                                         dbg->dir_debugfs,
5015                                         &dev->delay_drop.events_cnt);
5016         if (!dbg->events_cnt_debugfs)
5017                 goto out_debugfs;
5018
5019         dbg->rqs_cnt_debugfs =
5020                 debugfs_create_atomic_t("num_rqs", 0400,
5021                                         dbg->dir_debugfs,
5022                                         &dev->delay_drop.rqs_cnt);
5023         if (!dbg->rqs_cnt_debugfs)
5024                 goto out_debugfs;
5025
5026         dbg->timeout_debugfs =
5027                 debugfs_create_file("timeout", 0600,
5028                                     dbg->dir_debugfs,
5029                                     &dev->delay_drop,
5030                                     &fops_delay_drop_timeout);
5031         if (!dbg->timeout_debugfs)
5032                 goto out_debugfs;
5033
5034         return 0;
5035
5036 out_debugfs:
5037         delay_drop_debugfs_cleanup(dev);
5038         return -ENOMEM;
5039 }
5040
5041 static void init_delay_drop(struct mlx5_ib_dev *dev)
5042 {
5043         if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
5044                 return;
5045
5046         mutex_init(&dev->delay_drop.lock);
5047         dev->delay_drop.dev = dev;
5048         dev->delay_drop.activate = false;
5049         dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
5050         INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
5051         atomic_set(&dev->delay_drop.rqs_cnt, 0);
5052         atomic_set(&dev->delay_drop.events_cnt, 0);
5053
5054         if (delay_drop_debugfs_init(dev))
5055                 mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
5056 }
5057
5058 static const struct cpumask *
5059 mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
5060 {
5061         struct mlx5_ib_dev *dev = to_mdev(ibdev);
5062
5063         return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
5064 }
5065
5066 /* The mlx5_ib_multiport_mutex should be held when calling this function */
5067 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
5068                                       struct mlx5_ib_multiport_info *mpi)
5069 {
5070         u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5071         struct mlx5_ib_port *port = &ibdev->port[port_num];
5072         int comps;
5073         int err;
5074         int i;
5075
5076         mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
5077
5078         spin_lock(&port->mp.mpi_lock);
5079         if (!mpi->ibdev) {
5080                 spin_unlock(&port->mp.mpi_lock);
5081                 return;
5082         }
5083         mpi->ibdev = NULL;
5084
5085         spin_unlock(&port->mp.mpi_lock);
5086         mlx5_remove_netdev_notifier(ibdev, port_num);
5087         spin_lock(&port->mp.mpi_lock);
5088
5089         comps = mpi->mdev_refcnt;
5090         if (comps) {
5091                 mpi->unaffiliate = true;
5092                 init_completion(&mpi->unref_comp);
5093                 spin_unlock(&port->mp.mpi_lock);
5094
5095                 for (i = 0; i < comps; i++)
5096                         wait_for_completion(&mpi->unref_comp);
5097
5098                 spin_lock(&port->mp.mpi_lock);
5099                 mpi->unaffiliate = false;
5100         }
5101
5102         port->mp.mpi = NULL;
5103
5104         list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
5105
5106         spin_unlock(&port->mp.mpi_lock);
5107
5108         err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
5109
5110         mlx5_ib_dbg(ibdev, "unaffiliated port %d\n", port_num + 1);
        /* Only log an error on failure; the pointer cleanup and the
         * re-insertion into the unaffiliated list above are still required.
         */
5114         if (err)
5115                 mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
5116                             port_num + 1);
5117
5118         ibdev->roce[port_num].last_port_state = IB_PORT_DOWN;
5119 }
5120
5121 /* The mlx5_ib_multiport_mutex should be held when calling this function */
5122 static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
5123                                     struct mlx5_ib_multiport_info *mpi)
5124 {
5125         u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
5126         int err;
5127
5128         spin_lock(&ibdev->port[port_num].mp.mpi_lock);
5129         if (ibdev->port[port_num].mp.mpi) {
5130                 mlx5_ib_warn(ibdev, "port %d already affiliated.\n",
5131                              port_num + 1);
5132                 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5133                 return false;
5134         }
5135
5136         ibdev->port[port_num].mp.mpi = mpi;
5137         mpi->ibdev = ibdev;
5138         spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
5139
5140         err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
5141         if (err)
5142                 goto unbind;
5143
5144         err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
5145         if (err)
5146                 goto unbind;
5147
5148         err = mlx5_add_netdev_notifier(ibdev, port_num);
5149         if (err) {
5150                 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
5151                             port_num + 1);
5152                 goto unbind;
5153         }
5154
5155         err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
5156         if (err)
5157                 goto unbind;
5158
5159         return true;
5160
5161 unbind:
5162         mlx5_ib_unbind_slave_port(ibdev, mpi);
5163         return false;
5164 }
5165
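/* On a multiport master, affiliate every unaffiliated slave port whose
 * system image GUID matches ours; the native port gets a stub mpi
 * entry, and ports with no matching slave are left for later binding.
 */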
5166 static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
5167 {
5168         int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5169         enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5170                                                           port_num + 1);
5171         struct mlx5_ib_multiport_info *mpi;
5172         int err;
5173         int i;
5174
5175         if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5176                 return 0;
5177
5178         err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
5179                                                      &dev->sys_image_guid);
5180         if (err)
5181                 return err;
5182
5183         err = mlx5_nic_vport_enable_roce(dev->mdev);
5184         if (err)
5185                 return err;
5186
5187         mutex_lock(&mlx5_ib_multiport_mutex);
5188         for (i = 0; i < dev->num_ports; i++) {
5189                 bool bound = false;
5190
5191                 /* build a stub multiport info struct for the native port. */
5192                 if (i == port_num) {
5193                         mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
5194                         if (!mpi) {
5195                                 mutex_unlock(&mlx5_ib_multiport_mutex);
5196                                 mlx5_nic_vport_disable_roce(dev->mdev);
5197                                 return -ENOMEM;
5198                         }
5199
5200                         mpi->is_master = true;
5201                         mpi->mdev = dev->mdev;
5202                         mpi->sys_image_guid = dev->sys_image_guid;
5203                         dev->port[i].mp.mpi = mpi;
5204                         mpi->ibdev = dev;
5205                         mpi = NULL;
5206                         continue;
5207                 }
5208
5209                 list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
5210                                     list) {
5211                         if (dev->sys_image_guid == mpi->sys_image_guid &&
5212                             (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
5213                                 bound = mlx5_ib_bind_slave_port(dev, mpi);
5214                         }
5215
5216                         if (bound) {
5217                                 dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
5218                                 mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
5219                                 list_del(&mpi->list);
5220                                 break;
5221                         }
5222                 }
5223                 if (!bound) {
5224                         get_port_caps(dev, i + 1);
5225                         mlx5_ib_dbg(dev, "no free port found for port %d\n",
5226                                     i + 1);
5227                 }
5228         }
5229
5230         list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
5231         mutex_unlock(&mlx5_ib_multiport_mutex);
5232         return err;
5233 }
5234
5235 static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
5236 {
5237         int port_num = mlx5_core_native_port_num(dev->mdev) - 1;
5238         enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
5239                                                           port_num + 1);
5240         int i;
5241
5242         if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
5243                 return;
5244
5245         mutex_lock(&mlx5_ib_multiport_mutex);
5246         for (i = 0; i < dev->num_ports; i++) {
5247                 if (dev->port[i].mp.mpi) {
5248                         /* Destroy the native port stub */
5249                         if (i == port_num) {
5250                                 kfree(dev->port[i].mp.mpi);
5251                                 dev->port[i].mp.mpi = NULL;
5252                         } else {
5253                                 mlx5_ib_dbg(dev, "unbinding port_num: %d\n", i + 1);
5254                                 mlx5_ib_unbind_slave_port(dev, dev->port[i].mp.mpi);
5255                         }
5256                 }
5257         }
5258
5259         mlx5_ib_dbg(dev, "removing from devlist\n");
5260         list_del(&dev->ib_dev_list);
5261         mutex_unlock(&mlx5_ib_multiport_mutex);
5262
5263         mlx5_nic_vport_disable_roce(dev->mdev);
5264 }
5265
5266 ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_dm, UVERBS_OBJECT_DM,
5267                              UVERBS_METHOD_DM_ALLOC,
5268                              &UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
5269                                                   UVERBS_ATTR_TYPE(u64),
5270                                                   UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
5271                              &UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
5272                                                   UVERBS_ATTR_TYPE(u16),
5273                                                   UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
5274
5275 ADD_UVERBS_ATTRIBUTES_SIMPLE(mlx5_ib_flow_action, UVERBS_OBJECT_FLOW_ACTION,
5276                              UVERBS_METHOD_FLOW_ACTION_ESP_CREATE,
5277                              &UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
5278                                                  UVERBS_ATTR_TYPE(u64),
5279                                                  UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
5280
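/* Build the uverbs specs tree from the default objects plus the
 * mlx5-specific trees declared above, each added only when the matching
 * device capability (IPsec offload, MEMIC device memory) is present.
 */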
5281 #define NUM_TREES       2
5282 static int populate_specs_root(struct mlx5_ib_dev *dev)
5283 {
5284         const struct uverbs_object_tree_def *default_root[NUM_TREES + 1] = {
5285                 uverbs_default_get_objects()};
5286         size_t num_trees = 1;
5287
5288         if (mlx5_accel_ipsec_device_caps(dev->mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
5289             !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
5290                 default_root[num_trees++] = &mlx5_ib_flow_action;
5291
5292         if (MLX5_CAP_DEV_MEM(dev->mdev, memic) &&
5293             !WARN_ON(num_trees >= ARRAY_SIZE(default_root)))
5294                 default_root[num_trees++] = &mlx5_ib_dm;
5295
5296         dev->ib_dev.specs_root =
5297                 uverbs_alloc_spec_tree(num_trees, default_root);
5298
5299         return PTR_ERR_OR_ZERO(dev->ib_dev.specs_root);
5300 }
5301
5302 static void depopulate_specs_root(struct mlx5_ib_dev *dev)
5303 {
5304         uverbs_free_spec_tree(dev->ib_dev.specs_root);
5305 }
5306
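/* Read a counters object: query all the bound hardware counters into a
 * scratch buffer, then accumulate them into the user buffer according
 * to the stored description/index pairs.
 */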
5307 static int mlx5_ib_read_counters(struct ib_counters *counters,
5308                                  struct ib_counters_read_attr *read_attr,
5309                                  struct uverbs_attr_bundle *attrs)
5310 {
5311         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
5312         struct mlx5_read_counters_attr mread_attr = {};
5313         struct mlx5_ib_flow_counters_desc *desc;
5314         int ret, i;
5315
5316         mutex_lock(&mcounters->mcntrs_mutex);
5317         if (mcounters->cntrs_max_index > read_attr->ncounters) {
5318                 ret = -EINVAL;
5319                 goto err_bound;
5320         }
5321
5322         mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
5323                                  GFP_KERNEL);
5324         if (!mread_attr.out) {
5325                 ret = -ENOMEM;
5326                 goto err_bound;
5327         }
5328
5329         mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
5330         mread_attr.flags = read_attr->flags;
5331         ret = mcounters->read_counters(counters->device, &mread_attr);
5332         if (ret)
5333                 goto err_read;
5334
        /* Pass over the counters data array and accumulate the values into
         * the user buffer according to the description/index pairs.
         */
5338         desc = mcounters->counters_data;
5339         for (i = 0; i < mcounters->ncounters; i++)
5340                 read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
5341
5342 err_read:
5343         kfree(mread_attr.out);
5344 err_bound:
5345         mutex_unlock(&mcounters->mcntrs_mutex);
5346         return ret;
5347 }
5348
5349 static int mlx5_ib_destroy_counters(struct ib_counters *counters)
5350 {
5351         struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
5352
5353         counters_clear_description(counters);
5354         if (mcounters->hw_cntrs_hndl)
5355                 mlx5_fc_destroy(to_mdev(counters->device)->mdev,
5356                                 mcounters->hw_cntrs_hndl);
5357
5358         kfree(mcounters);
5359
5360         return 0;
5361 }
5362
5363 static struct ib_counters *mlx5_ib_create_counters(struct ib_device *device,
5364                                                    struct uverbs_attr_bundle *attrs)
5365 {
5366         struct mlx5_ib_mcounters *mcounters;
5367
5368         mcounters = kzalloc(sizeof(*mcounters), GFP_KERNEL);
5369         if (!mcounters)
5370                 return ERR_PTR(-ENOMEM);
5371
5372         mutex_init(&mcounters->mcntrs_mutex);
5373
5374         return &mcounters->ibcntrs;
5375 }
5376
5377 void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
5378 {
5379         mlx5_ib_cleanup_multiport_master(dev);
5380 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
5381         cleanup_srcu_struct(&dev->mr_srcu);
5382 #endif
5383         kfree(dev->port);
5384 }
5385
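/* First init stage: allocate per-port state, bind multiport slave
 * ports, query port caps and fill in the ib_device fields that do not
 * depend on later stages.
 */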
5386 int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
5387 {
5388         struct mlx5_core_dev *mdev = dev->mdev;
5389         const char *name;
5390         int err;
5391         int i;
5392
5393         dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
5394                             GFP_KERNEL);
5395         if (!dev->port)
5396                 return -ENOMEM;
5397
5398         for (i = 0; i < dev->num_ports; i++) {
5399                 spin_lock_init(&dev->port[i].mp.mpi_lock);
5400                 rwlock_init(&dev->roce[i].netdev_lock);
5401         }
5402
5403         err = mlx5_ib_init_multiport_master(dev);
5404         if (err)
5405                 goto err_free_port;
5406
5407         if (!mlx5_core_mp_enabled(mdev)) {
5408                 for (i = 1; i <= dev->num_ports; i++) {
5409                         err = get_port_caps(dev, i);
5410                         if (err)
5411                                 break;
5412                 }
5413         } else {
5414                 err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
5415         }
5416         if (err)
5417                 goto err_mp;
5418
5419         if (mlx5_use_mad_ifc(dev))
5420                 get_ext_port_caps(dev);
5421
5422         if (!mlx5_lag_is_active(mdev))
5423                 name = "mlx5_%d";
5424         else
5425                 name = "mlx5_bond_%d";
5426
5427         strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
5428         dev->ib_dev.owner               = THIS_MODULE;
5429         dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
5430         dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
5431         dev->ib_dev.phys_port_cnt       = dev->num_ports;
5432         dev->ib_dev.num_comp_vectors    =
5433                 dev->mdev->priv.eq_table.num_comp_vectors;
5434         dev->ib_dev.dev.parent          = &mdev->pdev->dev;
5435
5436         mutex_init(&dev->cap_mask_mutex);
5437         INIT_LIST_HEAD(&dev->qp_list);
5438         spin_lock_init(&dev->reset_flow_resource_lock);
5439
5440         spin_lock_init(&dev->memic.memic_lock);
5441         dev->memic.dev = mdev;
5442
5443 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
5444         err = init_srcu_struct(&dev->mr_srcu);
5445         if (err)
5446                 goto err_free_port;
5447 #endif
5448
5449         return 0;
err_mp:
        mlx5_ib_cleanup_multiport_master(dev);

err_free_port:
        kfree(dev->port);

        return err;
5457 }
5458
5459 static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
5460 {
5461         dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
5462
5463         if (!dev->flow_db)
5464                 return -ENOMEM;
5465
5466         mutex_init(&dev->flow_db->lock);
5467
5468         return 0;
5469 }
5470
5471 int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev)
5472 {
5473         struct mlx5_ib_dev *nic_dev;
5474
5475         nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch);
5476
5477         if (!nic_dev)
5478                 return -EINVAL;
5479
5480         dev->flow_db = nic_dev->flow_db;
5481
5482         return 0;
5483 }
5484
5485 static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
5486 {
5487         kfree(dev->flow_db);
5488 }
5489
int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;

	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_AH)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_REREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)	|
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)	|
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP)	|
		(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);

	dev->ib_dev.query_device	= mlx5_ib_query_device;
	dev->ib_dev.get_link_layer	= mlx5_ib_port_link_layer;
	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
	dev->ib_dev.add_gid		= mlx5_ib_add_gid;
	dev->ib_dev.del_gid		= mlx5_ib_del_gid;
	dev->ib_dev.query_pkey		= mlx5_ib_query_pkey;
	dev->ib_dev.modify_device	= mlx5_ib_modify_device;
	dev->ib_dev.modify_port		= mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext	= mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext	= mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap		= mlx5_ib_mmap;
	dev->ib_dev.alloc_pd		= mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd		= mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah		= mlx5_ib_create_ah;
	dev->ib_dev.query_ah		= mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah		= mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq		= mlx5_ib_create_srq;
	dev->ib_dev.modify_srq		= mlx5_ib_modify_srq;
	dev->ib_dev.query_srq		= mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq		= mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv	= mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp		= mlx5_ib_create_qp;
	dev->ib_dev.modify_qp		= mlx5_ib_modify_qp;
	dev->ib_dev.query_qp		= mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp		= mlx5_ib_destroy_qp;
	dev->ib_dev.post_send		= mlx5_ib_post_send;
	dev->ib_dev.post_recv		= mlx5_ib_post_recv;
	dev->ib_dev.create_cq		= mlx5_ib_create_cq;
	dev->ib_dev.modify_cq		= mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq		= mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq		= mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq		= mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq	= mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr		= mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr		= mlx5_ib_reg_user_mr;
	dev->ib_dev.rereg_user_mr	= mlx5_ib_rereg_user_mr;
	dev->ib_dev.dereg_mr		= mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast	= mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast	= mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr		= mlx5_ib_alloc_mr;
	dev->ib_dev.map_mr_sg		= mlx5_ib_map_mr_sg;
	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
	dev->ib_dev.get_dev_fw_str	= get_dev_fw_str;
	dev->ib_dev.get_vector_affinity	= mlx5_ib_get_vector_affinity;
	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
		dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;

	if (mlx5_core_is_pf(mdev)) {
		dev->ib_dev.get_vf_config	= mlx5_ib_get_vf_config;
		dev->ib_dev.set_vf_link_state	= mlx5_ib_set_vf_link_state;
		dev->ib_dev.get_vf_stats	= mlx5_ib_get_vf_stats;
		dev->ib_dev.set_vf_guid		= mlx5_ib_set_vf_guid;
	}

	dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;

	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));

	if (MLX5_CAP_GEN(mdev, imaicl)) {
		dev->ib_dev.alloc_mw		= mlx5_ib_alloc_mw;
		dev->ib_dev.dealloc_mw		= mlx5_ib_dealloc_mw;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW)	|
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
		dev->ib_dev.alloc_dm = mlx5_ib_alloc_dm;
		dev->ib_dev.dealloc_dm = mlx5_ib_dealloc_dm;
		dev->ib_dev.reg_dm_mr = mlx5_ib_reg_dm_mr;
	}

	dev->ib_dev.create_flow = mlx5_ib_create_flow;
	dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
	dev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
	dev->ib_dev.create_flow_action_esp = mlx5_ib_create_flow_action_esp;
	dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action;
	dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp;
	dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
	dev->ib_dev.create_counters = mlx5_ib_create_counters;
	dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters;
	dev->ib_dev.read_counters = mlx5_ib_read_counters;

	err = init_node_data(dev);
	if (err)
		return err;

	if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	    (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
	     MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
		mutex_init(&dev->lb_mutex);

	return 0;
}

static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
{
	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;
	dev->ib_dev.query_port		= mlx5_ib_query_port;

	return 0;
}

int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
{
	dev->ib_dev.get_port_immutable	= mlx5_port_rep_immutable;
	dev->ib_dev.query_port		= mlx5_ib_rep_query_port;

	return 0;
}

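/*
 * Common RoCE setup shared by the PF and representor profiles:
 * initializes per-port RoCE state, exposes the WQ/RWQ-indirection
 * verbs that exist only on an Ethernet link layer, and registers a
 * netdev notifier for the native port.
 */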
static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
					  u8 port_num)
{
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		dev->roce[i].dev = dev;
		dev->roce[i].native_port_num = i + 1;
		dev->roce[i].last_port_state = IB_PORT_DOWN;
	}

	dev->ib_dev.get_netdev	= mlx5_ib_get_netdev;
	dev->ib_dev.create_wq	= mlx5_ib_create_wq;
	dev->ib_dev.modify_wq	= mlx5_ib_modify_wq;
	dev->ib_dev.destroy_wq	= mlx5_ib_destroy_wq;
	dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
	dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;

	dev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);

	return mlx5_add_netdev_notifier(dev, port_num);
}

static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
{
	u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;

	mlx5_remove_netdev_notifier(dev, port_num);
}

int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
	int err = 0;
	u8 port_num;

	port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET)
		err = mlx5_ib_stage_common_roce_init(dev, port_num);

	return err;
}

void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_stage_common_roce_cleanup(dev);
}

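/*
 * PF flavour of the RoCE stage: in addition to the common setup it
 * enables the L2 side (mlx5_enable_eth()), which the representor
 * flavour above does not do.
 */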
static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
	u8 port_num;
	int err;

	port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		err = mlx5_ib_stage_common_roce_init(dev, port_num);
		if (err)
			return err;

		err = mlx5_enable_eth(dev, port_num);
		if (err)
			goto cleanup;
	}

	return 0;
cleanup:
	mlx5_ib_stage_common_roce_cleanup(dev);

	return err;
}

static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	enum rdma_link_layer ll;
	int port_type_cap;
	u8 port_num;

	port_num = mlx5_core_native_port_num(dev->mdev) - 1;
	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		mlx5_disable_eth(dev);
		mlx5_ib_stage_common_roce_cleanup(dev);
	}
}

int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
{
	return create_dev_resources(&dev->devr);
}

void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
{
	destroy_dev_resources(&dev->devr);
}

static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
	mlx5_ib_internal_fill_odp_caps(dev);

	return mlx5_ib_odp_init_one(dev);
}

int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
		dev->ib_dev.get_hw_stats	= mlx5_ib_get_hw_stats;
		dev->ib_dev.alloc_hw_stats	= mlx5_ib_alloc_hw_stats;

		return mlx5_ib_alloc_counters(dev);
	}

	return 0;
}

void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
		mlx5_ib_dealloc_counters(dev);
}

static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
{
	return mlx5_ib_init_cong_debugfs(dev,
					 mlx5_core_native_port_num(dev->mdev) - 1);
}

static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_cleanup_cong_debugfs(dev,
				     mlx5_core_native_port_num(dev->mdev) - 1);
}

static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
{
	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
}

static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
}

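/*
 * BFREG stage: allocates two blue-flame registers, a regular one and
 * one flagged for the fast path (the final mlx5_alloc_bfreg() argument
 * selects this).
 */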
int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
	if (err)
		return err;

	err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
	if (err)
		/* release the bfreg that did get allocated */
		mlx5_free_bfreg(dev->mdev, &dev->bfreg);

	return err;
}

void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
	mlx5_free_bfreg(dev->mdev, &dev->bfreg);
}

static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev)
{
	return populate_specs_root(dev);
}

int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
	return ib_register_device(&dev->ib_dev, NULL);
}

static void mlx5_ib_stage_depopulate_specs(struct mlx5_ib_dev *dev)
{
	depopulate_specs_root(dev);
}

void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
	destroy_umrc_res(dev);
}

void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
{
	ib_unregister_device(&dev->ib_dev);
}

int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
{
	return create_umr_res(dev);
}

static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
{
	init_delay_drop(dev);

	return 0;
}

static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
{
	cancel_delay_drop(dev);
}

int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
{
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
{
	mlx5_ib_register_vport_reps(dev);

	return 0;
}

static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
{
	mlx5_ib_unregister_vport_reps(dev);
}

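/*
 * Unwind @stage completed profile stages in reverse order, then free
 * the ib_device itself. Called both for full teardown and from the
 * error path of __mlx5_ib_add().
 */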
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage)
{
	/* stage is the number of stages that completed init */
	while (stage) {
		stage--;
		if (profile->stage[stage].cleanup)
			profile->stage[stage].cleanup(dev);
	}

	ib_dealloc_device((struct ib_device *)dev);
}

static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);

void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
		    const struct mlx5_ib_profile *profile)
{
	int err;
	int i;

	printk_once(KERN_INFO "%s", mlx5_version);

	for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
		if (profile->stage[i].init) {
			err = profile->stage[i].init(dev);
			if (err)
				goto err_out;
		}
	}

	dev->profile = profile;
	dev->ib_active = true;

	return dev;

err_out:
	__mlx5_ib_remove(dev, profile, i);

	return NULL;
}

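/*
 * Profile tables: __mlx5_ib_add() runs the init hooks top to bottom
 * and __mlx5_ib_remove() runs the cleanup hooks bottom to top. A NULL
 * hook means the stage needs no work in that direction.
 */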
static const struct mlx5_ib_profile pf_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_flow_db_init,
		     mlx5_ib_stage_flow_db_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_roce_init,
		     mlx5_ib_stage_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_ODP,
		     mlx5_ib_stage_odp_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
		     mlx5_ib_stage_cong_debugfs_init,
		     mlx5_ib_stage_cong_debugfs_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_UAR,
		     mlx5_ib_stage_uar_init,
		     mlx5_ib_stage_uar_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SPECS,
		     mlx5_ib_stage_populate_specs,
		     mlx5_ib_stage_depopulate_specs),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
		     mlx5_ib_stage_delay_drop_init,
		     mlx5_ib_stage_delay_drop_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
		     mlx5_ib_stage_class_attr_init,
		     NULL),
};

static const struct mlx5_ib_profile nic_rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_flow_db_init,
		     mlx5_ib_stage_flow_db_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_UAR,
		     mlx5_ib_stage_uar_init,
		     mlx5_ib_stage_uar_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SPECS,
		     mlx5_ib_stage_populate_specs,
		     mlx5_ib_stage_depopulate_specs),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
		     mlx5_ib_stage_class_attr_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
		     mlx5_ib_stage_rep_reg_init,
		     mlx5_ib_stage_rep_reg_cleanup),
};

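/*
 * A multiport-capable slave port has no ib_device of its own: try to
 * bind it to a registered master with a matching system image GUID,
 * otherwise park it on the unaffiliated list until a master appears.
 */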
static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
{
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_dev *dev;
	bool bound = false;
	int err;

	mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
	if (!mpi)
		return NULL;

	mpi->mdev = mdev;

	err = mlx5_query_nic_vport_system_image_guid(mdev,
						     &mpi->sys_image_guid);
	if (err) {
		kfree(mpi);
		return NULL;
	}

	mutex_lock(&mlx5_ib_multiport_mutex);
	list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
		if (dev->sys_image_guid == mpi->sys_image_guid)
			bound = mlx5_ib_bind_slave_port(dev, mpi);

		if (bound) {
			rdma_roce_rescan_device(&dev->ib_dev);
			break;
		}
	}

	if (!bound) {
		list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
		dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
	} else {
		mlx5_ib_dbg(dev, "bound port %u\n", port_num + 1);
	}
	mutex_unlock(&mlx5_ib_multiport_mutex);

	return mpi;
}

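/*
 * mlx5_core probe entry point: multiport Ethernet slaves take the
 * binding path above, eswitch managers in switchdev (offloads) mode
 * get the representor profile, and everything else the PF profile.
 */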
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	enum rdma_link_layer ll;
	struct mlx5_ib_dev *dev;
	int port_type_cap;

	printk_once(KERN_INFO "%s", mlx5_version);

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET) {
		u8 port_num = mlx5_core_native_port_num(mdev) - 1;

		return mlx5_ib_add_slave_port(mdev, port_num);
	}

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;
	dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
			     MLX5_CAP_GEN(mdev, num_vhca_ports));

	if (MLX5_ESWITCH_MANAGER(mdev) &&
	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
		dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);

		return __mlx5_ib_add(dev, &nic_rep_profile);
	}

	return __mlx5_ib_add(dev, &pf_profile);
}

static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_dev *dev;

	if (mlx5_core_is_mp_slave(mdev)) {
		mpi = context;
		mutex_lock(&mlx5_ib_multiport_mutex);
		if (mpi->ibdev)
			mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
		list_del(&mpi->list);
		mutex_unlock(&mlx5_ib_multiport_mutex);
		return;
	}

	dev = context;
	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}

static struct mlx5_interface mlx5_ib_interface = {
	.add		= mlx5_ib_add,
	.remove		= mlx5_ib_remove,
	.event		= mlx5_ib_event,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	.pfault		= mlx5_ib_pfault,
#endif
	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
};

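/*
 * Single pre-allocated fallback page for XLT updates (see
 * mlx5_ib_update_xlt()); the mutex is held from get to put, so all
 * users of the page are serialized.
 */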
unsigned long mlx5_ib_get_xlt_emergency_page(void)
{
	mutex_lock(&xlt_emergency_page_mutex);
	return xlt_emergency_page;
}

void mlx5_ib_put_xlt_emergency_page(void)
{
	mutex_unlock(&xlt_emergency_page_mutex);
}

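/*
 * Module init: the event workqueue and ODP state must exist before
 * mlx5_register_interface(), since registration can immediately call
 * back into mlx5_ib_add() for core devices that are already up.
 */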
static int __init mlx5_ib_init(void)
{
	int err;

	xlt_emergency_page = __get_free_page(GFP_KERNEL);
	if (!xlt_emergency_page)
		return -ENOMEM;

	mutex_init(&xlt_emergency_page_mutex);

	mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
	if (!mlx5_ib_event_wq) {
		free_page(xlt_emergency_page);
		return -ENOMEM;
	}

	mlx5_ib_odp_init();

	err = mlx5_register_interface(&mlx5_ib_interface);
	if (err) {
		/* on registration failure, undo what init set up */
		destroy_workqueue(mlx5_ib_event_wq);
		free_page(xlt_emergency_page);
	}

	return err;
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	destroy_workqueue(mlx5_ib_event_wq);
	mutex_destroy(&xlt_emergency_page_mutex);
	free_page(xlt_emergency_page);
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);