/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include "user.h"
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "2.2-1"
#define DRIVER_RELDATE  "Feb 2014"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");

static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

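/*
 * mlx5_ib is the InfiniBand half of the mlx5 driver pair: mlx5_core owns
 * the PCI device, the firmware command interface and the event queues,
 * while this module builds and registers the corresponding verbs device
 * with the RDMA core.
 */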
static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
        switch (port_type_cap) {
        case MLX5_CAP_PORT_TYPE_IB:
                return IB_LINK_LAYER_INFINIBAND;
        case MLX5_CAP_PORT_TYPE_ETH:
                return IB_LINK_LAYER_ETHERNET;
        default:
                return IB_LINK_LAYER_UNSPECIFIED;
        }
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

        return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

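/*
 * For Ethernet (RoCE) ports the driver tracks the net_device that
 * mlx5_core creates on the same PCI function.  A netdevice notifier
 * caches the pointer under roce.netdev_lock so mlx5_ib_get_netdev() can
 * hand out a properly referenced net_device.
 */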
static int mlx5_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
                                                 roce.nb);

        if ((event != NETDEV_UNREGISTER) && (event != NETDEV_REGISTER))
                return NOTIFY_DONE;

        write_lock(&ibdev->roce.netdev_lock);
        if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
                ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev;
        write_unlock(&ibdev->roce.netdev_lock);

        return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
                                             u8 port_num)
{
        struct mlx5_ib_dev *ibdev = to_mdev(device);
        struct net_device *ndev;

        /* Ensure ndev does not disappear before we invoke dev_hold()
         */
        read_lock(&ibdev->roce.netdev_lock);
        ndev = ibdev->roce.netdev;
        if (ndev)
                dev_hold(ndev);
        read_unlock(&ibdev->roce.netdev_lock);

        return ndev;
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
        return !dev->mdev->issi;
}

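/*
 * Note on the dispatch below: firmware reporting ISSI 0 (the original
 * software interface revision) is queried through MADs; later revisions
 * expose dedicated HCA vport commands, and Ethernet ports use the NIC
 * vport flavor of those commands.
 */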
enum {
        MLX5_VPORT_ACCESS_METHOD_MAD,
        MLX5_VPORT_ACCESS_METHOD_HCA,
        MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
        if (mlx5_use_mad_ifc(to_mdev(ibdev)))
                return MLX5_VPORT_ACCESS_METHOD_MAD;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET)
                return MLX5_VPORT_ACCESS_METHOD_NIC;

        return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
                                        __be64 *sys_image_guid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_system_image_guid(ibdev,
                                                            sys_image_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
                if (!err)
                        *sys_image_guid = cpu_to_be64(tmp);
                return err;

        default:
                return -EINVAL;
        }
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
                                u16 *max_pkeys)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
                                                pkey_table_size));
                return 0;

        default:
                return -EINVAL;
        }
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
                                u32 *vendor_id)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

        default:
                return -EINVAL;
        }
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
                                __be64 *node_guid)
{
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_node_guid(dev, node_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
                if (!err)
                        *node_guid = cpu_to_be64(tmp);
                return err;

        default:
                return -EINVAL;
        }
}

struct mlx5_reg_node_desc {
        u8      desc[64];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
        struct mlx5_reg_node_desc in;

        if (mlx5_use_mad_ifc(dev))
                return mlx5_query_mad_ifc_node_desc(dev, node_desc);

        memset(&in, 0, sizeof(in));

        return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
                                    sizeof(struct mlx5_reg_node_desc),
                                    MLX5_REG_NODE_DESC, 0, 0);
}

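/*
 * Device attributes are assembled almost entirely from the general
 * capability page cached by mlx5_core.  Most limits are stored as log2
 * values (log_max_qp, log_max_cq, ...), so "1 << MLX5_CAP_GEN(...)"
 * converts them to the absolute limits struct ib_device_attr expects.
 */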
static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props,
                                struct ib_udata *uhw)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        int max_rq_sg;
        int max_sq_sg;
        int err;
        u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        memset(props, 0, sizeof(*props));
        err = mlx5_query_system_image_guid(ibdev,
                                           &props->sys_image_guid);
        if (err)
                return err;

        err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
        if (err)
                return err;

        err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
        if (err)
                return err;

        props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
                (fw_rev_min(dev->mdev) << 16) |
                fw_rev_sub(dev->mdev);
        props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT |
                IB_DEVICE_SYS_IMAGE_GUID |
                IB_DEVICE_RC_RNR_NAK_GEN;

        if (MLX5_CAP_GEN(mdev, pkv))
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, qkv))
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
                /* At this stage no support for signature handover */
                props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
                        IB_PROT_T10DIF_TYPE_2 |
                        IB_PROT_T10DIF_TYPE_3;
                props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
                        IB_GUARD_T10DIF_CSUM;
        }
        if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

        props->vendor_part_id = mdev->pdev->device;
        props->hw_ver = mdev->pdev->revision;

        props->max_mr_size = ~0ull;
        props->page_size_cap = ~(min_page_size - 1);
        props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
        props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
        max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
                    sizeof(struct mlx5_wqe_data_seg);
        max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
                     sizeof(struct mlx5_wqe_ctrl_seg)) /
                    sizeof(struct mlx5_wqe_data_seg);
        props->max_sge = min(max_rq_sg, max_sq_sg);
        props->max_sge_rd = props->max_sge;
        props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
        /* CQ depth is bounded by the CQ size capability, not the EQ size */
        props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
        props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
        props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
        props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
        props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
        props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
        props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
        props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
        props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
        props->max_srq_sge = max_rq_sg - 1;
        props->max_fast_reg_page_list_len = (unsigned int)-1;
        props->atomic_cap = IB_ATOMIC_NONE;
        props->masked_atomic_cap = IB_ATOMIC_NONE;
        props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
        props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (MLX5_CAP_GEN(mdev, pg))
                props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
        props->odp_caps = dev->odp_caps;
#endif

        return 0;
}

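/*
 * The PTYS register reports the active link width as a bitmask, one bit
 * per lane count; translate_active_width() maps it onto the IB_WIDTH_*
 * enum.  2X is rejected because the IB spec of this era defines no 2X
 * link width.
 */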
enum mlx5_ib_width {
        MLX5_IB_WIDTH_1X        = 1 << 0,
        MLX5_IB_WIDTH_2X        = 1 << 1,
        MLX5_IB_WIDTH_4X        = 1 << 2,
        MLX5_IB_WIDTH_8X        = 1 << 3,
        MLX5_IB_WIDTH_12X       = 1 << 4
};

static int translate_active_width(struct ib_device *ibdev, u8 active_width,
                                  u8 *ib_width)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        int err = 0;

        if (active_width & MLX5_IB_WIDTH_1X) {
                *ib_width = IB_WIDTH_1X;
        } else if (active_width & MLX5_IB_WIDTH_2X) {
                mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
                            (int)active_width);
                err = -EINVAL;
        } else if (active_width & MLX5_IB_WIDTH_4X) {
                *ib_width = IB_WIDTH_4X;
        } else if (active_width & MLX5_IB_WIDTH_8X) {
                *ib_width = IB_WIDTH_8X;
        } else if (active_width & MLX5_IB_WIDTH_12X) {
                *ib_width = IB_WIDTH_12X;
        } else {
                mlx5_ib_dbg(dev, "Invalid active_width %d\n",
                            (int)active_width);
                err = -EINVAL;
        }

        return err;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
{
        switch (mtu) {
        case 256: return 1;
        case 512: return 2;
        case 1024: return 3;
        case 2048: return 4;
        case 4096: return 5;
        default:
                pr_warn("invalid mtu\n");
                return -1;
        }
}

enum ib_max_vl_num {
        __IB_MAX_VL_0           = 1,
        __IB_MAX_VL_0_1         = 2,
        __IB_MAX_VL_0_3         = 3,
        __IB_MAX_VL_0_7         = 4,
        __IB_MAX_VL_0_14        = 5,
};

enum mlx5_vl_hw_cap {
        MLX5_VL_HW_0            = 1,
        MLX5_VL_HW_0_1          = 2,
        MLX5_VL_HW_0_2          = 3,
        MLX5_VL_HW_0_3          = 4,
        MLX5_VL_HW_0_4          = 5,
        MLX5_VL_HW_0_5          = 6,
        MLX5_VL_HW_0_6          = 7,
        MLX5_VL_HW_0_7          = 8,
        MLX5_VL_HW_0_14         = 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
                                u8 *max_vl_num)
{
        switch (vl_hw_cap) {
        case MLX5_VL_HW_0:
                *max_vl_num = __IB_MAX_VL_0;
                break;
        case MLX5_VL_HW_0_1:
                *max_vl_num = __IB_MAX_VL_0_1;
                break;
        case MLX5_VL_HW_0_3:
                *max_vl_num = __IB_MAX_VL_0_3;
                break;
        case MLX5_VL_HW_0_7:
                *max_vl_num = __IB_MAX_VL_0_7;
                break;
        case MLX5_VL_HW_0_14:
                *max_vl_num = __IB_MAX_VL_0_14;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

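/*
 * Native (non-MAD) port query: one HCA vport context read covers the
 * SMA-visible attributes, then link width, speed, MTU and VL capability
 * are fetched through the dedicated port access commands.
 */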
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
                               struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_hca_vport_context *rep;
        int max_mtu;
        int oper_mtu;
        int err;
        u8 ib_link_width_oper;
        u8 vl_hw_cap;

        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (!rep) {
                err = -ENOMEM;
                goto out;
        }

        memset(props, 0, sizeof(*props));

        err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
        if (err)
                goto out;

        props->lid              = rep->lid;
        props->lmc              = rep->lmc;
        props->sm_lid           = rep->sm_lid;
        props->sm_sl            = rep->sm_sl;
        props->state            = rep->vport_state;
        props->phys_state       = rep->port_physical_state;
        props->port_cap_flags   = rep->cap_mask1;
        props->gid_tbl_len      = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
        props->pkey_tbl_len     = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
        props->bad_pkey_cntr    = rep->pkey_violation_counter;
        props->qkey_viol_cntr   = rep->qkey_violation_counter;
        props->subnet_timeout   = rep->subnet_timeout;
        props->init_type_reply  = rep->init_type_reply;

        err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
        if (err)
                goto out;

        err = translate_active_width(ibdev, ib_link_width_oper,
                                     &props->active_width);
        if (err)
                goto out;
        err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
                                         port);
        if (err)
                goto out;

        mlx5_query_port_max_mtu(mdev, &max_mtu, port);

        props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

        mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

        props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

        err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
        if (err)
                goto out;

        err = translate_max_vl_num(ibdev, vl_hw_cap,
                                   &props->max_vl_num);
out:
        kfree(rep);
        return err;
}

int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props)
{
        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_port(ibdev, port, props);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                return mlx5_query_hca_port(ibdev, port, props);

        default:
                return -EINVAL;
        }
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

        default:
                return -EINVAL;
        }
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                              u16 *pkey)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
                                                 pkey);
        default:
                return -EINVAL;
        }
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_reg_node_desc in;
        struct mlx5_reg_node_desc out;
        int err;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                return 0;

        /*
         * If possible, pass node desc to FW, so it can generate
         * a 144 trap.  If cmd fails, just ignore.
         */
        memcpy(&in, props->node_desc, 64);
        err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
                                   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
        if (err)
                return err;

        memcpy(ibdev->node_desc, props->node_desc, 64);

        return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
                               struct ib_port_modify *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct ib_port_attr attr;
        u32 tmp;
        int err;

        mutex_lock(&dev->cap_mask_mutex);

        err = mlx5_ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
        mutex_unlock(&dev->cap_mask_mutex);
        return err;
}

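/*
 * User context setup negotiates the UAR/blue-flame layout with
 * userspace: the request arrives in either the v0 or the v2 ABI struct,
 * the requested UUAR count is rounded up to whole UAR pages
 * (MLX5_NON_FP_BF_REGS_PER_PAGE regular registers per page), and the
 * fast-path registers in each page (uuarn 2 and 3 of every group of
 * four) are pre-marked in the bitmap so regular allocations skip them.
 */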
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_alloc_ucontext_req_v2 req;
        struct mlx5_ib_alloc_ucontext_resp resp;
        struct mlx5_ib_ucontext *context;
        struct mlx5_uuar_info *uuari;
        struct mlx5_uar *uars;
        int gross_uuars;
        int num_uars;
        int ver;
        int uuarn;
        int err;
        int i;
        size_t reqlen;

        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);

        memset(&req, 0, sizeof(req));
        reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
        if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
                ver = 0;
        else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
                ver = 2;
        else
                return ERR_PTR(-EINVAL);

        err = ib_copy_from_udata(&req, udata, reqlen);
        if (err)
                return ERR_PTR(err);

        if (req.flags || req.reserved)
                return ERR_PTR(-EINVAL);

        if (req.total_num_uuars > MLX5_MAX_UUARS)
                return ERR_PTR(-ENOMEM);

        if (req.total_num_uuars == 0)
                return ERR_PTR(-EINVAL);

        req.total_num_uuars = ALIGN(req.total_num_uuars,
                                    MLX5_NON_FP_BF_REGS_PER_PAGE);
        if (req.num_low_latency_uuars > req.total_num_uuars - 1)
                return ERR_PTR(-EINVAL);

        num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
        gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
        resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
        resp.cache_line_size = L1_CACHE_BYTES;
        resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
        resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
        resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
        resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
        resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        uuari = &context->uuari;
        mutex_init(&uuari->lock);
        uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
        if (!uars) {
                err = -ENOMEM;
                goto out_ctx;
        }

        uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
                                sizeof(*uuari->bitmap),
                                GFP_KERNEL);
        if (!uuari->bitmap) {
                err = -ENOMEM;
                goto out_uar_ctx;
        }
        /*
         * clear all fast path uuars
         */
        for (i = 0; i < gross_uuars; i++) {
                uuarn = i & 3;
                if (uuarn == 2 || uuarn == 3)
                        set_bit(i, uuari->bitmap);
        }

        uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
        if (!uuari->count) {
                err = -ENOMEM;
                goto out_bitmap;
        }

        for (i = 0; i < num_uars; i++) {
                err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
                if (err)
                        goto out_count;
        }

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        resp.tot_uuars = req.total_num_uuars;
        resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
        err = ib_copy_to_udata(udata, &resp,
                               sizeof(resp) - sizeof(resp.reserved));
        if (err)
                goto out_uars;

        uuari->ver = ver;
        uuari->num_low_latency_uuars = req.num_low_latency_uuars;
        uuari->uars = uars;
        uuari->num_uars = num_uars;
        return &context->ibucontext;

out_uars:
        for (i--; i >= 0; i--)
                mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
        kfree(uuari->count);

out_bitmap:
        kfree(uuari->bitmap);

out_uar_ctx:
        kfree(uars);

out_ctx:
        kfree(context);
        return ERR_PTR(err);
}

static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
        struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
        struct mlx5_uuar_info *uuari = &context->uuari;
        int i;

        for (i = 0; i < uuari->num_uars; i++) {
                if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
                        mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
        }

        kfree(uuari->count);
        kfree(uuari->bitmap);
        kfree(uuari->uars);
        kfree(context);

        return 0;
}

static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
        return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
}

static int get_command(unsigned long offset)
{
        return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
        return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
        return get_arg(offset);
}

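/*
 * The mmap offset doubles as a command word: the bits above
 * MLX5_IB_MMAP_CMD_SHIFT select the command (regular UAR page,
 * contiguous pages, ...) and the low bits carry its argument, which for
 * UAR mappings is the index into the context's UAR array.
 */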
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
        struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
        struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
        struct mlx5_uuar_info *uuari = &context->uuari;
        unsigned long command;
        unsigned long idx;
        phys_addr_t pfn;

        command = get_command(vma->vm_pgoff);
        switch (command) {
        case MLX5_IB_MMAP_REGULAR_PAGE:
                if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                        return -EINVAL;

                idx = get_index(vma->vm_pgoff);
                if (idx >= uuari->num_uars)
                        return -EINVAL;

                pfn = uar_index2pfn(dev, uuari->uars[idx].index);
                mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
                            (unsigned long long)pfn);

                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                if (io_remap_pfn_range(vma, vma->vm_start, pfn,
                                       PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;

                mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
                            vma->vm_start,
                            (unsigned long long)pfn << PAGE_SHIFT);
                break;

        case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
                return -ENOSYS;

        default:
                return -EINVAL;
        }

        return 0;
}

static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct mlx5_ib_alloc_pd_resp resp;
        struct mlx5_ib_pd *pd;
        int err;

        pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                resp.pdn = pd->pdn;
                if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
                        mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}

static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
        struct mlx5_ib_dev *mdev = to_mdev(pd->device);
        struct mlx5_ib_pd *mpd = to_mpd(pd);

        mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
        kfree(mpd);

        return 0;
}

static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        int err;

        err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
        if (err)
                mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
                             ibqp->qp_num, gid->raw);

        return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        int err;

        err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
        if (err)
                mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
                             ibqp->qp_num, gid->raw);

        return err;
}

static int init_node_data(struct mlx5_ib_dev *dev)
{
        int err;

        err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
        if (err)
                return err;

        dev->mdev->rev_id = dev->mdev->pdev->revision;

        return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}

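/*
 * sysfs attributes exported under the ib device: firmware and registered
 * page counts, HCA type and revision, firmware version and board id, all
 * read straight from mlx5_core state.
 */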
static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
                             char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);

        return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}

static ssize_t show_reg_pages(struct device *device,
                              struct device_attribute *attr, char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);

        return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);

        return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);

        return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
                       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);

        return sprintf(buf, "%x\n", dev->mdev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);

        return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
                       dev->mdev->board_id);
}

static DEVICE_ATTR(hw_rev,    S_IRUGO, show_rev,       NULL);
static DEVICE_ATTR(fw_ver,    S_IRUGO, show_fw_ver,    NULL);
static DEVICE_ATTR(hca_type,  S_IRUGO, show_hca,       NULL);
static DEVICE_ATTR(board_id,  S_IRUGO, show_board,     NULL);
static DEVICE_ATTR(fw_pages,  S_IRUGO, show_fw_pages,  NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);

static struct device_attribute *mlx5_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id,
        &dev_attr_fw_pages,
        &dev_attr_reg_pages,
};

static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
                          enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
        struct ib_event ibev;
        u8 port = 0;

        switch (event) {
        case MLX5_DEV_EVENT_SYS_ERROR:
                ibdev->ib_active = false;
                ibev.event = IB_EVENT_DEVICE_FATAL;
                break;

        case MLX5_DEV_EVENT_PORT_UP:
                ibev.event = IB_EVENT_PORT_ACTIVE;
                port = (u8)param;
                break;

        case MLX5_DEV_EVENT_PORT_DOWN:
                ibev.event = IB_EVENT_PORT_ERR;
                port = (u8)param;
                break;

        case MLX5_DEV_EVENT_PORT_INITIALIZED:
                /* not used by ULPs */
                return;

        case MLX5_DEV_EVENT_LID_CHANGE:
                ibev.event = IB_EVENT_LID_CHANGE;
                port = (u8)param;
                break;

        case MLX5_DEV_EVENT_PKEY_CHANGE:
                ibev.event = IB_EVENT_PKEY_CHANGE;
                port = (u8)param;
                break;

        case MLX5_DEV_EVENT_GUID_CHANGE:
                ibev.event = IB_EVENT_GID_CHANGE;
                port = (u8)param;
                break;

        case MLX5_DEV_EVENT_CLIENT_REREG:
                ibev.event = IB_EVENT_CLIENT_REREGISTER;
                port = (u8)param;
                break;
        }

        ibev.device           = &ibdev->ib_dev;
        ibev.element.port_num = port;

        if (port < 1 || port > ibdev->num_ports) {
                mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
                return;
        }

        if (ibdev->ib_active)
                ib_dispatch_event(&ibev);
}

static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
        int port;

        for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
                mlx5_query_ext_port_caps(dev, port);
}

static int get_port_caps(struct mlx5_ib_dev *dev)
{
        struct ib_device_attr *dprops = NULL;
        struct ib_port_attr *pprops = NULL;
        int err = -ENOMEM;
        int port;
        struct ib_udata uhw = {.inlen = 0, .outlen = 0};

        pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
        if (!pprops)
                goto out;

        dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
        if (!dprops)
                goto out;

        err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
        if (err) {
                mlx5_ib_warn(dev, "query_device failed %d\n", err);
                goto out;
        }

        for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
                err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
                if (err) {
                        mlx5_ib_warn(dev, "query_port %d failed %d\n",
                                     port, err);
                        break;
                }
                dev->mdev->port_caps[port - 1].pkey_table_len =
                                                dprops->max_pkeys;
                dev->mdev->port_caps[port - 1].gid_table_len =
                                                pprops->gid_tbl_len;
                mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
                            dprops->max_pkeys, pprops->gid_tbl_len);
        }

out:
        kfree(pprops);
        kfree(dprops);

        return err;
}

static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
        int err;

        err = mlx5_mr_cache_cleanup(dev);
        if (err)
                mlx5_ib_warn(dev, "mr cache cleanup failed\n");

        mlx5_ib_destroy_qp(dev->umrc.qp);
        ib_destroy_cq(dev->umrc.cq);
        ib_dealloc_pd(dev->umrc.pd);
}

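/*
 * The UMR (user-mode memory registration) QP is a driver-internal
 * loopback QP used to post registration and invalidation work requests
 * for the MR cache.  It never connects to a remote peer, so the usual
 * CM handshake is skipped and the QP is walked through
 * INIT -> RTR -> RTS by hand below.
 */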
static int create_umr_res(struct mlx5_ib_dev *dev)
{
        struct ib_qp_init_attr *init_attr = NULL;
        struct ib_qp_attr *attr = NULL;
        struct ib_pd *pd;
        struct ib_cq *cq;
        struct ib_qp *qp;
        struct ib_cq_init_attr cq_attr = {};
        int ret;

        attr = kzalloc(sizeof(*attr), GFP_KERNEL);
        init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
        if (!attr || !init_attr) {
                ret = -ENOMEM;
                goto error_0;
        }

        pd = ib_alloc_pd(&dev->ib_dev);
        if (IS_ERR(pd)) {
                mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
                ret = PTR_ERR(pd);
                goto error_0;
        }

        cq_attr.cqe = 128;
        cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
                          &cq_attr);
        if (IS_ERR(cq)) {
                mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
                ret = PTR_ERR(cq);
                goto error_2;
        }
        ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

        init_attr->send_cq = cq;
        init_attr->recv_cq = cq;
        init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->cap.max_send_wr = MAX_UMR_WR;
        init_attr->cap.max_send_sge = 1;
        init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
        init_attr->port_num = 1;
        qp = mlx5_ib_create_qp(pd, init_attr, NULL);
        if (IS_ERR(qp)) {
                mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
                ret = PTR_ERR(qp);
                goto error_3;
        }
        qp->device     = &dev->ib_dev;
        qp->real_qp    = qp;
        qp->uobject    = NULL;
        qp->qp_type    = MLX5_IB_QPT_REG_UMR;

        attr->qp_state = IB_QPS_INIT;
        attr->port_num = 1;
        ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
                                IB_QP_PORT, NULL);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
                goto error_4;
        }

        memset(attr, 0, sizeof(*attr));
        attr->qp_state = IB_QPS_RTR;
        attr->path_mtu = IB_MTU_256;

        ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
                goto error_4;
        }

        memset(attr, 0, sizeof(*attr));
        attr->qp_state = IB_QPS_RTS;
        ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
        if (ret) {
                mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
                goto error_4;
        }

        dev->umrc.qp = qp;
        dev->umrc.cq = cq;
        dev->umrc.pd = pd;

        sema_init(&dev->umrc.sem, MAX_UMR_WR);
        ret = mlx5_mr_cache_init(dev);
        if (ret) {
                mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
                goto error_4;
        }

        kfree(attr);
        kfree(init_attr);

        return 0;

error_4:
        mlx5_ib_destroy_qp(qp);

error_3:
        ib_destroy_cq(cq);

error_2:
        ib_dealloc_pd(pd);

error_0:
        kfree(attr);
        kfree(init_attr);
        return ret;
}

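/*
 * Static device resources: a PD, a CQ, two XRC domains and two SRQs
 * (one XRC, one basic) kept for kernel-side XRC usage.  They are built
 * by calling the driver's own verbs entry points directly, so the
 * bookkeeping fields that ib_core would normally fill in have to be set
 * manually.
 */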
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
        struct ib_srq_init_attr attr;
        struct mlx5_ib_dev *dev;
        struct ib_cq_init_attr cq_attr = {.cqe = 1};
        int ret = 0;

        dev = container_of(devr, struct mlx5_ib_dev, devr);

        devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
        if (IS_ERR(devr->p0)) {
                ret = PTR_ERR(devr->p0);
                goto error0;
        }
        devr->p0->device  = &dev->ib_dev;
        devr->p0->uobject = NULL;
        atomic_set(&devr->p0->usecnt, 0);

        devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
        if (IS_ERR(devr->c0)) {
                ret = PTR_ERR(devr->c0);
                goto error1;
        }
        devr->c0->device        = &dev->ib_dev;
        devr->c0->uobject       = NULL;
        devr->c0->comp_handler  = NULL;
        devr->c0->event_handler = NULL;
        devr->c0->cq_context    = NULL;
        atomic_set(&devr->c0->usecnt, 0);

        devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
        if (IS_ERR(devr->x0)) {
                ret = PTR_ERR(devr->x0);
                goto error2;
        }
        devr->x0->device = &dev->ib_dev;
        devr->x0->inode = NULL;
        atomic_set(&devr->x0->usecnt, 0);
        mutex_init(&devr->x0->tgt_qp_mutex);
        INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

        devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
        if (IS_ERR(devr->x1)) {
                ret = PTR_ERR(devr->x1);
                goto error3;
        }
        devr->x1->device = &dev->ib_dev;
        devr->x1->inode = NULL;
        atomic_set(&devr->x1->usecnt, 0);
        mutex_init(&devr->x1->tgt_qp_mutex);
        INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

        memset(&attr, 0, sizeof(attr));
        attr.attr.max_sge = 1;
        attr.attr.max_wr = 1;
        attr.srq_type = IB_SRQT_XRC;
        attr.ext.xrc.cq = devr->c0;
        attr.ext.xrc.xrcd = devr->x0;

        devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
        if (IS_ERR(devr->s0)) {
                ret = PTR_ERR(devr->s0);
                goto error4;
        }
        devr->s0->device        = &dev->ib_dev;
        devr->s0->pd            = devr->p0;
        devr->s0->uobject       = NULL;
        devr->s0->event_handler = NULL;
        devr->s0->srq_context   = NULL;
        devr->s0->srq_type      = IB_SRQT_XRC;
        devr->s0->ext.xrc.xrcd  = devr->x0;
        devr->s0->ext.xrc.cq    = devr->c0;
        atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
        atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
        atomic_inc(&devr->p0->usecnt);
        atomic_set(&devr->s0->usecnt, 0);

        memset(&attr, 0, sizeof(attr));
        attr.attr.max_sge = 1;
        attr.attr.max_wr = 1;
        attr.srq_type = IB_SRQT_BASIC;
        devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
        if (IS_ERR(devr->s1)) {
                ret = PTR_ERR(devr->s1);
                goto error5;
        }
        devr->s1->device        = &dev->ib_dev;
        devr->s1->pd            = devr->p0;
        devr->s1->uobject       = NULL;
        devr->s1->event_handler = NULL;
        devr->s1->srq_context   = NULL;
        devr->s1->srq_type      = IB_SRQT_BASIC;
        devr->s1->ext.xrc.cq    = devr->c0;
        atomic_inc(&devr->p0->usecnt);
        /* initialize s1's refcount, not s0's (fixed copy-paste slip) */
        atomic_set(&devr->s1->usecnt, 0);

        return 0;

error5:
        mlx5_ib_destroy_srq(devr->s0);
error4:
        mlx5_ib_dealloc_xrcd(devr->x1);
error3:
        mlx5_ib_dealloc_xrcd(devr->x0);
error2:
        mlx5_ib_destroy_cq(devr->c0);
error1:
        mlx5_ib_dealloc_pd(devr->p0);
error0:
        return ret;
}

static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
        mlx5_ib_destroy_srq(devr->s1);
        mlx5_ib_destroy_srq(devr->s0);
        mlx5_ib_dealloc_xrcd(devr->x0);
        mlx5_ib_dealloc_xrcd(devr->x1);
        mlx5_ib_destroy_cq(devr->c0);
        mlx5_ib_dealloc_pd(devr->p0);
}

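/*
 * Immutable per-port data cached by ib_core at registration time.  This
 * driver instance only serves IB link-layer ports (see mlx5_ib_add), so
 * the core capability flags are fixed to RDMA_CORE_PORT_IBA_IB.
 */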
static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
                               struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        err = mlx5_ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}

static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
{
        dev->roce.nb.notifier_call = mlx5_netdev_event;
        return register_netdevice_notifier(&dev->roce.nb);
}

static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
{
        unregister_netdevice_notifier(&dev->roce.nb);
}

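/*
 * Probe path, called by mlx5_core for every mlx5 function it brings up:
 * query port and device caps, fill in the ib_device ops and command
 * masks, create the static resources and the UMR machinery, then
 * register with ib_core and expose the sysfs attributes.  Failures
 * unwind through the error labels at the bottom in reverse order.
 */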
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
        struct mlx5_ib_dev *dev;
        enum rdma_link_layer ll;
        int port_type_cap;
        int err;
        int i;

        port_type_cap = MLX5_CAP_GEN(mdev, port_type);
        ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

        /* don't create IB instance over Eth ports, no RoCE yet! */
        if (ll == IB_LINK_LAYER_ETHERNET)
                return NULL;

        printk_once(KERN_INFO "%s", mlx5_version);

        dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
        if (!dev)
                return NULL;

        dev->mdev = mdev;

        rwlock_init(&dev->roce.netdev_lock);
        err = get_port_caps(dev);
        if (err)
                goto err_dealloc;

        if (mlx5_use_mad_ifc(dev))
                get_ext_port_caps(dev);

        MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

        strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner               = THIS_MODULE;
        dev->ib_dev.node_type           = RDMA_NODE_IB_CA;
        dev->ib_dev.local_dma_lkey      = 0 /* not supported for now */;
        dev->num_ports                  = MLX5_CAP_GEN(mdev, num_ports);
        dev->ib_dev.phys_port_cnt       = dev->num_ports;
        dev->ib_dev.num_comp_vectors    =
                dev->mdev->priv.eq_table.num_comp_vectors;
        dev->ib_dev.dma_device          = &mdev->pdev->dev;

        dev->ib_dev.uverbs_abi_ver      = MLX5_IB_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask     =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
        dev->ib_dev.uverbs_ex_cmd_mask  =
                (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
        dev->ib_dev.get_link_layer      = mlx5_ib_port_link_layer;
        if (ll == IB_LINK_LAYER_ETHERNET)
                dev->ib_dev.get_netdev  = mlx5_ib_get_netdev;
        dev->ib_dev.query_gid           = mlx5_ib_query_gid;
        dev->ib_dev.query_pkey          = mlx5_ib_query_pkey;
        dev->ib_dev.modify_device       = mlx5_ib_modify_device;
        dev->ib_dev.modify_port         = mlx5_ib_modify_port;
        dev->ib_dev.alloc_ucontext      = mlx5_ib_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext    = mlx5_ib_dealloc_ucontext;
        dev->ib_dev.mmap                = mlx5_ib_mmap;
        dev->ib_dev.alloc_pd            = mlx5_ib_alloc_pd;
        dev->ib_dev.dealloc_pd          = mlx5_ib_dealloc_pd;
        dev->ib_dev.create_ah           = mlx5_ib_create_ah;
        dev->ib_dev.query_ah            = mlx5_ib_query_ah;
        dev->ib_dev.destroy_ah          = mlx5_ib_destroy_ah;
        dev->ib_dev.create_srq          = mlx5_ib_create_srq;
        dev->ib_dev.modify_srq          = mlx5_ib_modify_srq;
        dev->ib_dev.query_srq           = mlx5_ib_query_srq;
        dev->ib_dev.destroy_srq         = mlx5_ib_destroy_srq;
        dev->ib_dev.post_srq_recv       = mlx5_ib_post_srq_recv;
        dev->ib_dev.create_qp           = mlx5_ib_create_qp;
        dev->ib_dev.modify_qp           = mlx5_ib_modify_qp;
        dev->ib_dev.query_qp            = mlx5_ib_query_qp;
        dev->ib_dev.destroy_qp          = mlx5_ib_destroy_qp;
        dev->ib_dev.post_send           = mlx5_ib_post_send;
        dev->ib_dev.post_recv           = mlx5_ib_post_recv;
        dev->ib_dev.create_cq           = mlx5_ib_create_cq;
        dev->ib_dev.modify_cq           = mlx5_ib_modify_cq;
        dev->ib_dev.resize_cq           = mlx5_ib_resize_cq;
        dev->ib_dev.destroy_cq          = mlx5_ib_destroy_cq;
        dev->ib_dev.poll_cq             = mlx5_ib_poll_cq;
        dev->ib_dev.req_notify_cq       = mlx5_ib_arm_cq;
        dev->ib_dev.get_dma_mr          = mlx5_ib_get_dma_mr;
        dev->ib_dev.reg_user_mr         = mlx5_ib_reg_user_mr;
        dev->ib_dev.dereg_mr            = mlx5_ib_dereg_mr;
        dev->ib_dev.attach_mcast        = mlx5_ib_mcg_attach;
        dev->ib_dev.detach_mcast        = mlx5_ib_mcg_detach;
        dev->ib_dev.process_mad         = mlx5_ib_process_mad;
        dev->ib_dev.alloc_mr            = mlx5_ib_alloc_mr;
        dev->ib_dev.map_mr_sg           = mlx5_ib_map_mr_sg;
        dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
        dev->ib_dev.get_port_immutable  = mlx5_port_immutable;

        mlx5_ib_internal_fill_odp_caps(dev);

        if (MLX5_CAP_GEN(mdev, xrc)) {
                dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
                dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
                dev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
                        (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
        }

        err = init_node_data(dev);
        if (err)
                goto err_dealloc;

        mutex_init(&dev->cap_mask_mutex);

        if (ll == IB_LINK_LAYER_ETHERNET) {
                err = mlx5_enable_roce(dev);
                if (err)
                        goto err_dealloc;
        }

        err = create_dev_resources(&dev->devr);
        if (err)
                goto err_disable_roce;

        err = mlx5_ib_odp_init_one(dev);
        if (err)
                goto err_rsrc;

        err = ib_register_device(&dev->ib_dev, NULL);
        if (err)
                goto err_odp;

        err = create_umr_res(dev);
        if (err)
                goto err_dev;

        for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
                err = device_create_file(&dev->ib_dev.dev,
                                         mlx5_class_attributes[i]);
                if (err)
                        goto err_umrc;
        }

        dev->ib_active = true;

        return dev;

err_umrc:
        destroy_umrc_res(dev);

err_dev:
        ib_unregister_device(&dev->ib_dev);

err_odp:
        mlx5_ib_odp_remove_one(dev);

err_rsrc:
        destroy_dev_resources(&dev->devr);

err_disable_roce:
        if (ll == IB_LINK_LAYER_ETHERNET)
                mlx5_disable_roce(dev);

err_dealloc:
        ib_dealloc_device((struct ib_device *)dev);

        return NULL;
}

static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
        struct mlx5_ib_dev *dev = context;
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);

        ib_unregister_device(&dev->ib_dev);
        destroy_umrc_res(dev);
        mlx5_ib_odp_remove_one(dev);
        destroy_dev_resources(&dev->devr);
        if (ll == IB_LINK_LAYER_ETHERNET)
                mlx5_disable_roce(dev);
        ib_dealloc_device(&dev->ib_dev);
}

static struct mlx5_interface mlx5_ib_interface = {
        .add            = mlx5_ib_add,
        .remove         = mlx5_ib_remove,
        .event          = mlx5_ib_event,
        .protocol       = MLX5_INTERFACE_PROTOCOL_IB,
};

static int __init mlx5_ib_init(void)
{
        int err;

        if (deprecated_prof_sel != 2)
                pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");

        err = mlx5_ib_odp_init();
        if (err)
                return err;

        err = mlx5_register_interface(&mlx5_ib_interface);
        if (err)
                goto clean_odp;

        return err;

clean_odp:
        mlx5_ib_odp_cleanup();
        return err;
}

static void __exit mlx5_ib_cleanup(void)
{
        mlx5_unregister_interface(&mlx5_ib_interface);
        mlx5_ib_odp_cleanup();
}

module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);