/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/version.h>
#include <net/devlink.h>
#include "mlx5_core.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
#include "lib/devcom.h"
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"
#include "lib/hv_vhca.h"
#include "diag/rsc_dump.h"
#include "sf/vhca_event.h"
#include "sf/dev/dev.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");

unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
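/*
 * Example usage (illustrative values):
 *
 *	modprobe mlx5_core debug_mask=0x3 prof_sel=2
 *
 * debug_mask is a bit mask (bit 0 dumps command data, bit 1 dumps command
 * execution times); prof_sel indexes the profile[] array defined below.
 */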
static u32 sw_owner_id[4];
enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

static struct mlx5_profile profile[] = {
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
	},
};
#define FW_INIT_TIMEOUT_MILI		2000
#define FW_INIT_WAIT_MS			2
#define FW_PRE_INIT_TIMEOUT_MILI	120000
#define FW_INIT_WARN_MESSAGE_INTERVAL	20000
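/*
 * While booting, firmware keeps the top bit of the "initializing" word in
 * the initialization segment set. fw_initializing() below reads that bit,
 * and wait_fw_init() polls it every FW_INIT_WAIT_MS milliseconds until the
 * firmware clears it or the given timeout expires.
 */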
static int fw_initializing(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->initializing) >> 31;
}

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
			u32 warn_time_mili)
{
	unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		if (warn_time_mili && time_after(jiffies, warn)) {
			mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds\n",
				       jiffies_to_msecs(end - warn) / 1000);
			warn = jiffies + msecs_to_jiffies(warn_time_mili);
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}
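/*
 * Report the running driver to firmware as "Linux,<module>,<kernel version>",
 * e.g. "Linux,mlx5_core,5.13.0" (illustrative value). The string travels in
 * the SET_DRIVER_VERSION command payload and is only sent when the device
 * advertises the driver_version capability.
 */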
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
	int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
					      driver_version);
	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
	int remaining_size = driver_ver_sz;
	char *string;

	if (!MLX5_CAP_GEN(dev, driver_version))
		return;

	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

	strncpy(string, "Linux", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, KBUILD_MODNAME, remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));

	snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
		 LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
		 LINUX_VERSION_SUBLEVEL);

	/* Send the command */
	MLX5_SET(set_driver_version_in, in, opcode,
		 MLX5_CMD_OP_SET_DRIVER_VERSION);

	mlx5_cmd_exec_in(dev, set_driver_version, in);
}
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}
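/*
 * pci_set_dma_mask()/pci_set_consistent_dma_mask() are thin wrappers around
 * the generic DMA API and have since been removed upstream. On kernels
 * without them, an equivalent fallback sequence (a sketch, not taken from
 * this driver) would be:
 *
 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (err)
 *		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 */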
static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}
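/*
 * dev->pci_status tracks whether pci_enable_device() has been called, so
 * that these helpers stay idempotent under pci_status_mutex; the error
 * recovery path (mlx5_pci_err_detected()/mlx5_pci_slot_reset() below) may
 * invoke them repeatedly on the same device.
 */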
static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
struct mlx5_reg_host_endianness {
	u8	he;
	u8	rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	/* The firmware encodes the pkey table size as log2(size) - 7 */
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->caps.hca_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->caps.hca_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}

query_ex:
	kfree(out);
	return err;
}

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;

	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod)
{
	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec_in(dev, set_hca_cap, in);
}
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int req_endianness;
	int err;

	if (!MLX5_CAP_GEN(dev, atomic))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
	if (err)
		return err;

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianness_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
}
static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	bool do_set = false;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
	    !MLX5_CAP_GEN(dev, pg))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
	if (err)
		return err;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP],
	       MLX5_ST_SZ_BYTES(odp_cap));

	/* Raise each ODP capability to its advertised maximum and remember
	 * whether anything actually changed.
	 */
#define ODP_CAP_SET_MAX(dev, field)                                      \
	do {                                                             \
		u32 _res = MLX5_CAP_ODP_MAX(dev, field);                 \
		if (_res) {                                              \
			do_set = true;                                   \
			MLX5_SET(odp_cap, set_hca_cap, field, _res);     \
		}                                                        \
	} while (0)

	ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
	ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);

	if (!do_set)
		return 0;

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
	struct mlx5_profile *prof = &dev->profile;
	void *set_hca_cap;
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	/* Check log_max_qp from HCA caps to set in current profile */
	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
			       prof->log_max_qp,
			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
		prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	/* Enable 4K UAR only when HCA supports it and page size is bigger
	 * than 4k.
	 */
	if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 cache_line_128byte,
			 cache_line_size() >= 128 ? 1 : 0);

	if (MLX5_CAP_GEN_MAX(dev, dct))
		MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);

	if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
		MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);

	if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
		MLX5_SET(cmd_hca_cap,
			 set_hca_cap,
			 num_vhca_ports,
			 MLX5_CAP_GEN_MAX(dev, num_vhca_ports));

	if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
		MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);

	if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
		MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);

	mlx5_vhca_state_cap_handle(dev, set_hca_cap);

	if (MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix))
		MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
			 MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));

	return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
{
	void *set_hca_cap;
	int err;

	if (!MLX5_CAP_GEN(dev, roce))
		return 0;

	err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
	if (err)
		return err;

	if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) ||
	    !MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port))
		return 0;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE],
	       MLX5_ST_SZ_BYTES(roce_cap));
	MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);

	err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
	return err;
}
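/*
 * Capability negotiation: for each group the driver queries the current and
 * maximum values (mlx5_core_get_caps()), adjusts the writable fields it
 * cares about and pushes the result back via SET_HCA_CAP. set_hca_cap()
 * below reuses a single zeroed command buffer for all of the groups.
 */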
static int set_hca_cap(struct mlx5_core_dev *dev)
{
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_ctx;
	int err;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	err = handle_hca_cap(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_atomic(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_odp(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
		goto out;
	}

	memset(set_ctx, 0, set_sz);
	err = handle_hca_cap_roce(dev, set_ctx);
	if (err) {
		mlx5_core_err(dev, "handle_hca_cap_roce failed\n");
		goto out;
	}

out:
	kfree(set_ctx);
	return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianness he_in;
	struct mlx5_reg_host_endianness he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}
static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
	int ret = 0;

	/* Disable local_lb by default */
	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		ret = mlx5_nic_vport_update_local_lb(dev, false);

	return ret;
}
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	MLX5_SET(enable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, enable_hca, in);
}

int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	MLX5_SET(disable_hca_in, in, embedded_cpu_function,
		 dev->caps.embedded_cpu);
	return mlx5_cmd_exec_in(dev, disable_hca, in);
}
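/*
 * ISSI (Interface Step Sequence ID) versions the command interface between
 * driver and firmware. The driver queries the supported-ISSI mask and, when
 * the device supports ISSI 1, switches to it; otherwise it stays on ISSI 0.
 */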
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {};
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (!status || syndrome == MLX5_DRIVER_SYND) {
			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
				      err, status, syndrome);
			return err;
		}

		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec_in(dev, set_issi, set_in);
		if (err) {
			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
				      err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -EOPNOTSUPP;
}
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct mlx5_priv *priv = &dev->priv;
	int err = 0;

	mutex_init(&dev->pci_status_mutex);
	pci_set_drvdata(dev->pdev, dev);

	dev->bar_addr = pci_resource_start(pdev, 0);
	priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = request_bar(pdev);
	if (err) {
		mlx5_core_err(dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
	    pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
		mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

	dev->iseg_base = dev->bar_addr;
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	mlx5_pci_vsc_init(dev);
	dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);

	return err;
}
static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
	/* health work might still be active, and it needs pci bar in
	 * order to know the NIC state. Therefore, drain the health WQ
	 * before removing the pci bars
	 */
	mlx5_drain_health_wq(dev);
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
}
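/*
 * mlx5_init_once() allocates the software state that lives for the whole
 * lifetime of the device (IRQ/EQ tables, clock, steering and SF tables),
 * while mlx5_load()/mlx5_unload() further below bring the function up and
 * down around it, e.g. across devlink reload or PCI error recovery.
 */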
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.devcom = mlx5_devcom_register_device(dev);
	if (IS_ERR(dev->priv.devcom))
		mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
			      dev->priv.devcom);

	err = mlx5_query_board_id(dev);
	if (err) {
		mlx5_core_err(dev, "query board id failed\n");
		goto err_devcom;
	}

	err = mlx5_irq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize irq table\n");
		goto err_devcom;
	}

	err = mlx5_eq_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize eq\n");
		goto err_irq_cleanup;
	}

	err = mlx5_events_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize events\n");
		goto err_eq_cleanup;
	}

	err = mlx5_fw_reset_init(dev);
	if (err) {
		mlx5_core_err(dev, "failed to initialize fw reset events\n");
		goto err_events_cleanup;
	}

	mlx5_cq_debugfs_init(dev);

	mlx5_init_reserved_gids(dev);

	mlx5_init_clock(dev);

	dev->vxlan = mlx5_vxlan_create(dev);
	dev->geneve = mlx5_geneve_create(dev);

	err = mlx5_init_rl_table(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}

	err = mlx5_mpfs_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
		goto err_rl_cleanup;
	}

	err = mlx5_sriov_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init sriov %d\n", err);
		goto err_mpfs_cleanup;
	}

	err = mlx5_eswitch_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
		goto err_sriov_cleanup;
	}

	err = mlx5_fpga_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
		goto err_eswitch_cleanup;
	}

	err = mlx5_vhca_event_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
		goto err_fpga_cleanup;
	}

	err = mlx5_sf_hw_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init SF HW table %d\n", err);
		goto err_sf_hw_table_cleanup;
	}

	err = mlx5_sf_table_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init SF table %d\n", err);
		goto err_sf_table_cleanup;
	}

	dev->dm = mlx5_dm_create(dev);
	if (IS_ERR(dev->dm))
		mlx5_core_warn(dev, "Failed to init device memory %d\n", err);

	dev->tracer = mlx5_fw_tracer_create(dev);
	dev->hv_vhca = mlx5_hv_vhca_create(dev);
	dev->rsc_dump = mlx5_rsc_dump_create(dev);

	return 0;

err_sf_table_cleanup:
	mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup:
	mlx5_vhca_event_cleanup(dev);
err_fpga_cleanup:
	mlx5_fpga_cleanup(dev);
err_eswitch_cleanup:
	mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
	mlx5_sriov_cleanup(dev);
err_mpfs_cleanup:
	mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
	mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
err_events_cleanup:
	mlx5_events_cleanup(dev);
err_eq_cleanup:
	mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
	mlx5_irq_table_cleanup(dev);
err_devcom:
	mlx5_devcom_unregister_device(dev->priv.devcom);

	return err;
}
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_rsc_dump_destroy(dev);
	mlx5_hv_vhca_destroy(dev->hv_vhca);
	mlx5_fw_tracer_destroy(dev->tracer);
	mlx5_dm_cleanup(dev);
	mlx5_sf_table_cleanup(dev);
	mlx5_sf_hw_table_cleanup(dev);
	mlx5_vhca_event_cleanup(dev);
	mlx5_fpga_cleanup(dev);
	mlx5_eswitch_cleanup(dev->priv.eswitch);
	mlx5_sriov_cleanup(dev);
	mlx5_mpfs_cleanup(dev);
	mlx5_cleanup_rl_table(dev);
	mlx5_geneve_destroy(dev->geneve);
	mlx5_vxlan_destroy(dev->vxlan);
	mlx5_cleanup_clock(dev);
	mlx5_cleanup_reserved_gids(dev);
	mlx5_cq_debugfs_cleanup(dev);
	mlx5_fw_reset_cleanup(dev);
	mlx5_events_cleanup(dev);
	mlx5_eq_table_cleanup(dev);
	mlx5_irq_table_cleanup(dev);
	mlx5_devcom_unregister_device(dev->priv.devcom);
}
static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		       fw_rev_min(dev), fw_rev_sub(dev));

	/* Only PFs hold the relevant PCIe information for this query */
	if (mlx5_core_is_pf(dev))
		pcie_print_link_status(dev->pdev);

	/* Wait for firmware to accept the initialization segment
	 * configuration.
	 */
	err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
	if (err) {
		mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
			      FW_PRE_INIT_TIMEOUT_MILI);
		return err;
	}

	err = mlx5_cmd_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
		return err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
	if (err) {
		mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
			      FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		mlx5_core_err(dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		mlx5_core_err(dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		mlx5_core_err(dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = set_hca_cap(dev);
	if (err) {
		mlx5_core_err(dev, "set_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		mlx5_core_err(dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev, sw_owner_id);
	if (err) {
		mlx5_core_err(dev, "init hca failed\n");
		goto reclaim_boot_pages;
	}

	mlx5_set_driver_version(dev);

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		mlx5_core_err(dev, "query hca failed\n");
		goto stop_health;
	}

	return 0;

stop_health:
	mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);
err_disable_hca:
	mlx5_core_disable_hca(dev, 0);
err_cmd_cleanup:
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return err;
}
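/*
 * mlx5_function_teardown() undoes mlx5_function_setup() in reverse order.
 * If TEARDOWN_HCA fails, the remaining cleanup is skipped on purpose so
 * that no further commands are issued to firmware in an unknown state.
 */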
static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
	int err;

	mlx5_stop_health_poll(dev, boot);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
		return err;
	}
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
	mlx5_cmd_cleanup(dev);

	return 0;
}
static int mlx5_load(struct mlx5_core_dev *dev)
{
	int err;

	dev->priv.uar = mlx5_get_uars_page(dev);
	if (IS_ERR(dev->priv.uar)) {
		mlx5_core_err(dev, "Failed allocating uar, aborting\n");
		err = PTR_ERR(dev->priv.uar);
		return err;
	}

	mlx5_events_start(dev);
	mlx5_pagealloc_start(dev);

	err = mlx5_irq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to alloc IRQs\n");
		goto err_irq_table;
	}

	err = mlx5_eq_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to create EQs\n");
		goto err_eq_table;
	}

	err = mlx5_fw_tracer_init(dev->tracer);
	if (err) {
		mlx5_core_err(dev, "Failed to init FW tracer\n");
		goto err_fw_tracer;
	}

	mlx5_fw_reset_events_start(dev);
	mlx5_hv_vhca_init(dev->hv_vhca);

	err = mlx5_rsc_dump_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init Resource dump\n");
		goto err_rsc_dump;
	}

	err = mlx5_fpga_device_start(dev);
	if (err) {
		mlx5_core_err(dev, "fpga device start failed %d\n", err);
		goto err_fpga_start;
	}

	mlx5_accel_ipsec_init(dev);

	err = mlx5_accel_tls_init(dev);
	if (err) {
		mlx5_core_err(dev, "TLS device start failed %d\n", err);
		goto err_tls_start;
	}

	err = mlx5_init_fs(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init flow steering\n");
		goto err_fs;
	}

	err = mlx5_core_set_hca_defaults(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to set hca defaults\n");
		goto err_set_hca;
	}

	mlx5_vhca_event_start(dev);

	err = mlx5_sf_hw_table_create(dev);
	if (err) {
		mlx5_core_err(dev, "sf table create failed %d\n", err);
		goto err_vhca;
	}

	err = mlx5_ec_init(dev);
	if (err) {
		mlx5_core_err(dev, "Failed to init embedded CPU\n");
		goto err_ec;
	}

	err = mlx5_sriov_attach(dev);
	if (err) {
		mlx5_core_err(dev, "sriov init failed %d\n", err);
		goto err_sriov;
	}

	mlx5_sf_dev_table_create(dev);
	mlx5_lag_add_mdev(dev);

	return 0;

err_sriov:
	mlx5_ec_cleanup(dev);
err_ec:
	mlx5_sf_hw_table_destroy(dev);
err_vhca:
	mlx5_vhca_event_stop(dev);
err_set_hca:
	mlx5_cleanup_fs(dev);
err_fs:
	mlx5_accel_tls_cleanup(dev);
err_tls_start:
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_fpga_device_stop(dev);
err_fpga_start:
	mlx5_rsc_dump_cleanup(dev);
err_rsc_dump:
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
	mlx5_eq_table_destroy(dev);
err_eq_table:
	mlx5_irq_table_destroy(dev);
err_irq_table:
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
	return err;
}
static void mlx5_unload(struct mlx5_core_dev *dev)
{
	mlx5_lag_remove_mdev(dev);
	mlx5_sf_dev_table_destroy(dev);
	mlx5_sriov_detach(dev);
	mlx5_ec_cleanup(dev);
	mlx5_sf_hw_table_destroy(dev);
	mlx5_vhca_event_stop(dev);
	mlx5_cleanup_fs(dev);
	mlx5_accel_ipsec_cleanup(dev);
	mlx5_accel_tls_cleanup(dev);
	mlx5_fpga_device_stop(dev);
	mlx5_rsc_dump_cleanup(dev);
	mlx5_hv_vhca_cleanup(dev->hv_vhca);
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_tracer_cleanup(dev->tracer);
	mlx5_eq_table_destroy(dev);
	mlx5_irq_table_destroy(dev);
	mlx5_pagealloc_stop(dev);
	mlx5_events_stop(dev);
	mlx5_put_uars_page(dev, dev->priv.uar);
}
int mlx5_init_one(struct mlx5_core_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}
	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, true);
	if (err)
		goto err_function;

	err = mlx5_init_once(dev);
	if (err) {
		mlx5_core_err(dev, "sw objs init failed\n");
		goto function_teardown;
	}

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_devlink_register(priv_to_devlink(dev), dev->device);
	if (err)
		goto err_devlink_reg;

	err = mlx5_register_device(dev);
	if (err)
		goto err_register;

	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_register:
	mlx5_devlink_unregister(priv_to_devlink(dev));
err_devlink_reg:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_cleanup_once(dev);
function_teardown:
	mlx5_function_teardown(dev, true);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
	mutex_lock(&dev->intf_state_mutex);

	mlx5_unregister_device(dev);
	mlx5_devlink_unregister(priv_to_devlink(dev));

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		mlx5_cleanup_once(dev);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_cleanup_once(dev);
	mlx5_function_teardown(dev, true);
out:
	mutex_unlock(&dev->intf_state_mutex);
}
int mlx5_load_one(struct mlx5_core_dev *dev)
{
	int err = 0;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "interface is up, NOP\n");
		goto out;
	}
	/* remove any previous indication of internal error */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_function_setup(dev, false);
	if (err)
		goto err_function;

	err = mlx5_load(dev);
	if (err)
		goto err_load;

	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

	err = mlx5_attach_device(dev);
	if (err)
		goto err_attach;

	mutex_unlock(&dev->intf_state_mutex);
	return 0;

err_attach:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
err_load:
	mlx5_function_teardown(dev, false);
err_function:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
void mlx5_unload_one(struct mlx5_core_dev *dev)
{
	mutex_lock(&dev->intf_state_mutex);

	mlx5_detach_device(dev);

	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
			       __func__);
		goto out;
	}

	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	mlx5_unload(dev);
	mlx5_function_teardown(dev, false);
out:
	mutex_unlock(&dev->intf_state_mutex);
}
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
	struct mlx5_priv *priv = &dev->priv;
	int err;

	memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->intf_state_mutex);

	mutex_init(&priv->bfregs.reg_head.lock);
	mutex_init(&priv->bfregs.wc_head.lock);
	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);

	mutex_init(&priv->alloc_mutex);
	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);

	priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
					    mlx5_debugfs_root);
	INIT_LIST_HEAD(&priv->traps);

	err = mlx5_health_init(dev);
	if (err)
		goto err_health_init;

	err = mlx5_pagealloc_init(dev);
	if (err)
		goto err_pagealloc_init;

	err = mlx5_adev_init(dev);
	if (err)
		goto err_adev_init;

	return 0;

err_adev_init:
	mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
	mlx5_health_cleanup(dev);
err_health_init:
	debugfs_remove(dev->priv.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
	return err;
}
void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	mlx5_adev_cleanup(dev);
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	debugfs_remove_recursive(dev->priv.dbg_root);
	mutex_destroy(&priv->pgdir_mutex);
	mutex_destroy(&priv->alloc_mutex);
	mutex_destroy(&priv->bfregs.wc_head.lock);
	mutex_destroy(&priv->bfregs.reg_head.lock);
	mutex_destroy(&dev->intf_state_mutex);
}
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct devlink *devlink;
	int err;

	devlink = mlx5_devlink_alloc();
	if (!devlink) {
		dev_err(&pdev->dev, "devlink alloc failed\n");
		return -ENOMEM;
	}

	dev = devlink_priv(devlink);
	dev->device = &pdev->dev;
	dev->pdev = pdev;

	dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
			 MLX5_COREDEV_VF : MLX5_COREDEV_PF;

	dev->priv.adev_idx = mlx5_adev_idx_alloc();
	if (dev->priv.adev_idx < 0) {
		err = dev->priv.adev_idx;
		goto adev_init_err;
	}

	err = mlx5_mdev_init(dev, prof_sel);
	if (err)
		goto mdev_init_err;

	err = mlx5_pci_init(dev, pdev, id);
	if (err) {
		mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
			      err);
		goto pci_init_err;
	}

	err = mlx5_init_one(dev);
	if (err) {
		mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n",
			      err);
		goto err_init_one;
	}

	err = mlx5_crdump_enable(dev);
	if (err)
		dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);

	pci_save_state(pdev);
	if (!mlx5_core_is_mp_slave(dev))
		devlink_reload_enable(devlink);
	return 0;

err_init_one:
	mlx5_pci_close(dev);
pci_init_err:
	mlx5_mdev_uninit(dev);
mdev_init_err:
	mlx5_adev_idx_free(dev->priv.adev_idx);
adev_init_err:
	mlx5_devlink_free(devlink);

	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);

	devlink_reload_disable(devlink);
	mlx5_crdump_disable(dev);
	mlx5_drain_health_wq(dev);
	mlx5_uninit_one(dev);
	mlx5_pci_close(dev);
	mlx5_mdev_uninit(dev);
	mlx5_adev_idx_free(dev->priv.adev_idx);
	mlx5_devlink_free(devlink);
}
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_core_info(dev, "%s was called\n", __func__);

	mlx5_enter_error_state(dev, false);
	mlx5_error_sw_reset(dev);
	mlx5_unload_one(dev);
	mlx5_drain_health_wq(dev);
	mlx5_pci_disable_device(dev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
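/*
 * PCI error recovery flow: error_detected unloads the driver state and
 * disables the PCI device, slot_reset re-enables it and waits for the
 * health counter to tick (wait_vital() below), and resume reloads the
 * device. The callbacks are wired together in mlx5_err_handler.
 */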
/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 last_count = 0;
	u32 count;
	int i;

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			if (last_count && last_count != count) {
				mlx5_core_info(dev,
					       "wait vital counter value 0x%x after %d iterations\n",
					       count, i);
				return 0;
			}
			last_count = count;
		}
		msleep(50);
	}

	return -ETIMEDOUT;
}
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			      __func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (wait_vital(pdev)) {
		mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "%s was called\n", __func__);

	err = mlx5_load_one(dev);
	if (err)
		mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
			      __func__, err);
	else
		mlx5_core_info(dev, "%s: device recovered\n", __func__);
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};
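/*
 * Fast/force teardown lets shutdown skip the orderly per-resource cleanup:
 * firmware is told to stop all DMA at once, and the host only frees IRQs so
 * that a subsequent kexec kernel can allocate them again.
 */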
static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
	bool fast_teardown = false, force_teardown = false;
	int ret = 1;

	fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
	force_teardown = MLX5_CAP_GEN(dev, force_teardown);

	mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
	mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

	if (!fast_teardown && !force_teardown)
		return -EOPNOTSUPP;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
		return -EAGAIN;
	}

	/* Panic tear down fw command will stop the PCI bus communication
	 * with the HCA, so the health poll is no longer needed.
	 */
	mlx5_drain_health_wq(dev);
	mlx5_stop_health_poll(dev, false);

	ret = mlx5_cmd_fast_teardown_hca(dev);
	if (!ret)
		goto succeed;

	ret = mlx5_cmd_force_teardown_hca(dev);
	if (!ret)
		goto succeed;

	mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
	mlx5_start_health_poll(dev);
	return ret;

succeed:
	mlx5_enter_error_state(dev, true);

	/* Some platforms require freeing the IRQs in the shutdown
	 * flow. If they aren't freed they can't be allocated after
	 * kexec. There is no need to clean up the mlx5_core software
	 * contexts.
	 */
	mlx5_core_eq_free_irqs(dev);

	return 0;
}
static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	mlx5_core_info(dev, "Shutdown was called\n");
	err = mlx5_try_fast_unload(dev);
	if (err)
		mlx5_unload_one(dev);
	mlx5_pci_disable_device(dev);
}

static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	mlx5_unload_one(dev);

	return 0;
}

static int mlx5_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

	return mlx5_load_one(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5 Ex */
	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 Ex VF */
	{ PCI_VDEVICE(MELLANOX, 0x101b) },			/* ConnectX-6 */
	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
	{ PCI_VDEVICE(MELLANOX, 0x101d) },			/* ConnectX-6 Dx */
	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},	/* ConnectX Family mlx5Gen Virtual Function */
	{ PCI_VDEVICE(MELLANOX, 0x101f) },			/* ConnectX-6 LX */
	{ PCI_VDEVICE(MELLANOX, 0x1021) },			/* ConnectX-7 */
	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
	{ PCI_VDEVICE(MELLANOX, 0xa2dc) },			/* BlueField-3 integrated ConnectX-7 network controller */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_error_sw_reset(dev);
	mlx5_unload_one(dev);
}

int mlx5_recover_device(struct mlx5_core_dev *dev)
{
	int ret = -EIO;

	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		ret = mlx5_load_one(dev);

	return ret;
}
static struct pci_driver mlx5_core_driver = {
	.name           = KBUILD_MODNAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = probe_one,
	.remove         = remove_one,
	.suspend        = mlx5_suspend,
	.resume         = mlx5_resume,
	.shutdown	= shutdown,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure   = mlx5_core_sriov_configure,
	.sriov_get_vf_total_msix = mlx5_sriov_get_vf_total_msix,
	.sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};
static void mlx5_core_verify_params(void)
{
	if (prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
			prof_sel,
			ARRAY_SIZE(profile) - 1,
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
}
static int __init init(void)
{
	int err;

	WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME),
		  "mlx5_core name not in sync with kernel module name");

	get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

	mlx5_core_verify_params();
	mlx5_fpga_ipsec_build_fs_cmds();
	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

	err = mlx5_sf_driver_register();
	if (err)
		goto err_sf;

#ifdef CONFIG_MLX5_CORE_EN
	err = mlx5e_init();
	if (err) {
		pci_unregister_driver(&mlx5_core_driver);
		goto err_debug;
	}
#endif

	return 0;

err_sf:
	pci_unregister_driver(&mlx5_core_driver);
err_debug:
	mlx5_unregister_debugfs();
	return err;
}
static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_cleanup();
#endif
	mlx5_sf_driver_unregister();
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);