// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed_sriov.h"
#include "qed_dev_api.h"
#include "qed_iscsi.h"
#include "qed_reg_addr.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#include "qed_devlink.h"
#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50
static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
/* MFW speed capabilities maps */

struct qed_mfw_speed_map {
	u32 mfw_val;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);

	const u32 *cap_arr;
	u8 arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)		\
{						\
	.mfw_val	= (type),		\
	.cap_arr	= (arr),		\
	.arr_size	= ARRAY_SIZE(arr),	\
}
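/* The per-speed link-mode tables below are marked __initconst: once
 * qed_mfw_speed_maps_init() has folded each table into its map's
 * linkmode mask at module init, the raw u32 arrays are not needed
 * again and may be discarded along with the rest of init memory.
 */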
static const u32 qed_mfw_ext_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};
static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
			  qed_mfw_ext_50g_base_r),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
			  qed_mfw_ext_50g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
			  qed_mfw_ext_100g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
			  qed_mfw_ext_100g_base_r4),
};
static const u32 qed_mfw_legacy_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};
static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
			  qed_mfw_legacy_1g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
			  qed_mfw_legacy_10g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
			  qed_mfw_legacy_20g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
			  qed_mfw_legacy_25g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
			  qed_mfw_legacy_40g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
			  qed_mfw_legacy_50g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
			  qed_mfw_legacy_bb_100g),
};
static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
}

static void __init qed_mfw_speed_maps_init(void)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}

static int __init qed_init(void)
{
	pr_info("%s", version);

	qed_mfw_speed_maps_init();

	return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
	/* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);
/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	pci_disable_pcie_error_reporting(pdev);

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}
#define PCI_REVISION_ID_ERROR_VAL	0xff
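/* A revision-ID readback of all-ones typically means PCI config space
 * is not responding (e.g. the device was left in an error state by an
 * earlier fault), so probing below bails out rather than trusting it.
 */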
/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	rc = pci_enable_device(pdev);
		DP_NOTICE(cdev, "Cannot enable PCI device\n");

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
			  "Failed to request PCI memory resources\n");
		pci_set_master(pdev);
		pci_save_state(pdev);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
		  rev_id);

	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		DP_NOTICE(cdev, "No Doorbell bar available\n");

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");

	/* AER (Advanced Error reporting) configuration */
	rc = pci_enable_pcie_error_reporting(pdev);
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "Failed to configure PCIe AER [%d]\n", rc);

	pci_release_regions(pdev);

	pci_disable_device(pdev);
}
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	dev_info->fw_major = FW_MAJOR_VERSION;
	dev_info->fw_minor = FW_MINOR_VERSION;
	dev_info->fw_rev = FW_REVISION_VERSION;
	dev_info->fw_eng = FW_ENGINEERING_VERSION;
	dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
					       &cdev->mf_bits);
	if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
		dev_info->b_arfs_capable = true;
	dev_info->tx_switching = true;

	if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
		dev_info->wol_support = true;

	dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

	dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;

	qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
			      &dev_info->fw_minor, &dev_info->fw_rev,
			      &dev_info->fw_eng);

	ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
				    &dev_info->mfw_rev, NULL);

		qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
				    &dev_info->mbi_version);

		qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
				       &dev_info->flash_size);

		qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);

	qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
			    &dev_info->mfw_rev, NULL);

	dev_info->mtu = hw_info->mtu;
	cdev->common_dev_info = *dev_info;

	return 0;
}
static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");

	return 0;
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;

	cdev = qed_alloc_cdev(pdev);

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
		DP_ERR(cdev, "init pci failed\n");

	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
		DP_ERR(cdev, "hw prepare failed\n");

	DP_INFO(cdev, "qed_probe completed successfully\n");
}
static void qed_remove(struct qed_dev *cdev)
{
	qed_set_power_state(cdev, PCI_D3hot);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);

	/* For VFs, we should return with an error in case we didn't get the
	 * exact number of msix vectors as we requested.
	 * Not doing that will lead to a crash when starting queues for
	 * the VF.
	 */
	if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
		  cnt, rc);

	return rc;
}
/* This function outputs the int mode and the number of enabled msix vectors */
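/* Note the fall-through on failure below: if MSI-X cannot be enabled
 * and the mode is not forced, MSI is attempted next (single-hwfn
 * devices only), with INTa as the final fallback.
 */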
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {

		rc = qed_enable_msix(cdev, int_params);

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
				int_params->out.int_mode = QED_INT_MODE_MSI;

			DP_NOTICE(cdev, "Failed to enable MSI\n");

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;

	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);

	DP_INFO(cdev, "Using %s interrupts\n",
		int_params->out.int_mode == QED_INT_MODE_INTA ?
		"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
		"MSI" : "MSI-X");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}
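/* Each MSI-X slowpath vector is bound to a per-hwfn tasklet; the
 * hard-IRQ handler below only schedules it, so the actual slowpath
 * processing runs later in softirq context.
 */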
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(&hwfn->sp_dpc);

		/* Fastpath interrupts */
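		/* Bit 0 of the SISR status was the slowpath line handled
		 * above; fastpath handler j corresponds to status bit
		 * j + 1, which is what the 0x2ULL << j test encodes.
		 */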
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				p_handler->func(p_handler->token);
					  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",

				status &= ~(0x2ULL << j);

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);

	return rc;
}
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);

	DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
		   "Requested slowpath %s\n",
		   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}
static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(&p_hwfn->sp_dpc);
		tasklet_enable(&p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;

			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 &cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}

	qed_int_disable_post_isr_release(cdev);
}
static int qed_nic_stop(struct qed_dev *cdev)
{
	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(&p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, &p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}
static int qed_nic_setup(struct qed_dev *cdev)
{
	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;
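	/* Without MSI-X, the 64-bit IGU status word read in
	 * qed_single_int() is all we get per hwfn; bit 0 is slowpath,
	 * leaving at most 63 usable fastpath handlers per hwfn.
	 */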
	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	return min_t(int, cnt, limit);
}
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		 "Protocol driver requested interrupt information, but its support is not yet configured\n");

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}
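/* Inflates the zipped firmware image into unzip_buf; on success the
 * return value is the decompressed length expressed in 32-bit dwords
 * (total_out / 4).
 */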
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);

	return p_hwfn->stream->total_out / 4;
}
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}
static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}
#define QED_PERIODIC_DB_REC_COUNT		10
#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
#define QED_PERIODIC_DB_REC_INTERVAL \
	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
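/* Doorbell recovery is polled from the slowpath workqueue: once armed
 * it reruns up to QED_PERIODIC_DB_REC_COUNT times, one interval apart
 * (see the requeue in qed_slowpath_task() below).
 */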
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		/* skip qed_db_rec_handler during recovery/unload */
		if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
			goto out;

		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

out:
	qed_ptt_release(hwfn, ptt);
}
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
			      &cdev->pdev->dev);
			  "Failed to find fw file - /lib/firmware/%s\n",
			  QED_FW_FILE_NAME);

	if (cdev->num_hwfns == 1) {
		p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (p_ptt) {
			QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
		} else {
			DP_NOTICE(cdev,
				  "Failed to acquire PTT for aRFS\n");
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);

	rc = qed_slowpath_setup_int(cdev, params->int_mode);

	rc = qed_slowpath_vf_setup_int(cdev);

	/* Allocate stream for unzipping */
	rc = qed_alloc_stream_mem(cdev);

	/* First Dword used to differentiate between various sources */
	data = cdev->firmware->data + sizeof(u32);

	qed_dbg_pf_init(cdev);
	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
		"HW initialization and function start completed successfully\n");

	cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
				   BIT(QED_MODE_L2GENEVE_TUNN) |
				   BIT(QED_MODE_IPGENEVE_TUNN) |
				   BIT(QED_MODE_L2GRE_TUNN) |
				   BIT(QED_MODE_IPGRE_TUNN));
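	/* All tunnel classes were enabled above with MAC/VLAN
	 * classification, so the full tunnel feature mask can be
	 * advertised to the protocol driver.
	 */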
	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);

	hwfn = QED_LEADING_HWFN(cdev);
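	/* The driver version is reported to the MFW packed one byte per
	 * component, most significant first: major, minor, revision and
	 * an engineering byte.
	 */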
	drv_version.version = (params->drv_major << 24) |
			      (params->drv_minor << 16) |
			      (params->drv_rev << 8) |
			      (params->drv_eng);
	strlcpy(drv_version.name, params->name,
		MCP_DRV_VER_STR_SIZE - 4);
	rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
				      &drv_version);
		DP_NOTICE(cdev, "Failed sending drv version command\n");

	qed_reset_vport_stats(cdev);

	qed_ll2_dealloc_if(cdev);

	qed_hw_timers_stop_all(cdev);

	qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (cdev->num_hwfns == 1)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_free_stream_mem(cdev);
	if (IS_QED_ETH_IF(cdev))
		qed_sriov_disable(cdev, true);

	qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	release_firmware(cdev->firmware);

	return 0;
}
static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}
static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}
static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
				     const struct qed_link_params *params)
{
	struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
	const struct qed_mfw_speed_map *map;
	u32 i;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		ext_speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		ext_speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
			map = qed_mfw_ext_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				ext_speed->advertised_speeds |= map->mfw_val;
		}
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
		switch (params->forced_speed) {
		case SPEED_1000:
			ext_speed->forced_speed = QED_EXT_SPEED_1G;
		case SPEED_10000:
			ext_speed->forced_speed = QED_EXT_SPEED_10G;
		case SPEED_20000:
			ext_speed->forced_speed = QED_EXT_SPEED_20G;
		case SPEED_25000:
			ext_speed->forced_speed = QED_EXT_SPEED_25G;
		case SPEED_40000:
			ext_speed->forced_speed = QED_EXT_SPEED_40G;
		case SPEED_50000:
			ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
						  QED_EXT_SPEED_50G_R2;
		case SPEED_100000:
			ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
						  QED_EXT_SPEED_100G_R4 |
						  QED_EXT_SPEED_100G_P4;
		}
	}
	if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
		return;

	switch (params->forced_speed) {
	case SPEED_25000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
						    ETH_EXT_FEC_25G_BASE_R |
						    ETH_EXT_FEC_25G_NONE;
		}
	case SPEED_40000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
						    ETH_EXT_FEC_40G_NONE;
		}
	case SPEED_50000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
						    ETH_EXT_FEC_50G_BASE_R |
						    ETH_EXT_FEC_50G_NONE;
		}
	case SPEED_100000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
						    ETH_EXT_FEC_100G_BASE_R |
						    ETH_EXT_FEC_100G_NONE;
		}
	}
}
static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_mcp_link_params *link_params;
	struct qed_mcp_link_speed_params *speed;
	const struct qed_mfw_speed_map *map;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	u32 i;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);

	ptt = qed_ptt_acquire(hwfn);

	link_params = qed_mcp_get_link_params(hwfn);

	speed = &link_params->speed;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
			map = qed_mfw_legacy_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				speed->advertised_speeds |= map->mfw_val;
		}
	}
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}

	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
			break;
		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
		link_params->fec = params->fec;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_KR:
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}

	return port_type;
}
static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}
static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     unsigned long *if_caps)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
	switch (media_type) {
	case MEDIA_DA_TWINAX:
		phylink_set(if_caps, FIBRE);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		/* For DAC media multiple speed capabilities are supported */
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseCR_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 40000baseCR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseCR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseCR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 100000baseCR4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_BASE_T:
		phylink_set(if_caps, TP);

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
				phylink_set(if_caps, 1000baseT_Full);
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
				phylink_set(if_caps, 10000baseT_Full);
		}

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			phylink_set(if_caps, FIBRE);

			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1000BASET:
				phylink_set(if_caps, 1000baseT_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_BASET:
				phylink_set(if_caps, 10000baseT_Full);
				break;
			}
		}

		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		phylink_set(if_caps, FIBRE);
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1G_LX:
			case ETH_TRANSCEIVER_TYPE_1G_SX:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 1000baseKX_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
				phylink_set(if_caps, 10000baseSR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 10000baseLR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LRM:
				phylink_set(if_caps, 10000baseLRM_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_ER:
				phylink_set(if_caps, 10000baseR_FEC);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
				phylink_set(if_caps, 25000baseSR_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_LR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 40000baseLR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_40G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
				phylink_set(if_caps, 40000baseSR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
				phylink_set(if_caps, 100000baseSR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 100000baseLR4_ER4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_KR:
		phylink_set(if_caps, Backplane);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			phylink_set(if_caps, 40000baseKR4_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			phylink_set(if_caps, 100000baseKR4_Full);

		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
	default:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}
static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
{
	*speed_mask = 0;

	if (caps &
	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
	if (caps & QED_LINK_PARTNER_SPEED_10G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
	if (caps & QED_LINK_PARTNER_SPEED_20G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
	if (caps & QED_LINK_PARTNER_SPEED_25G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
	if (caps & QED_LINK_PARTNER_SPEED_40G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
	if (caps & QED_LINK_PARTNER_SPEED_50G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
	if (caps & QED_LINK_PARTNER_SPEED_100G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type, speed_mask;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
		if_link->link_up = true;

	if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
		if (link_caps.default_ext_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.ext_speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);

		qed_fill_link_capability(hwfn, ptt,
					 params.ext_speed.advertised_speeds,
					 if_link->advertised_caps);
	} else {
		if (link_caps.default_speed_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);
	}

	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		phylink_set(if_link->supported_caps, Asym_Pause);
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		phylink_set(if_link->supported_caps, Pause);

	if_link->sup_fec = link_caps.fec_default;
	if_link->active_fec = params.fec;

	/* Fill link advertised capability */
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 if_link->advertised_caps);

	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 if_link->supported_caps);

	/* Fill partner advertised capability */
	qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
	qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);
	if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	if (link.an_complete)
		phylink_set(if_link->lp_caps, Autoneg);
	if (link.partner_adv_pause)
		phylink_set(if_link->lp_caps, Pause);
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		phylink_set(if_link->lp_caps, Asym_Pause);

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}
static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;

	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
		op->bw_update(cookie);
}
static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}
static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
		DP_ERR(cdev, "Failed reading image from nvm\n");

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
			  DIV_ROUND_UP(nvm_image->length - 4, 4));

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
	*crc = (__force u32)cpu_to_be32p(crc);
}
/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                         |
 * 4B  | image_type | Options  |  Number of register settings             |
 * 8B  |                       Value                                       |
 * 12B |                       Mask                                        |
 * 16B |                       Offset                                      |
 * \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;

	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);

	is_crc = !!(**data & BIT(0));

	len = *((u16 *)*data);

	rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);

	rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
			       (nvm_image.start_addr +
				nvm_image.length - 4), (u8 *)&crc, 4);
		DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
		       nvm_image.start_addr + nvm_image.length - 4, rc);

	/* Iterate over the values for setting */
2386 u32 offset, mask, value, cur_value;
2389 value = *((u32 *)*data);
2391 mask = *((u32 *)*data);
2393 offset = *((u32 *)*data);
2396 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
2399 DP_ERR(cdev, "Failed reading from %08x\n",
2400 nvm_image.start_addr + offset);
2404 cur_value = le32_to_cpu(*((__le32 *)buf));
2405 DP_VERBOSE(cdev, NETIF_MSG_DRV,
2406 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
2407 nvm_image.start_addr + offset, cur_value,
2408 (cur_value & ~mask) | (value & mask), value, mask);
2409 value = (value & mask) | (cur_value & ~mask);
2410 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2411 nvm_image.start_addr + offset,
2414 DP_ERR(cdev, "Failed writing to %08x\n",
2415 nvm_image.start_addr + offset);
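/* The Value-Mask-Offset update above is a plain read-modify-write: only the
 * bits set in 'mask' are taken from 'value'; all other bits keep their
 * current NVM contents. For example, with cur_value = 0x1234abcd,
 * value = 0x0000ff00 and mask = 0x0000ff00:
 *
 *	(value & mask) | (cur_value & ~mask) == 0x1234ffcd
 */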
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                            |
 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
 * 8B  | File-type               |                  reserved                  |
 * 12B |                    Image length in bytes                             |
 *     \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;

	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                            |
 * 4B  |                       Length in bytes                                |
 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
 * 12B |                       Offset in bytes                                |
 * 16B |                       Data ...                                       |
 *     \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}
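/* A batchfile typically carries one FILE_START command followed by one or
 * more FILE_DATA chunks for the same file; the MFW tracks the open file, so
 * each chunk only needs (offset, length, payload). Illustrative sketch of
 * the resulting MCP call sequence (chunk names are hypothetical):
 *
 *	qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
 *			  (u8 *)&file_size, 4);
 *	qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, off0, chunk0, len0);
 *	qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, off1, chunk1, len1);
 */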
/* Binary file format [General header] -
 *     /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                              |
 * 4B  |                       Length in bytes                                |
 * 8B  | Highest command in this batchfile |          Reserved                |
 *     \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}
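/* For reference, the 12-byte header validated above, read as u32 words:
 * word 0 holds QED_NVM_SIGNATURE, word 1 the total file length in bytes, and
 * the low 16 bits of word 2 the highest command index used in the file.
 */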
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x5 [command index]                            |
 * 4B  | Number of config attributes     |          Reserved                  |
 * 4B  | Config ID                       | Entity ID      | Length            |
 * 4B  |                               Value                                  |
 *     |                                                                      |
 *     \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user provided buffer and flashes
 * them to the respective NVM path using Management FW interface.
 */
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config ids: num_attrs = %0d\n", count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
	 */
	for (i = 1; i <= count; i++) {
		cfg_id = *((u16 *)*data);
		*data += 2;
		entity_id = **data;
		(*data)++;
		len = **data;
		(*data)++;
		memcpy(buf, *data, len);
		*data += len;

		flags = 0;
		if (need_nvm_init) {
			flags |= QED_NVM_CFG_OPTION_INIT;
			need_nvm_init = false;
		}

		/* Commit to flash and free the resources */
		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
			flags |= QED_NVM_CFG_OPTION_COMMIT |
				 QED_NVM_CFG_OPTION_FREE;
			need_nvm_init = true;
		}

		if (entity_id)
			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;

		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
			   entity_id, len);
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);

	return rc;
}
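/* The flag batching above flashes a long attribute list in bursts: the first
 * attribute of each burst carries QED_NVM_CFG_OPTION_INIT, and every
 * QED_NVM_CFG_MAX_ATTRS-th attribute (or the final one) adds
 * COMMIT | FREE. E.g. for count = 120 the commits happen at i = 50, 100 and
 * 120, each followed by a fresh INIT window.
 */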
#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}
static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
	if (rc)
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}
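/* qed_nvm_flash() is exported to the protocol driver through
 * qed_common_ops_pass below (.nvm_flash); qede, for instance, reaches it
 * from its ethtool flash-device path, so a flash request ends up roughly as
 * (illustrative call shape, 'edev' being the caller's device context):
 *
 *	rc = edev->ops->common->nvm_flash(edev->cdev, fw_file_name);
 */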
static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}
static const char * const qed_hw_err_type_descr[] = {
	[QED_HW_ERR_FAN_FAIL]		= "Fan Failure",
	[QED_HW_ERR_MFW_RESP_FAIL]	= "MFW Response Failure",
	[QED_HW_ERR_HW_ATTN]		= "HW Attention",
	[QED_HW_ERR_DMAE_FAIL]		= "DMAE Failure",
	[QED_HW_ERR_RAMROD_FAIL]	= "Ramrod Failure",
	[QED_HW_ERR_FW_ASSERT]		= "FW Assertion",
	[QED_HW_ERR_LAST]		= "Unknown",
};
void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
			   enum qed_hw_err_type err_type)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;
	const char *err_str;

	if (err_type > QED_HW_ERR_LAST)
		err_type = QED_HW_ERR_LAST;
	err_str = qed_hw_err_type_descr[err_type];

	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);

	/* Call the HW error handler of the protocol driver.
	 * If it is not available - perform a minimal handling of preventing
	 * HW attentions from being reasserted.
	 */
	if (ops && ops->schedule_hw_err_handler)
		ops->schedule_hw_err_handler(cookie, err_type);
	else
		qed_int_attn_clr_enable(p_hwfn->cdev, true);
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}
int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}
static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_dbg_grc_config(hwfn, cfg_id, val);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};
const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.devlink_register = qed_devlink_register,
	.devlink_unregister = qed_devlink_unregister,
	.report_fatal_error = qed_report_fatal_error,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.attn_clr_enable = &qed_int_attn_clr_enable,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
};
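/* Protocol drivers do not call the helpers above directly; they obtain this
 * ops table (embedded in their protocol-specific ops) and go through it. A
 * sketch of typical consumer code - 'qed_ops', 'probe_params' and friends
 * stand in for the consumer's own variables:
 *
 *	struct qed_dev *cdev;
 *
 *	cdev = qed_ops->common->probe(pdev, &probe_params);
 *	qed_ops->common->update_pf_params(cdev, &pf_params);
 *	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
 */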
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}
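/* The barriers above pair with the slowpath worker that consumes the flag;
 * conceptually, the worker side does (sketch, under the assumption that it
 * already holds a PTT):
 *
 *	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
 *			       &hwfn->slowpath_task_flags))
 *		qed_mfw_process_tlv_req(hwfn, ptt);
 */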
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}
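/* The frame/byte totals above aggregate the unicast, multicast and broadcast
 * counters of the common vport statistics - the same counters the protocol
 * driver exposes through its ethtool statistics.
 */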
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}