// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
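
/* Example: with firmware version macros a.b.c.d, the module requests
 * "qed/qed_init_values_zipped-a.b.c.d.bin" from the firmware loader
 * (typically served out of /lib/firmware).
 */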

static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	pci_disable_pcie_error_reporting(pdev);

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	/* AER (Advanced Error reporting) configuration */
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc)
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "Failed to configure PCIe AER [%d]\n", rc);

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
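
/* Note: BAR0 holds the device register space (mapped into cdev->regview)
 * and BAR2 holds the doorbell space (mapped write-combined into
 * cdev->doorbells); per the checks above, only a PF is required to expose
 * a doorbell BAR.
 */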

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}
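
/* Note: a PF reads the MFW/MBI versions and flash size from the management
 * FW through a PTT window, while a VF obtains its versions from the PF over
 * the VF-PF channel (qed_vf_get_fw_version).
 */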

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

struct qed_devlink {
	struct qed_dev *cdev;
};

enum qed_devlink_param_id {
	QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	QED_DEVLINK_PARAM_ID_IWARP_CMT,
};

static int qed_dl_param_get(struct devlink *dl, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct qed_devlink *qed_dl;
	struct qed_dev *cdev;

	qed_dl = devlink_priv(dl);
	cdev = qed_dl->cdev;
	ctx->val.vbool = cdev->iwarp_cmt;

	return 0;
}

static int qed_dl_param_set(struct devlink *dl, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct qed_devlink *qed_dl;
	struct qed_dev *cdev;

	qed_dl = devlink_priv(dl);
	cdev = qed_dl->cdev;
	cdev->iwarp_cmt = ctx->val.vbool;

	return 0;
}

static const struct devlink_param qed_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
			     "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     qed_dl_param_get, qed_dl_param_set, NULL),
};

static const struct devlink_ops qed_dl_ops;

static int qed_devlink_register(struct qed_dev *cdev)
{
	union devlink_param_value value;
	struct qed_devlink *qed_dl;
	struct devlink *dl;
	int rc;

	dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
	if (!dl)
		return -ENOMEM;

	qed_dl = devlink_priv(dl);

	cdev->dl = dl;
	qed_dl->cdev = cdev;

	rc = devlink_register(dl, &cdev->pdev->dev);
	if (rc)
		goto err_free;

	rc = devlink_params_register(dl, qed_devlink_params,
				     ARRAY_SIZE(qed_devlink_params));
	if (rc)
		goto err_unregister;

	value.vbool = false;
	devlink_param_driverinit_value_set(dl,
					   QED_DEVLINK_PARAM_ID_IWARP_CMT,
					   value);

	devlink_params_publish(dl);
	cdev->iwarp_cmt = false;

	return 0;

err_unregister:
	devlink_unregister(dl);

err_free:
	cdev->dl = NULL;
	devlink_free(dl);

	return rc;
}

static void qed_devlink_unregister(struct qed_dev *cdev)
{
	if (!cdev->dl)
		return;

	devlink_params_unregister(cdev->dl, qed_devlink_params,
				  ARRAY_SIZE(qed_devlink_params));

	devlink_unregister(cdev->dl);
	devlink_free(cdev->dl);
}
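
/* The iwarp_cmt knob above is a runtime devlink parameter; from user space
 * it can be toggled with something like (assuming the device sits at the
 * illustrative PCI address 0000:03:00.0):
 *
 *   devlink dev param set pci/0000:03:00.0 name iwarp_cmt \
 *           value true cmode runtime
 */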

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_devlink_register(cdev);
	if (rc) {
		DP_INFO(cdev, "Failed to register devlink.\n");
		goto err2;
	}

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_devlink_unregister(cdev);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vector */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		fallthrough;

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		fallthrough;

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}
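
/* Example of the index mapping above: with cdev->num_hwfns == 2, global
 * index 5 lands on hwfn 5 % 2 == 1 in handler slot 5 / 2 == 2, i.e. global
 * indices are interleaved across the engines.
 */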

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}
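
/* Note: in INTa/MSI mode all fastpath work funnels through the single
 * interrupt, so the limit of 63 handlers per hwfn mirrors the fastpath
 * bits of the 64-bit IGU status word consumed by qed_single_int().
 */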

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
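
/* Example of the vector layout set up above, assuming 2 hwfns and 32
 * granted vectors: vectors 0..1 are the per-hwfn slowpath vectors, so
 * fp_msix_base = 2 and fp_msix_cnt = 30; with an RDMA personality, any
 * fastpath vectors beyond the L2 queues are carved off as rdma_msix_*.
 */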

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
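
/* Note: on success qed_unzip_data() returns the decompressed length in
 * dwords (total_out / 4); any zlib failure is reported as a length of 0.
 */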

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

#define QED_PERIODIC_DB_REC_COUNT		10
#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
#define QED_PERIODIC_DB_REC_INTERVAL \
	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)

static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}
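
/* Note: once armed, the periodic doorbell-recovery work re-queues itself
 * from qed_slowpath_task() up to QED_PERIODIC_DB_REC_COUNT (10) times,
 * once every QED_PERIODIC_DB_REC_INTERVAL_MS (100) milliseconds.
 */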

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				rc = -EINVAL;
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_nic_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_VF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}
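
/* Example of the CMT mapping above: an L2 queue with sb_id 5 on a 2-hwfn
 * device lands on hwfn 5 % 2 == 1 with rel_sb_id 5 / 2 == 2, while
 * RoCE/storage SBs stay on the affinity hwfn with rel_sb_id == sb_id.
 */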

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
	struct qed_mcp_link_params *link_params;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	u32 as;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (!link_params)
		return -ENODATA;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		as = 0;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 1000baseT_Full);
		phylink_set(sup_caps, 1000baseKX_Full);
		phylink_set(sup_caps, 1000baseX_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 10000baseT_Full);
		phylink_set(sup_caps, 10000baseKR_Full);
		phylink_set(sup_caps, 10000baseKX4_Full);
		phylink_set(sup_caps, 10000baseR_FEC);
		phylink_set(sup_caps, 10000baseCR_Full);
		phylink_set(sup_caps, 10000baseSR_Full);
		phylink_set(sup_caps, 10000baseLR_Full);
		phylink_set(sup_caps, 10000baseLRM_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 20000baseKR2_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 25000baseKR_Full);
		phylink_set(sup_caps, 25000baseCR_Full);
		phylink_set(sup_caps, 25000baseSR_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 40000baseLR4_Full);
		phylink_set(sup_caps, 40000baseKR4_Full);
		phylink_set(sup_caps, 40000baseCR4_Full);
		phylink_set(sup_caps, 40000baseSR4_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 50000baseKR2_Full);
		phylink_set(sup_caps, 50000baseCR2_Full);
		phylink_set(sup_caps, 50000baseSR2_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;

		phylink_zero(sup_caps);
		phylink_set(sup_caps, 100000baseKR4_Full);
		phylink_set(sup_caps, 100000baseSR4_Full);
		phylink_set(sup_caps, 100000baseCR4_Full);
		phylink_set(sup_caps, 100000baseLR4_ER4_Full);

		if (linkmode_intersects(params->adv_speeds, sup_caps))
			as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;

		link_params->speed.advertised_speeds = as;
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
		link_params->fec = params->fec;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     unsigned long *if_caps)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		phylink_set(if_caps, FIBRE);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		/* For DAC media multiple speed capabilities are supported */
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseCR_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 40000baseCR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseCR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseCR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 100000baseCR4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_BASE_T:
		phylink_set(if_caps, TP);

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
				phylink_set(if_caps, 1000baseT_Full);
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
				phylink_set(if_caps, 10000baseT_Full);
		}

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			phylink_set(if_caps, FIBRE);

			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1000BASET:
				phylink_set(if_caps, 1000baseT_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_BASET:
				phylink_set(if_caps, 10000baseT_Full);
				break;
			default:
				break;
			}
		}

		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		phylink_set(if_caps, FIBRE);
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1G_LX:
			case ETH_TRANSCEIVER_TYPE_1G_SX:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 1000baseKX_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
				phylink_set(if_caps, 10000baseSR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 10000baseLR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LRM:
				phylink_set(if_caps, 10000baseLRM_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_ER:
				phylink_set(if_caps, 10000baseR_FEC);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
				phylink_set(if_caps, 25000baseSR_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_LR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 40000baseLR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_40G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
				phylink_set(if_caps, 40000baseSR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
				phylink_set(if_caps, 100000baseSR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 100000baseLR4_ER4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_KR:
		phylink_set(if_caps, Backplane);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			phylink_set(if_caps, 40000baseKR4_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			phylink_set(if_caps, 100000baseKR4_Full);

		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
	default:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}

static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
{
	*speed_mask = 0;

	if (caps &
	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
	if (caps & QED_LINK_PARTNER_SPEED_10G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
	if (caps & QED_LINK_PARTNER_SPEED_20G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
	if (caps & QED_LINK_PARTNER_SPEED_25G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
	if (caps & QED_LINK_PARTNER_SPEED_40G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
	if (caps & QED_LINK_PARTNER_SPEED_50G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
	if (caps & QED_LINK_PARTNER_SPEED_100G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type, speed_mask;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if (link_caps.default_speed_autoneg)
		phylink_set(if_link->supported_caps, Autoneg);
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		phylink_set(if_link->supported_caps, Asym_Pause);
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		phylink_set(if_link->supported_caps, Pause);

	linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

	if (params.speed.autoneg)
		phylink_set(if_link->advertised_caps, Autoneg);
	else
		phylink_clear(if_link->advertised_caps, Autoneg);

	if_link->sup_fec = link_caps.fec_default;
	if_link->active_fec = params.fec;

	/* Fill link advertised capability */
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 if_link->advertised_caps);

	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 if_link->supported_caps);

	/* Fill partner advertised capability */
	qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
	qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	if (link.an_complete)
		phylink_set(if_link->lp_caps, Autoneg);
	if (link.partner_adv_pause)
		phylink_set(if_link->lp_caps, Pause);
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		phylink_set(if_link->lp_caps, Asym_Pause);

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;

	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
		op->bw_update(cookie);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}

static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
			  DIV_ROUND_UP(nvm_image->length - 4, 4));

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
	*crc = (__force u32)cpu_to_be32p(crc);

out:
	kfree(buf);

	return rc;
}
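
/* Note: the CRC convention used above is CRC32 over the big-endian-converted
 * image contents excluding their trailing 4 bytes; the resulting CRC is
 * stored big-endian in those final 4 bytes of the image.
 */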

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                         |
 * 4B  | image_type     | Options        |  Number of register settings    |
 * 8B  |                       Value                                       |
 * 12B |                       Mask                                        |
 * 16B |                       Offset                                      |
 * \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	*check_resp = !!(**data & BIT(0));
	*data += 4;

	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}

exit:
	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                         |
 * 4B  | b'0: check_response?   | b'1-31  reserved                         |
 * 8B  | File-type |                   reserved                            |
 * 12B |                    Image length in bytes                          |
 * \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                         |
 * 4B  |                       Length in bytes                             |
 * 8B  | b'0: check_response?   | b'1-31  reserved                         |
 * 12B |                       Offset in bytes                             |
 * 16B |                       Data ...                                    |
 * \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}

/* Binary file format [General header] -
 * /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                           |
 * 4B  |                       Length in bytes                             |
 * 8B  | Highest command in this batchfile |          Reserved             |
 * \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}
2332 * /----------------------------------------------------------------------\
2333 * 0B | 0x5 [command index] |
2334 * 4B | Number of config attributes | Reserved |
2335 * 4B | Config ID | Entity ID | Length |
2338 * \----------------------------------------------------------------------/
2339 * There can be several cfg_id-entity_id-Length-Value sets as specified by
2340 * 'Number of config attributes'.
2342 * The API parses config attributes from the user provided buffer and flashes
2343 * them to the respective NVM path using Management FW inerface.
2345 static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2347 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2348 u8 entity_id, len, buf[32];
2349 bool need_nvm_init = true;
2350 struct qed_ptt *ptt;
2355 ptt = qed_ptt_acquire(hwfn);
2359 /* NVM CFG ID attribute header */
2361 count = *((u16 *)*data);
2364 DP_VERBOSE(cdev, NETIF_MSG_DRV,
2365 "Read config ids: num_attrs = %0d\n", count);
2366 /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
2367 * arithmetic operations in the implementation.
2369 for (i = 1; i <= count; i++) {
2370 cfg_id = *((u16 *)*data);
2376 memcpy(buf, *data, len);
2380 if (need_nvm_init) {
2381 flags |= QED_NVM_CFG_OPTION_INIT;
2382 need_nvm_init = false;
2385 /* Commit to flash and free the resources */
2386 if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
2387 flags |= QED_NVM_CFG_OPTION_COMMIT |
2388 QED_NVM_CFG_OPTION_FREE;
2389 need_nvm_init = true;
2393 flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
2395 DP_VERBOSE(cdev, NETIF_MSG_DRV,
2396 "cfg_id = %d entity = %d len = %d\n", cfg_id,
2398 rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
2401 DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
2406 qed_ptt_release(hwfn, ptt);

#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}
static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
	if (rc)
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

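/* A caller is expected to size its buffer with the _len op before issuing
 * the read. A hedged, compiled-out sketch of such pairing; 'ops', 'cmd' and
 * 'entity_id' are assumed to exist in the caller.
 */
#if 0
	u32 len = ops->read_nvm_cfg_len(cdev, cmd);
	u8 *buf = kzalloc(len, GFP_KERNEL);

	if (buf && !ops->read_nvm_cfg(cdev, &buf, cmd, entity_id))
		; /* 'buf' now holds the attribute value */
	kfree(buf);
#endif
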
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}

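/* qed_nvm_flash() is exported through the 'nvm_flash' member of the common
 * ops below; a protocol driver typically forwards an 'ethtool -f' flash
 * request to it. A hedged, compiled-out sketch; 'ops' and the file name are
 * assumptions.
 */
#if 0
	rc = ops->nvm_flash(cdev, "qed_image.bin");	/* loaded via request_firmware() */
#endif
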
static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}

void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}

static const char * const qed_hw_err_type_descr[] = {
	[QED_HW_ERR_FAN_FAIL] = "Fan Failure",
	[QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
	[QED_HW_ERR_HW_ATTN] = "HW Attention",
	[QED_HW_ERR_DMAE_FAIL] = "DMAE Failure",
	[QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure",
	[QED_HW_ERR_FW_ASSERT] = "FW Assertion",
	[QED_HW_ERR_LAST] = "Unknown",
};

void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
			   enum qed_hw_err_type err_type)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;
	const char *err_str;

	if (err_type > QED_HW_ERR_LAST)
		err_type = QED_HW_ERR_LAST;
	err_str = qed_hw_err_type_descr[err_type];

	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);

	/* Call the HW error handler of the protocol driver.
	 * If it is not available - perform a minimal handling of preventing
	 * HW attentions from being reasserted.
	 */
	if (ops && ops->schedule_hw_err_handler)
		ops->schedule_hw_err_handler(cookie, err_type);
	else
		qed_int_attn_clr_enable(p_hwfn->cdev, true);
}

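/* Both notifications above reach the protocol driver through callbacks it
 * registered in qed_common_cb_ops. A hedged, compiled-out sketch; the
 * handler names are hypothetical.
 */
#if 0
static struct qed_common_cb_ops example_cb_ops = {
	.schedule_recovery_handler = example_recovery_handler,
	.schedule_hw_err_handler = example_hw_err_handler,
};
#endif
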
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;

	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_dbg_grc_config(hwfn, cfg_id, val);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.attn_clr_enable = &qed_int_attn_clr_enable,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
};

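/* Upper-layer drivers consume the ops table above via the interface they
 * obtain from qed at registration time. A hedged, compiled-out sketch of
 * typical usage; the local names are assumptions.
 */
#if 0
	struct qed_dev *cdev = ops->probe(pdev, &probe_params);

	if (cdev) {
		ops->set_name(cdev, name);
		ops->update_drv_state(cdev, true);
	}
#endif
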
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	/* Mark the request before kicking the slowpath task, so the task is
	 * guaranteed to observe the flag once it runs.
	 */
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);