/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/stddef.h>
34 #include <linux/pci.h>
35 #include <linux/kernel.h>
36 #include <linux/slab.h>
37 #include <linux/delay.h>
38 #include <asm/byteorder.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/string.h>
41 #include <linux/module.h>
42 #include <linux/interrupt.h>
43 #include <linux/workqueue.h>
44 #include <linux/ethtool.h>
45 #include <linux/etherdevice.h>
46 #include <linux/vmalloc.h>
47 #include <linux/crash_dump.h>
48 #include <linux/crc32.h>
49 #include <linux/qed/qed_if.h>
50 #include <linux/qed/qed_ll2_if.h>
51 #include <net/devlink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

67 #define QED_ROCE_QPS (8192)
68 #define QED_ROCE_DPIS (8)
69 #define QED_RDMA_SRQS QED_ROCE_QPS
71 static char version[] =
72 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
74 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
75 MODULE_LICENSE("GPL");
76 MODULE_VERSION(DRV_MODULE_VERSION);
78 #define FW_FILE_VERSION \
79 __stringify(FW_MAJOR_VERSION) "." \
80 __stringify(FW_MINOR_VERSION) "." \
81 __stringify(FW_REVISION_VERSION) "." \
82 __stringify(FW_ENGINEERING_VERSION)
84 #define QED_FW_FILE_NAME \
85 "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
87 MODULE_FIRMWARE(QED_FW_FILE_NAME);
static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);

	if (cdev->regview)
		iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

139 #define PCI_REVISION_ID_ERROR_VAL 0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

struct qed_devlink {
	struct qed_dev *cdev;
};

enum qed_devlink_param_id {
	QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	QED_DEVLINK_PARAM_ID_IWARP_CMT,
};

static int qed_dl_param_get(struct devlink *dl, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct qed_devlink *qed_dl;
	struct qed_dev *cdev;

	qed_dl = devlink_priv(dl);
	cdev = qed_dl->cdev;
	ctx->val.vbool = cdev->iwarp_cmt;

	return 0;
}

static int qed_dl_param_set(struct devlink *dl, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct qed_devlink *qed_dl;
	struct qed_dev *cdev;

	qed_dl = devlink_priv(dl);
	cdev = qed_dl->cdev;
	cdev->iwarp_cmt = ctx->val.vbool;

	return 0;
}

static const struct devlink_param qed_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
			     "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     qed_dl_param_get, qed_dl_param_set, NULL),
};

static const struct devlink_ops qed_dl_ops;

static int qed_devlink_register(struct qed_dev *cdev)
{
	union devlink_param_value value;
	struct qed_devlink *qed_dl;
	struct devlink *dl;
	int rc;

	dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
	if (!dl)
		return -ENOMEM;

	qed_dl = devlink_priv(dl);

	cdev->dl = dl;
	qed_dl->cdev = cdev;

	rc = devlink_register(dl, &cdev->pdev->dev);
	if (rc)
		goto err_free;

	rc = devlink_params_register(dl, qed_devlink_params,
				     ARRAY_SIZE(qed_devlink_params));
	if (rc)
		goto err_unregister;

	value.vbool = false;
	devlink_param_driverinit_value_set(dl,
					   QED_DEVLINK_PARAM_ID_IWARP_CMT,
					   value);

	devlink_params_publish(dl);
	cdev->iwarp_cmt = false;

	return 0;

err_unregister:
	devlink_unregister(dl);

err_free:
	cdev->dl = NULL;
	devlink_free(dl);

	return rc;
}

static void qed_devlink_unregister(struct qed_dev *cdev)
{
	if (!cdev->dl)
		return;

	devlink_params_unregister(cdev->dl, qed_devlink_params,
				  ARRAY_SIZE(qed_devlink_params));

	devlink_unregister(cdev->dl);
	devlink_free(cdev->dl);
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_devlink_register(cdev);
	if (rc) {
		DP_INFO(cdev, "Failed to register devlink.\n");
		goto err2;
	}

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_devlink_unregister(cdev);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

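/* Enable MSI-X, falling back to a smaller request if the full one cannot be
 * granted. On CMT devices the retried count must stay a multiple of
 * num_hwfns; e.g. (hypothetical numbers) a grant of 5 out of 8 vectors on a
 * 2-hwfn device is rounded down to 4 so both hwfns get an equal share.
 */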
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  int_params->in.num_vectors, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vector */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSI-X");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

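/* Fastpath vector indices used by the protocol driver are interleaved across
 * engines: index i maps to hwfn (i % num_hwfns), relative slot
 * (i / num_hwfns). Example (illustrative): with 2 hwfns, index 5 lands on
 * hwfn 1, slot 2.
 */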
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

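/* For INTA/MSI, a single 64-bit SISR status word per hwfn multiplexes all
 * events: bit 0 is the slowpath DPC, and bit (j + 1) - tested below as
 * (0x2ULL << j) - belongs to fastpath handler j.
 */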
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

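/* With INTA/MSI all fastpath events of a hwfn share the 64-bit SISR status
 * word read in qed_single_int(); bit 0 is taken by the slowpath, leaving 63
 * usable fastpath slots per hwfn - hence the "num_hwfns * 63" limit below.
 */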
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

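/* Carve the granted vectors into ranges: one slowpath vector per hwfn at the
 * head of the MSI-X table, then L2 fastpath, then - for RDMA personalities -
 * an RDMA range behind the L2 queues. Illustrative example: 2 hwfns granted
 * 18 vectors with 8 L2 queues per engine leaves fp_msix_base = 2, 16
 * fastpath vectors and no dedicated RDMA range.
 */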
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

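/* Inflate a zlib-compressed section of the firmware file into unzip_buf.
 * Returns the decompressed length in 32-bit dwords (total_out / 4), or 0 on
 * any zlib error.
 */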
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

1083 #define QED_PERIODIC_DB_REC_COUNT 10
1084 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100
1085 #define QED_PERIODIC_DB_REC_INTERVAL \
1086 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
1087 #define QED_PERIODIC_DB_REC_WAIT_COUNT 10
1088 #define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
1089 (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)
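
/* Net effect of the knobs above: doorbell recovery re-arms every 100 ms for
 * up to 10 rounds, and qed_slowpath_wq_stop() below polls in 10 ms steps
 * (at most ~100 ms) for the last scheduled round to finish.
 */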
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		/* Wait until the last periodic doorbell recovery is executed */
		while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
				&cdev->hwfns[i].slowpath_task_flags) &&
		       sleep_count--)
			msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);

		flush_workqueue(cdev->hwfns[i].slowpath_wq);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

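/* In CMT mode, L2 status blocks are interleaved across both engines: sb_id
 * maps to hwfn (sb_id % num_hwfns) at relative id (sb_id / num_hwfns), e.g.
 * sb_id 5 on a 2-hwfn device is relative SB 2 of hwfn 1. Storage/RoCE SBs
 * stay on the affinity hwfn only.
 */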
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	u16 rel_sb_id;
	int rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	u16 rel_sb_id;
	int rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	u32 sup_caps;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		sup_caps = QED_LM_1000baseT_Full_BIT |
			   QED_LM_1000baseKX_Full_BIT |
			   QED_LM_1000baseX_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		sup_caps = QED_LM_10000baseT_Full_BIT |
			   QED_LM_10000baseKR_Full_BIT |
			   QED_LM_10000baseKX4_Full_BIT |
			   QED_LM_10000baseR_FEC_BIT |
			   QED_LM_10000baseCR_Full_BIT |
			   QED_LM_10000baseSR_Full_BIT |
			   QED_LM_10000baseLR_Full_BIT |
			   QED_LM_10000baseLRM_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
		sup_caps = QED_LM_25000baseKR_Full_BIT |
			   QED_LM_25000baseCR_Full_BIT |
			   QED_LM_25000baseSR_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		sup_caps = QED_LM_40000baseLR4_Full_BIT |
			   QED_LM_40000baseKR4_Full_BIT |
			   QED_LM_40000baseCR4_Full_BIT |
			   QED_LM_40000baseSR4_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		sup_caps = QED_LM_50000baseKR2_Full_BIT |
			   QED_LM_50000baseCR2_Full_BIT |
			   QED_LM_50000baseSR2_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		sup_caps = QED_LM_100000baseKR4_Full_BIT |
			   QED_LM_100000baseSR4_Full_BIT |
			   QED_LM_100000baseCR4_Full_BIT |
			   QED_LM_100000baseLR4_ER4_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

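/* Translate an NVM speed-capability mask into QED_LM_* link modes, narrowed
 * by the detected media: DAC, BaseT, fibre modules, or KR backplane, each
 * advertising a different set of modes per speed bit.
 */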
static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     u32 *if_capability)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		/* For DAC media multiple speed capabilities are supported */
		capability = capability & speed_mask;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			*if_capability |= QED_LM_1000baseKX_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			*if_capability |= QED_LM_10000baseCR_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			*if_capability |= QED_LM_40000baseCR4_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			*if_capability |= QED_LM_25000baseCR_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseCR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			*if_capability |= QED_LM_100000baseCR4_Full_BIT;
		break;
	case MEDIA_BASE_T:
		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
				*if_capability |= QED_LM_1000baseT_Full_BIT;
			}
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
				*if_capability |= QED_LM_10000baseT_Full_BIT;
			}
		}
		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
				*if_capability |= QED_LM_1000baseT_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
				*if_capability |= QED_LM_10000baseT_Full_BIT;
		}
		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
			if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
			    (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
				*if_capability |= QED_LM_1000baseKX_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
				*if_capability |= QED_LM_10000baseSR_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
				*if_capability |= QED_LM_10000baseLR_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
				*if_capability |= QED_LM_10000baseLRM_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
				*if_capability |= QED_LM_10000baseR_FEC_BIT;
		}
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
				*if_capability |= QED_LM_25000baseSR_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
				*if_capability |= QED_LM_40000baseLR4_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
				*if_capability |= QED_LM_40000baseSR4_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
				*if_capability |= QED_LM_100000baseSR4_Full_BIT;
		}
		break;
	case MEDIA_KR:
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			*if_capability |= QED_LM_1000baseKX_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			*if_capability |= QED_LM_10000baseKR_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			*if_capability |= QED_LM_25000baseKR_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			*if_capability |= QED_LM_40000baseKR4_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			*if_capability |= QED_LM_100000baseKR4_Full_BIT;
		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (link_caps.default_speed_autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.autoneg)
		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
	else
		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;

	/* Fill link advertised capability */
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 &if_link->advertised_caps);
	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 &if_link->supported_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
		if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}

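/* Helper for the '0x4' nvm-change command below: compute the CRC32 the MFW
 * expects over an NVM image - the bytes are swapped to big-endian first,
 * the trailing 4 CRC bytes are excluded, and the result is the inverted
 * big-endian CRC of the remainder.
 */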
static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc = 0;
	u32 val;
	int j;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	for (j = 0; j < nvm_image->length - 4; j += 4) {
		val = cpu_to_be32(*(u32 *)&buf[j]);
		*(u32 *)&buf[j] = val;
	}

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));

out:
	kfree(buf);

	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                        |
 * 4B  | image_type     | Options        |  Number of register settings   |
 * 8B  |                       Value                                      |
 * 12B |                       Mask                                      |
 * 16B |                       Offset                                    |
 * \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type, crc = 0;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	*check_resp = !!(**data & BIT(0));
	*data += 2;

	if (is_crc) {
		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}

exit:
	return rc;
}

2114 /* Binary file format -
2115 * /----------------------------------------------------------------------\
2116 * 0B | 0x3 [command index] |
2117 * 4B | b'0: check_response? | b'1-31 reserved |
2118 * 8B | File-type | reserved |
2119 * 12B | Image length in bytes |
2120 * \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                        |
 * 4B  |                       Length in bytes                            |
 * 8B  | b'0: check_response?   | b'1-31  reserved                        |
 * 12B |                       Offset in bytes                            |
 *     |                       Data                                       |
 * \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}

2184 /* Binary file format [General header] -
2185 * /----------------------------------------------------------------------\
2186 * 0B | QED_NVM_SIGNATURE |
2187 * 4B | Length in bytes |
2188 * 8B | Highest command in this batchfile | Reserved |
 * \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}

static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}

static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}

void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}

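/* The ops tables below are the driver's external interface; protocol
 * drivers (e.g. qede) reach all of the helpers above through these
 * callbacks, exported via linux/qed/qed_if.h.
 */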
2493 static struct qed_selftest_ops qed_selftest_ops_pass = {
2494 .selftest_memory = &qed_selftest_memory,
2495 .selftest_interrupt = &qed_selftest_interrupt,
2496 .selftest_register = &qed_selftest_register,
2497 .selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

2501 const struct qed_common_ops qed_common_ops_pass = {
2502 .selftest = &qed_selftest_ops_pass,
2503 .probe = &qed_probe,
2504 .remove = &qed_remove,
2505 .set_power_state = &qed_set_power_state,
2506 .set_name = &qed_set_name,
2507 .update_pf_params = &qed_update_pf_params,
2508 .slowpath_start = &qed_slowpath_start,
2509 .slowpath_stop = &qed_slowpath_stop,
2510 .set_fp_int = &qed_set_int_fp,
2511 .get_fp_int = &qed_get_int_fp,
2512 .sb_init = &qed_sb_init,
2513 .sb_release = &qed_sb_release,
2514 .simd_handler_config = &qed_simd_handler_config,
2515 .simd_handler_clean = &qed_simd_handler_clean,
2516 .dbg_grc = &qed_dbg_grc,
2517 .dbg_grc_size = &qed_dbg_grc_size,
2518 .can_link_change = &qed_can_link_change,
2519 .set_link = &qed_set_link,
2520 .get_link = &qed_get_current_link,
2521 .drain = &qed_drain,
2522 .update_msglvl = &qed_init_dp,
2523 .dbg_all_data = &qed_dbg_all_data,
2524 .dbg_all_data_size = &qed_dbg_all_data_size,
2525 .chain_alloc = &qed_chain_alloc,
2526 .chain_free = &qed_chain_free,
2527 .nvm_flash = &qed_nvm_flash,
2528 .nvm_get_image = &qed_nvm_get_image,
2529 .set_coalesce = &qed_set_coalesce,
2530 .set_led = &qed_set_led,
2531 .recovery_process = &qed_recovery_process,
2532 .recovery_prolog = &qed_recovery_prolog,
2533 .update_drv_state = &qed_update_drv_state,
2534 .update_mac = &qed_update_mac,
2535 .update_mtu = &qed_update_mtu,
2536 .update_wol = &qed_update_wol,
2537 .db_recovery_add = &qed_db_recovery_add,
2538 .db_recovery_del = &qed_db_recovery_del,
2539 .read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}