/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
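/* Illustrative expansion (the version numbers here are hypothetical): with
 * FW_MAJOR_VERSION 8, FW_MINOR_VERSION 37, FW_REVISION_VERSION 7 and
 * FW_ENGINEERING_VERSION 0, FW_FILE_VERSION stringifies to "8.37.7.0" and
 * QED_FW_FILE_NAME to "qed/qed_init_values_zipped-8.37.7.0.bin", which
 * request_firmware() later resolves under /lib/firmware.
 */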
static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);
/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}
static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);

	if (cdev->regview)
		iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}
#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}
static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}
/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}
static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}
/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSI-X");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}
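/* Mapping example (illustrative): on a CMT device (num_hwfns == 2), global
 * fastpath index 5 lands on hwfn 5 % 2 == 1 at relative slot 5 / 2 == 2;
 * even indices go to engine 0 and odd indices to engine 1.
 */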
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}
static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}
static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}
static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}
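/* Example (illustrative): in INTa/MSI mode a single-hwfn device is capped at
 * 1 * 63 = 63 fastpath vectors (the 64-bit SIMD status word minus the
 * slowpath bit), while in MSI-X mode the cap is whatever fp_msix_cnt was
 * carved out during interrupt setup.
 */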
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
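/* Worked example (illustrative): a single-hwfn RDMA PF that was granted 16
 * MSI-X vectors reserves vector 0 for the slowpath (fp_msix_base = 1,
 * fp_msix_cnt = 15). With 8 L2 queues, RDMA gets (15 - 8) / 1 = 7 vectors
 * starting at index 1 + 8 = 9, and fp_msix_cnt is trimmed to 8.
 */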
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
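/* Note: the return value above is a count of 32-bit dwords (total_out / 4),
 * presumably because callers consume the unzipped firmware values as an
 * array of u32s; 0 doubles as the error indication.
 */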
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}
static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}
#define QED_PERIODIC_DB_REC_COUNT		100
#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
#define QED_PERIODIC_DB_REC_INTERVAL \
	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
#define QED_PERIODIC_DB_REC_WAIT_COUNT		10
#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
	(QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)
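/* Timing summary (derived from the constants above): periodic doorbell
 * recovery runs every 100 ms for up to 100 iterations, and the stop path
 * polls for a pending run in 100 / 10 = 10 ms steps, i.e. it waits at most
 * about one full recovery interval before flushing the workqueue.
 */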
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}
void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		/* Wait until the last periodic doorbell recovery is executed */
		while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
				&cdev->hwfns[i].slowpath_task_flags) &&
		       sleep_count--)
			msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);

		flush_workqueue(cdev->hwfns[i].slowpath_wq);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}
static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

	qed_ptt_release(hwfn, ptt);
}
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				rc = -EINVAL;
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
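		/* Packing example (illustrative): driver version 8.37.10.20
		 * becomes (8 << 24) | (37 << 16) | (10 << 8) | 20 =
		 * 0x08250a14.
		 */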
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}
static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}
static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}
static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}
static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	u32 sup_caps;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		sup_caps = QED_LM_1000baseT_Full_BIT |
			   QED_LM_1000baseKX_Full_BIT |
			   QED_LM_1000baseX_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		sup_caps = QED_LM_10000baseT_Full_BIT |
			   QED_LM_10000baseKR_Full_BIT |
			   QED_LM_10000baseKX4_Full_BIT |
			   QED_LM_10000baseR_FEC_BIT |
			   QED_LM_10000baseCR_Full_BIT |
			   QED_LM_10000baseSR_Full_BIT |
			   QED_LM_10000baseLR_Full_BIT |
			   QED_LM_10000baseLRM_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
		sup_caps = QED_LM_25000baseKR_Full_BIT |
			   QED_LM_25000baseCR_Full_BIT |
			   QED_LM_25000baseSR_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		sup_caps = QED_LM_40000baseLR4_Full_BIT |
			   QED_LM_40000baseKR4_Full_BIT |
			   QED_LM_40000baseCR4_Full_BIT |
			   QED_LM_40000baseSR4_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		sup_caps = QED_LM_50000baseKR2_Full_BIT |
			   QED_LM_50000baseCR2_Full_BIT |
			   QED_LM_50000baseSR2_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		sup_caps = QED_LM_100000baseKR4_Full_BIT |
			   QED_LM_100000baseSR4_Full_BIT |
			   QED_LM_100000baseCR4_Full_BIT |
			   QED_LM_100000baseLR4_ER4_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}
static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}
static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     u32 *if_capability)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		/* For DAC media multiple speed capabilities are supported */
		capability = capability & speed_mask;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			*if_capability |= QED_LM_1000baseKX_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			*if_capability |= QED_LM_10000baseCR_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			*if_capability |= QED_LM_40000baseCR4_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			*if_capability |= QED_LM_25000baseCR_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseCR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			*if_capability |= QED_LM_100000baseCR4_Full_BIT;
		break;
	case MEDIA_BASE_T:
		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
				*if_capability |= QED_LM_1000baseT_Full_BIT;
			}
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
				*if_capability |= QED_LM_10000baseT_Full_BIT;
			}
		}
		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
				*if_capability |= QED_LM_1000baseT_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
				*if_capability |= QED_LM_10000baseT_Full_BIT;
		}
		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
			if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
			    (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
				*if_capability |= QED_LM_1000baseKX_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
				*if_capability |= QED_LM_10000baseSR_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
				*if_capability |= QED_LM_10000baseLR_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
				*if_capability |= QED_LM_10000baseLRM_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
				*if_capability |= QED_LM_10000baseR_FEC_BIT;
		}
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
				*if_capability |= QED_LM_25000baseSR_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
				*if_capability |= QED_LM_40000baseLR4_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
				*if_capability |= QED_LM_40000baseSR4_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
				*if_capability |= QED_LM_100000baseSR4_Full_BIT;
		}
		break;
	case MEDIA_KR:
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			*if_capability |= QED_LM_1000baseKX_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			*if_capability |= QED_LM_10000baseKR_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			*if_capability |= QED_LM_25000baseKR_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			*if_capability |= QED_LM_40000baseKR4_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			*if_capability |= QED_LM_100000baseKR4_Full_BIT;
		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}
static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (link_caps.default_speed_autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.autoneg)
		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
	else
		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;

	/* Fill link advertised capability */
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 &if_link->advertised_caps);
	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 &if_link->supported_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
		if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}
static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}
static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}
static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc, j;
	u32 val;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	for (j = 0; j < nvm_image->length - 4; j += 4) {
		val = cpu_to_be32(*(u32 *)&buf[j]);
		*(u32 *)&buf[j] = val;
	}

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));

out:
	kfree(buf);

	return rc;
}
/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                         |
 * 4B  | image_type     | Options        |  Number of register settings    |
 * 8B  |                       Value                                       |
 * 12B |                       Mask                                        |
 * 16B |                       Offset                                      |
 * \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
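/* For orientation only - not used by the driver: a hypothetical packed view
 * of one such command as it appears in the batch file (little-endian), with
 * a single value/mask/offset set inlined. Field names are invented here.
 */
struct qed_nvm_cfg_cmd_sketch {
	__le32 cmd_index;		/* 0x4 for an NVM_CHANGE command */
	u8 image_type;			/* which NVM image to patch */
	u8 options;			/* bit 0: recalculate image CRC */
	__le16 num_reg_settings;	/* count of value/mask/offset sets */
	__le32 value;			/* new bits to apply */
	__le32 mask;			/* which bits of 'value' matter */
	__le32 offset;			/* byte offset within the image */
};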
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	*check_resp = !!(**data & BIT(0));
	*data += 4;

	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);

		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}

exit:
	return rc;
}
/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                         |
 * 4B  | b'0: check_response?   | b'1-31  reserved                         |
 * 8B  | File-type |                   reserved                            |
 * 12B |                    Image length in bytes                          |
 * \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}
/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                         |
 * 4B  |                       Length in bytes                             |
 * 8B  | b'0: check_response?   | b'1-31  reserved                         |
 * 12B |                       Offset in bytes                             |
 * 16B |                       Data ...                                    |
 * \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}
/* Binary file format [General header] -
 * /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                           |
 * 4B  |                       Length in bytes                             |
 * 8B  | Highest command in this batchfile |          Reserved             |
 * \----------------------------------------------------------------------/
 */
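/* For orientation only - not part of the driver: a hypothetical packed view
 * of the general header validated below. Field names are invented here.
 */
struct qed_nvm_flash_hdr_sketch {
	__le32 signature;	/* must equal QED_NVM_SIGNATURE */
	__le32 len;		/* must equal the firmware blob size */
	__le16 max_cmd;		/* highest command index used in the file */
	__le16 reserved;
};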
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}
static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}
static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};
const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}