/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>

#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/* API common to all protocols */
enum BAR_ID {
        BAR_ID_0,       /* used for GRC */
        BAR_ID_1        /* used for doorbells */
};

static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
                           enum BAR_ID bar_id)
{
        u32 bar_reg = (bar_id == BAR_ID_0 ?
                       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
        u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);

        if (val)
                return 1 << (val + 15);

        /* Old MFW versions initialize the above register only conditionally */
        if (p_hwfn->cdev->num_hwfns > 1) {
                DP_INFO(p_hwfn,
                        "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
                return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
        } else {
                DP_INFO(p_hwfn,
                        "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
                return 512 * 1024;
        }
}

void qed_init_dp(struct qed_dev *cdev,
                 u32 dp_module, u8 dp_level)
{
        u32 i;

        cdev->dp_level = dp_level;
        cdev->dp_module = dp_module;
        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->dp_level = dp_level;
                p_hwfn->dp_module = dp_module;
        }
}

void qed_init_struct(struct qed_dev *cdev)
{
        u8 i;

        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->cdev = cdev;
                p_hwfn->my_id = i;
                p_hwfn->b_active = false;

                mutex_init(&p_hwfn->dmae_info.mutex);
        }

        /* hwfn 0 is always active */
        cdev->hwfns[0].b_active = true;

        /* set the default cache alignment to 128 */
        cdev->cache_shift = 7;
}

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;

        kfree(qm_info->qm_pq_params);
        qm_info->qm_pq_params = NULL;
        kfree(qm_info->qm_vport_params);
        qm_info->qm_vport_params = NULL;
        kfree(qm_info->qm_port_params);
        qm_info->qm_port_params = NULL;
        kfree(qm_info->wfq_data);
        qm_info->wfq_data = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
        int i;

        kfree(cdev->fw_data);
        cdev->fw_data = NULL;

        kfree(cdev->reset_stats);

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                kfree(p_hwfn->p_tx_cids);
                p_hwfn->p_tx_cids = NULL;
                kfree(p_hwfn->p_rx_cids);
                p_hwfn->p_rx_cids = NULL;
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_cxt_mngr_free(p_hwfn);
                qed_qm_info_free(p_hwfn);
                qed_spq_free(p_hwfn);
                qed_eq_free(p_hwfn, p_hwfn->p_eq);
                qed_consq_free(p_hwfn, p_hwfn->p_consq);
                qed_int_free(p_hwfn);
                qed_iov_free(p_hwfn);
                qed_dmae_info_free(p_hwfn);
        }
}

static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct init_qm_port_params *p_qm_port;
        u8 num_vports, i, vport_id, num_ports;
        u16 num_pqs, multi_cos_tcs = 1;

        memset(qm_info, 0, sizeof(*qm_info));

        num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
        num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

        /* Sanity check that the setup requires a legal number of resources */
        if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
                DP_ERR(p_hwfn,
                       "Need too many Physical queues - 0x%04x when only %04x are available\n",
                       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
                return -EINVAL;
        }

        /* PQs will be arranged as follows: first the per-TC PQs, then the
         * pure-LB queue.
         */
        qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
                                        num_pqs, GFP_KERNEL);
        if (!qm_info->qm_pq_params)
                goto alloc_err;

        qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
                                           num_vports, GFP_KERNEL);
        if (!qm_info->qm_vport_params)
                goto alloc_err;

        qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
                                          MAX_NUM_PORTS, GFP_KERNEL);
        if (!qm_info->qm_port_params)
                goto alloc_err;

        qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
                                    GFP_KERNEL);
        if (!qm_info->wfq_data)
                goto alloc_err;

        vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

        /* First init per-TC PQs */
        for (i = 0; i < multi_cos_tcs; i++) {
                struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];

                params->vport_id = vport_id;
                params->tc_id = p_hwfn->hw_info.non_offload_tc;
                params->wrr_group = 1;
        }

        /* Then init pure-LB PQ */
        qm_info->pure_lb_pq = i;
        qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
        qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
        qm_info->qm_pq_params[i].wrr_group = 1;
        i++;

        qm_info->offload_pq = 0;
        qm_info->num_pqs = num_pqs;
        qm_info->num_vports = num_vports;

        /* Initialize qm port parameters */
        num_ports = p_hwfn->cdev->num_ports_in_engines;
        for (i = 0; i < num_ports; i++) {
                p_qm_port = &qm_info->qm_port_params[i];
                p_qm_port->active = 1;
                p_qm_port->num_active_phys_tcs = 4;
                p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
                p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
        }

        qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

        qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

        qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);

        for (i = 0; i < qm_info->num_vports; i++)
                qm_info->qm_vport_params[i].vport_wfq = 1;

        qm_info->vport_rl_en = 1;
        qm_info->vport_wfq_en = 1;

        return 0;

alloc_err:
        DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
        qed_qm_info_free(p_hwfn);
        return -ENOMEM;
}

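/* Resulting PQ layout (sketch): entries [0 .. multi_cos_tcs - 1] are the
 * per-TC PQs and entry [multi_cos_tcs] is the pure-LB PQ, so with the
 * default multi_cos_tcs == 1 this is simply PQ#0 for traffic and PQ#1 for
 * loopback, both attached to the function's first vport.
 */
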
int qed_resc_alloc(struct qed_dev *cdev)
{
        struct qed_consq *p_consq;
        struct qed_eq *p_eq;
        int i, rc = 0;

        cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
        if (!cdev->fw_data)
                return -ENOMEM;

        /* Allocate memory for the Queue->CID mapping */
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                int tx_size = sizeof(struct qed_hw_cid_data) *
                              RESC_NUM(p_hwfn, QED_L2_QUEUE);
                int rx_size = sizeof(struct qed_hw_cid_data) *
                              RESC_NUM(p_hwfn, QED_L2_QUEUE);

                p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
                if (!p_hwfn->p_tx_cids) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for Tx Cids\n");
                        rc = -ENOMEM;
                        goto alloc_err;
                }

                p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
                if (!p_hwfn->p_rx_cids) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for Rx Cids\n");
                        rc = -ENOMEM;
                        goto alloc_err;
                }
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                /* First allocate the context manager structure */
                rc = qed_cxt_mngr_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Set the HW cid/tid numbers (in the context manager).
                 * Must be done prior to any further computations.
                 */
                rc = qed_cxt_set_pf_params(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Prepare and process QM requirements */
                rc = qed_init_qm_info(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Compute the ILT client partition */
                rc = qed_cxt_cfg_ilt_compute(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* CID map / ILT shadow table / T2
                 * The table sizes are determined by the computations above.
                 */
                rc = qed_cxt_tables_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SPQ, must follow ILT because it initializes SPQ context */
                rc = qed_spq_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SP status block allocation */
                p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
                                                         RESERVED_PTT_DPC);

                rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
                if (rc)
                        goto alloc_err;

                rc = qed_iov_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* EQ */
                p_eq = qed_eq_alloc(p_hwfn, 256);
                if (!p_eq) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                p_hwfn->p_eq = p_eq;

                p_consq = qed_consq_alloc(p_hwfn);
                if (!p_consq) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                p_hwfn->p_consq = p_consq;

                /* DMA info initialization */
                rc = qed_dmae_info_alloc(p_hwfn);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for dmae_info structure\n");
                        goto alloc_err;
                }
        }

        cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
        if (!cdev->reset_stats) {
                DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
                rc = -ENOMEM;
                goto alloc_err;
        }

        return 0;

alloc_err:
        qed_resc_free(cdev);
        return rc;
}

void qed_resc_setup(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_cxt_mngr_setup(p_hwfn);
                qed_spq_setup(p_hwfn);
                qed_eq_setup(p_hwfn, p_hwfn->p_eq);
                qed_consq_setup(p_hwfn, p_hwfn->p_consq);

                /* Read shadow of current MFW mailbox */
                qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
                memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
                       p_hwfn->mcp_info->mfw_mb_cur,
                       p_hwfn->mcp_info->mfw_mb_length);

                qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);

                qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
        }
}

#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u16 id)
{
        u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
        int rc = -EBUSY;

        addr = GTT_BAR0_MAP_REG_USDM_RAM +
                USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

        command |= X_FINAL_CLEANUP_AGG_INT <<
                SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
        command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
        command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
        command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

        /* Make sure notification is not set before initiating final cleanup */
        if (REG_RD(p_hwfn, addr)) {
                DP_NOTICE(p_hwfn,
                          "Unexpected; Found final cleanup notification before initiating final cleanup\n");
                REG_WR(p_hwfn, addr, 0);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
                   id, command);

        qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

        /* Poll until completion */
        while (!REG_RD(p_hwfn, addr) && count--)
                msleep(FINAL_CLEANUP_POLL_TIME);

        if (REG_RD(p_hwfn, addr))
                rc = 0;
        else
                DP_NOTICE(p_hwfn,
                          "Failed to receive FW final cleanup notification\n");

        /* Cleanup afterwards */
        REG_WR(p_hwfn, addr, 0);

        return rc;
}

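/* Worst-case wait in the poll loop above is FINAL_CLEANUP_POLL_CNT *
 * FINAL_CLEANUP_POLL_TIME, i.e. 100 polls of 10ms each - roughly one second
 * before giving up on the firmware acknowledgement.
 */
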
static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
        int hw_mode = 0;

        hw_mode = (1 << MODE_BB_B0);

        switch (p_hwfn->cdev->num_ports_in_engines) {
        case 1:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
                break;
        case 2:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
                break;
        case 4:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
                break;
        default:
                DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
                          p_hwfn->cdev->num_ports_in_engines);
                return;
        }

        switch (p_hwfn->cdev->mf_mode) {
        case QED_MF_DEFAULT:
        case QED_MF_NPAR:
                hw_mode |= 1 << MODE_MF_SI;
                break;
        case QED_MF_OVLAN:
                hw_mode |= 1 << MODE_MF_SD;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
                hw_mode |= 1 << MODE_MF_SI;
        }

        hw_mode |= 1 << MODE_ASIC;

        p_hwfn->hw_info.hw_mode = hw_mode;
}

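/* Example composition: an ASIC with two ports per engine running in the
 * default MF mode ends up with
 *      hw_mode == (1 << MODE_BB_B0) | (1 << MODE_PORTS_PER_ENG_2) |
 *                 (1 << MODE_MF_SI) | (1 << MODE_ASIC)
 */
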
/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
        u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
        int i, sb_id;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                struct qed_igu_info *p_igu_info;
                struct qed_igu_block *p_block;
                struct cau_sb_entry sb_entry;

                p_igu_info = p_hwfn->hw_info.p_igu_info;

                for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
                     sb_id++) {
                        p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
                        if (!p_block->is_pf)
                                continue;

                        qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                              p_block->function_id,
                                              0, 0);
                        STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
                                         sb_entry);
                }
        }
}

static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              int hw_mode)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_common_rt_init_params params;
        struct qed_dev *cdev = p_hwfn->cdev;
        int rc = 0;

        qed_init_cau_rt_data(cdev);

        /* Program GTT windows */
        qed_gtt_init(p_hwfn);

        if (p_hwfn->mcp_info) {
                if (p_hwfn->mcp_info->func_info.bandwidth_max)
                        qm_info->pf_rl_en = 1;
                if (p_hwfn->mcp_info->func_info.bandwidth_min)
                        qm_info->pf_wfq_en = 1;
        }

        memset(&params, 0, sizeof(params));
        params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
        params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
        params.pf_rl_en = qm_info->pf_rl_en;
        params.pf_wfq_en = qm_info->pf_wfq_en;
        params.vport_rl_en = qm_info->vport_rl_en;
        params.vport_wfq_en = qm_info->vport_wfq_en;
        params.port_params = qm_info->qm_port_params;

        qed_qm_common_rt_init(p_hwfn, &params);

        qed_cxt_hw_init_common(p_hwfn);

        /* Close the gate from NIG to BRB/Storm; by default they are open,
         * but we close them to prevent NIG from passing data to reset
         * blocks. This should have been done in the ENGINE phase, but the
         * init-tool lacks proper port-pretend capabilities.
         */
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        qed_port_unpretend(p_hwfn, p_ptt);

        rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
        if (rc != 0)
                return rc;

        qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
        qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

        /* Disable relaxed ordering in the PCI config space */
        qed_wr(p_hwfn, p_ptt, 0x20b4,
               qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

        return rc;
}

static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            int hw_mode)
{
        int rc = 0;

        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
                          hw_mode);
        return rc;
}

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          struct qed_tunn_start_params *p_tunn,
                          int hw_mode,
                          bool b_hw_start,
                          enum qed_int_mode int_mode,
                          bool allow_npar_tx_switch)
{
        u8 rel_pf_id = p_hwfn->rel_pf_id;
        int rc = 0;

        if (p_hwfn->mcp_info) {
                struct qed_mcp_function_info *p_info;

                p_info = &p_hwfn->mcp_info->func_info;
                if (p_info->bandwidth_min)
                        p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

                /* Update rate limit once we'll actually have a link */
                p_hwfn->qm_info.pf_rl = 100000;
        }

        qed_cxt_hw_init_pf(p_hwfn);

        qed_int_igu_init_rt(p_hwfn);

        /* Set VLAN in NIG if needed */
        if (hw_mode & (1 << MODE_MF_SD)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
                             p_hwfn->hw_info.ovlan);
        }

        /* Enable classification by MAC if needed */
        if (hw_mode & (1 << MODE_MF_SI)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                           "Configuring TAGMAC_CLS_TYPE\n");
                STORE_RT_REG(p_hwfn,
                             NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
        }

        /* Protocol Configuration */
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

        /* Clean up the chip from a previous driver if such remains exist */
        rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
        if (rc != 0)
                return rc;

        /* PF Init sequence */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* QM_PF Init sequence (may be invoked separately, e.g. for DCB) */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* Pure runtime initializations - directly to the HW */
        qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

        if (b_hw_start) {
                /* enable interrupts */
                qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

                /* send function start command */
                rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
                if (rc)
                        DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
        }
        return rc;
}

static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt,
                               u8 enable)
{
        u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

        /* Change PF in PXP */
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

        /* wait until value is set - try for 1 second every 50us */
        for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
                val = qed_rd(p_hwfn, p_ptt,
                             PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
                if (val == set_val)
                        break;

                usleep_range(50, 60);
        }

        if (val != set_val) {
                DP_NOTICE(p_hwfn,
                          "PFID_ENABLE_MASTER wasn't changed after a second\n");
                return -EAGAIN;
        }

        return 0;
}

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_main_ptt)
{
        /* Read shadow of current MFW mailbox */
        qed_mcp_read_mb(p_hwfn, p_main_ptt);
        memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
               p_hwfn->mcp_info->mfw_mb_cur,
               p_hwfn->mcp_info->mfw_mb_length);
}

int qed_hw_init(struct qed_dev *cdev,
                struct qed_tunn_start_params *p_tunn,
                bool b_hw_start,
                enum qed_int_mode int_mode,
                bool allow_npar_tx_switch,
                const u8 *bin_fw_data)
{
        u32 load_code, param;
        int rc, mfw_rc, i;

        rc = qed_init_fw_data(cdev, bin_fw_data);
        if (rc != 0)
                return rc;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                /* Enable DMAE in PXP */
                rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

                qed_calc_hw_mode(p_hwfn);

                rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
                                      &load_code);
                if (rc) {
                        DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
                        return rc;
                }

                qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
                           rc, load_code);

                p_hwfn->first_on_engine = (load_code ==
                                           FW_MSG_CODE_DRV_LOAD_ENGINE);

                switch (load_code) {
                case FW_MSG_CODE_DRV_LOAD_ENGINE:
                        rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
                                                p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;
                        /* Fall through */
                case FW_MSG_CODE_DRV_LOAD_PORT:
                        rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
                                              p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;
                        /* Fall through */
                case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                        rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
                                            p_tunn, p_hwfn->hw_info.hw_mode,
                                            b_hw_start, int_mode,
                                            allow_npar_tx_switch);
                        break;
                default:
                        rc = -EINVAL;
                        break;
                }

                if (rc)
                        DP_NOTICE(p_hwfn,
                                  "init phase failed for loadcode 0x%x (rc %d)\n",
                                  load_code, rc);

                /* ACK mfw regardless of success or failure of initialization */
                mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                     DRV_MSG_CODE_LOAD_DONE,
                                     0, &load_code, &param);
                if (rc)
                        return rc;
                if (mfw_rc) {
                        DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
                        return mfw_rc;
                }

                p_hwfn->hw_init_done = true;
        }

        return 0;
}

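/* Note the deliberate fall-through in the switch above: the first function
 * loaded on an engine (FW_MSG_CODE_DRV_LOAD_ENGINE) runs the engine, port
 * and PF init phases; the first function on a port runs the port and PF
 * phases; any later function runs only its own PF phase.
 */
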
#define QED_HW_STOP_RETRY_LIMIT (10)
static inline void qed_hw_timers_stop(struct qed_dev *cdev,
                                      struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt)
{
        int i;

        /* close timers */
        qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
        qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);

        for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
                if ((!qed_rd(p_hwfn, p_ptt,
                             TM_REG_PF_SCAN_ACTIVE_CONN)) &&
                    (!qed_rd(p_hwfn, p_ptt,
                             TM_REG_PF_SCAN_ACTIVE_TASK)))
                        break;

                /* Depending on the number of connections/tasks, a 1ms
                 * sleep may be required between polls.
                 */
                usleep_range(1000, 2000);
        }

        if (i < QED_HW_STOP_RETRY_LIMIT)
                return;

        DP_NOTICE(p_hwfn,
                  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
                  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
                  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}

void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
        int j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
        }
}

int qed_hw_stop(struct qed_dev *cdev)
{
        int rc = 0, t_rc;
        int j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

                /* mark the hw as uninitialized... */
                p_hwfn->hw_init_done = false;

                rc = qed_sp_pf_stop(p_hwfn);
                if (rc)
                        DP_NOTICE(p_hwfn,
                                  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");

                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

                qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

                /* Disable Attention Generation */
                qed_int_igu_disable_int(p_hwfn, p_ptt);

                qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
                qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

                qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
        }

        /* Disable DMAE in PXP - in CMT, this should only be done for
         * the first hw-function, and only after all transactions have
         * stopped for all active hw-functions.
         */
        t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
                                   cdev->hwfns[0].p_main_ptt,
                                   false);
        if (t_rc != 0)
                rc = t_rc;

        return rc;
}

void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
        int j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                DP_VERBOSE(p_hwfn,
                           NETIF_MSG_IFDOWN,
                           "Shutting down the fastpath\n");

                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

                qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
        }
}

void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
        /* Re-open incoming traffic */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}

static int qed_reg_assert(struct qed_hwfn *hwfn,
                          struct qed_ptt *ptt, u32 reg,
                          bool expected)
{
        u32 assert_val = qed_rd(hwfn, ptt, reg);

        if (assert_val != expected) {
                DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
                          reg, expected);
                return -EINVAL;
        }

        return 0;
}

int qed_hw_reset(struct qed_dev *cdev)
{
        int rc = 0;
        u32 unload_resp, unload_param;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

                /* Check for incorrect states */
                qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
                               QM_REG_USG_CNT_PF_TX, 0);
                qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
                               QM_REG_USG_CNT_PF_OTHER, 0);

                /* Disable PF in HW blocks */
                qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt,
                       TCFC_REG_STRONG_ENABLE_PF, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt,
                       CCFC_REG_STRONG_ENABLE_PF, 0);

                /* Send unload command to MCP */
                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_UNLOAD_REQ,
                                 DRV_MB_PARAM_UNLOAD_WOL_MCP,
                                 &unload_resp, &unload_param);
                if (rc) {
                        DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
                        unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
                }

                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_UNLOAD_DONE,
                                 0, &unload_resp, &unload_param);
                if (rc) {
                        DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
                        return rc;
                }
        }

        return rc;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
        qed_ptt_pool_free(p_hwfn);
        kfree(p_hwfn->hw_info.p_igu_info);
}

/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
        /* clear indirect access */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

        /* Clean previous errors if such exist */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
               1 << p_hwfn->abs_pf_id);

        /* enable internal target-read */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}

static void get_function_id(struct qed_hwfn *p_hwfn)
{
        /* ME Register */
        p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

        p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

        p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
        p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                      PXP_CONCRETE_FID_PFID);
        p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                    PXP_CONCRETE_FID_PORT);
}

static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
        u32 *feat_num = p_hwfn->hw_info.feat_num;
        int num_features = 1;

        feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
                                                num_features,
                                        RESC_NUM(p_hwfn, QED_L2_QUEUE));
        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
                   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
                   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
                   num_features);
}

static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
        u32 *resc_start = p_hwfn->hw_info.resc_start;
        u32 *resc_num = p_hwfn->hw_info.resc_num;
        struct qed_sb_cnt_info sb_cnt_info;
        int num_funcs, i;

        num_funcs = MAX_NUM_PFS_BB;

        memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
        qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);

        resc_num[QED_SB] = min_t(u32,
                                 (MAX_SB_PER_PATH_BB / num_funcs),
                                 sb_cnt_info.sb_cnt);
        resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
        resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
        resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
        resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
        resc_num[QED_RL] = 8;
        resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
        resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /* For vlan0 */) /
                             num_funcs;
        resc_num[QED_ILT] = 950;

        for (i = 0; i < QED_MAX_RESC; i++)
                resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

        qed_hw_set_feat(p_hwfn);

        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
                   "The numbers for each resource are:\n"
                   "SB = %d start = %d\n"
                   "L2_QUEUE = %d start = %d\n"
                   "VPORT = %d start = %d\n"
                   "PQ = %d start = %d\n"
                   "RL = %d start = %d\n"
                   "MAC = %d start = %d\n"
                   "VLAN = %d start = %d\n"
                   "ILT = %d start = %d\n",
                   p_hwfn->hw_info.resc_num[QED_SB],
                   p_hwfn->hw_info.resc_start[QED_SB],
                   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
                   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
                   p_hwfn->hw_info.resc_num[QED_VPORT],
                   p_hwfn->hw_info.resc_start[QED_VPORT],
                   p_hwfn->hw_info.resc_num[QED_PQ],
                   p_hwfn->hw_info.resc_start[QED_PQ],
                   p_hwfn->hw_info.resc_num[QED_RL],
                   p_hwfn->hw_info.resc_start[QED_RL],
                   p_hwfn->hw_info.resc_num[QED_MAC],
                   p_hwfn->hw_info.resc_start[QED_MAC],
                   p_hwfn->hw_info.resc_num[QED_VLAN],
                   p_hwfn->hw_info.resc_start[QED_VLAN],
                   p_hwfn->hw_info.resc_num[QED_ILT],
                   p_hwfn->hw_info.resc_start[QED_ILT]);
}

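/* The split above is a plain even division between the maximal number of
 * PFs; e.g., if MAX_NUM_PFS_BB were 8 and MAX_NUM_L2_QUEUES_BB were 128
 * (hypothetical values), each PF would get resc_num[QED_L2_QUEUE] == 16
 * queues, starting at index 16 * rel_pf_id.
 */
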
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt)
{
        u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
        u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
        struct qed_mcp_link_params *link;

        /* Read global nvm_cfg address */
        nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

        /* Verify MCP has initialized it */
        if (!nvm_cfg_addr) {
                DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
                return -EINVAL;
        }

        /* Read nvm_cfg1 (notice this is just an offset, not the offsize) */
        nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, core_cfg);

        core_cfg = qed_rd(p_hwfn, p_ptt, addr);

        switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
                NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
                          core_cfg);
                break;
        }

        /* Read default link configuration */
        link = &p_hwfn->mcp_info->link_input;
        port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
                        offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, speed_cap_mask));
        link->speed.advertised_speeds =
                link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

        p_hwfn->mcp_info->link_capabilities.speed_capabilities =
                link->speed.advertised_speeds;

        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, link_settings));
        switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
                NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
        case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
                link->speed.autoneg = true;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
                link->speed.forced_speed = 1000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
                link->speed.forced_speed = 10000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
                link->speed.forced_speed = 25000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
                link->speed.forced_speed = 40000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
                link->speed.forced_speed = 50000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
                link->speed.forced_speed = 100000;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
                          link_temp);
        }

        link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
        link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
        link->pause.autoneg = !!(link_temp &
                                 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
        link->pause.forced_rx = !!(link_temp &
                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
        link->pause.forced_tx = !!(link_temp &
                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
        link->loopback_mode = 0;

        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
                   link->speed.forced_speed, link->speed.advertised_speeds,
                   link->speed.autoneg, link->pause.autoneg);

        /* Read Multi-function information from shmem */
        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, generic_cont0);

        generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

        mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
                  NVM_CFG1_GLOB_MF_MODE_OFFSET;

        switch (mf_mode) {
        case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
                p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
                break;
        case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
                p_hwfn->cdev->mf_mode = QED_MF_NPAR;
                break;
        case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
                p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
                break;
        }
        DP_INFO(p_hwfn, "Multi function mode is %08x\n",
                p_hwfn->cdev->mf_mode);

        /* Read device capabilities information from shmem */
        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, device_capabilities);

        device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
        if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
                __set_bit(QED_DEV_CAP_ETH,
                          &p_hwfn->hw_info.device_capabilities);

        return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
                enum qed_pci_personality personality)
{
        u32 port_mode;
        int rc;

        /* Since all information is common, only the first hwfn should do this */
        if (IS_LEAD_HWFN(p_hwfn)) {
                rc = qed_iov_hw_info(p_hwfn);
                if (rc)
                        return rc;
        }

        /* Read the port mode */
        port_mode = qed_rd(p_hwfn, p_ptt,
                           CNIG_REG_NW_PORT_MODE_BB_B0);

        if (port_mode < 3) {
                p_hwfn->cdev->num_ports_in_engines = 1;
        } else if (port_mode <= 5) {
                p_hwfn->cdev->num_ports_in_engines = 2;
        } else {
                DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
                          p_hwfn->cdev->num_ports_in_engines);

                /* Default num_ports_in_engines to something */
                p_hwfn->cdev->num_ports_in_engines = 1;
        }

        qed_hw_get_nvm_info(p_hwfn, p_ptt);

        rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
        if (rc)
                return rc;

        if (qed_mcp_is_init(p_hwfn))
                ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
                                p_hwfn->mcp_info->func_info.mac);
        else
                eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

        if (qed_mcp_is_init(p_hwfn)) {
                if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
                        p_hwfn->hw_info.ovlan =
                                p_hwfn->mcp_info->func_info.ovlan;

                qed_mcp_cmd_port_init(p_hwfn, p_ptt);
        }

        if (qed_mcp_is_init(p_hwfn)) {
                enum qed_pci_personality protocol;

                protocol = p_hwfn->mcp_info->func_info.protocol;
                p_hwfn->hw_info.personality = protocol;
        }

        qed_hw_get_resc(p_hwfn);

        return rc;
}

static int qed_get_dev_info(struct qed_dev *cdev)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        u32 tmp;

        /* Read Vendor Id / Device Id */
        pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
                             &cdev->vendor_id);
        pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
                             &cdev->device_id);
        cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                     MISCS_REG_CHIP_NUM);
        cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                     MISCS_REG_CHIP_REV);
        MASK_FIELD(CHIP_REV, cdev->chip_rev);

        cdev->type = QED_DEV_TYPE_BB;
        /* Learn number of HW-functions */
        tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                     MISCS_REG_CMT_ENABLED_FOR_PAIR);

        if (tmp & (1 << p_hwfn->rel_pf_id)) {
                DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
                cdev->num_hwfns = 2;
        } else {
                cdev->num_hwfns = 1;
        }

        cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                    MISCS_REG_CHIP_TEST_REG) >> 4;
        MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
        cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                       MISCS_REG_CHIP_METAL);
        MASK_FIELD(CHIP_METAL, cdev->chip_metal);

        DP_INFO(cdev->hwfns,
                "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
                cdev->chip_num, cdev->chip_rev,
                cdev->chip_bond_id, cdev->chip_metal);

        if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
                DP_NOTICE(cdev->hwfns,
                          "The chip type/rev (BB A0) is not supported!\n");
                return -EINVAL;
        }

        return 0;
}

static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                                 void __iomem *p_regview,
                                 void __iomem *p_doorbells,
                                 enum qed_pci_personality personality)
{
        int rc = 0;

        /* Split PCI bars evenly between hwfns */
        p_hwfn->regview = p_regview;
        p_hwfn->doorbells = p_doorbells;

        /* Validate that chip access is feasible */
        if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
                DP_ERR(p_hwfn,
                       "Reading the ME register returns all Fs; Preventing further chip access\n");
                return -EINVAL;
        }

        get_function_id(p_hwfn);

        /* Allocate PTT pool */
        rc = qed_ptt_pool_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
                goto err0;
        }

        /* Allocate the main PTT */
        p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

        /* First hwfn learns basic information, e.g., number of hwfns */
        if (!p_hwfn->my_id) {
                rc = qed_get_dev_info(p_hwfn->cdev);
                if (rc != 0)
                        goto err1;
        }

        qed_hw_hwfn_prepare(p_hwfn);

        /* Initialize MCP structure */
        rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
                goto err1;
        }

        /* Read the device configuration information from the HW and SHMEM */
        rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to get HW information\n");
                goto err2;
        }

        /* Allocate the init RT array and initialize the init-ops engine */
        rc = qed_init_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
                goto err2;
        }

        return rc;
err2:
        if (IS_LEAD_HWFN(p_hwfn))
                qed_iov_free_hw_info(p_hwfn->cdev);
        qed_mcp_free(p_hwfn);
err1:
        qed_hw_hwfn_free(p_hwfn);
err0:
        return rc;
}

int qed_hw_prepare(struct qed_dev *cdev,
                   int personality)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        int rc;

        /* Store the precompiled init data ptrs */
        qed_init_iro_array(cdev);

        /* Initialize the first hwfn - will learn number of hwfns */
        rc = qed_hw_prepare_single(p_hwfn,
                                   cdev->regview,
                                   cdev->doorbells, personality);
        if (rc)
                return rc;

        personality = p_hwfn->hw_info.personality;

        /* Initialize the rest of the hwfns */
        if (cdev->num_hwfns > 1) {
                void __iomem *p_regview, *p_doorbell;
                u8 __iomem *addr;

                /* adjust bar offset for second engine */
                addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
                p_regview = addr;

                /* adjust doorbell bar offset for second engine */
                addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
                p_doorbell = addr;

                /* prepare second hw function */
                rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
                                           p_doorbell, personality);

                /* in case of error, need to free the previously
                 * initialized hwfn 0.
                 */
                if (rc) {
                        qed_init_free(p_hwfn);
                        qed_mcp_free(p_hwfn);
                        qed_hw_hwfn_free(p_hwfn);
                }
        }

        return rc;
}

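/* Example of the CMT split above: with a 512kB GRC bar, qed_hw_bar_size()
 * reports 512kB and hwfn[1] gets regview = cdev->regview + 256kB - each
 * hw-function owns half of each PCI bar.
 */
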
void qed_hw_remove(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_init_free(p_hwfn);
                qed_hw_hwfn_free(p_hwfn);
                qed_mcp_free(p_hwfn);
        }

        qed_iov_free_hw_info(cdev);
}

int qed_chain_alloc(struct qed_dev *cdev,
                    enum qed_chain_use_mode intended_use,
                    enum qed_chain_mode mode,
                    u16 num_elems,
                    size_t elem_size,
                    struct qed_chain *p_chain)
{
        dma_addr_t p_pbl_phys = 0;
        void *p_pbl_virt = NULL;
        dma_addr_t p_phys = 0;
        void *p_virt = NULL;
        u16 page_cnt = 0;
        size_t size;

        if (mode == QED_CHAIN_MODE_SINGLE)
                page_cnt = 1;
        else
                page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

        size = page_cnt * QED_CHAIN_PAGE_SIZE;
        p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                    size, &p_phys, GFP_KERNEL);
        if (!p_virt) {
                DP_NOTICE(cdev, "Failed to allocate chain mem\n");
                goto nomem;
        }

        if (mode == QED_CHAIN_MODE_PBL) {
                size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
                p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                                size, &p_pbl_phys,
                                                GFP_KERNEL);
                if (!p_pbl_virt) {
                        DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
                        goto nomem;
                }

                qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
                                   (u8)elem_size, intended_use,
                                   p_pbl_phys, p_pbl_virt);
        } else {
                qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
                               (u8)elem_size, intended_use, mode);
        }

        return 0;

nomem:
        dma_free_coherent(&cdev->pdev->dev,
                          page_cnt * QED_CHAIN_PAGE_SIZE,
                          p_virt, p_phys);
        dma_free_coherent(&cdev->pdev->dev,
                          page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
                          p_pbl_virt, p_pbl_phys);

        return -ENOMEM;
}

void qed_chain_free(struct qed_dev *cdev,
                    struct qed_chain *p_chain)
{
        size_t size;

        if (!p_chain->p_virt_addr)
                return;

        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
                dma_free_coherent(&cdev->pdev->dev, size,
                                  p_chain->pbl.p_virt_table,
                                  p_chain->pbl.p_phys_table);
        }

        size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
        dma_free_coherent(&cdev->pdev->dev, size,
                          p_chain->p_virt_addr,
                          p_chain->p_phys_addr);
}

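/* Minimal usage sketch for the two helpers above (sizes are hypothetical;
 * QED_CHAIN_USE_TO_PRODUCE is one of the qed_chain_use_mode values):
 *
 *      struct qed_chain chain;
 *      int rc;
 *
 *      rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_PRODUCE,
 *                           QED_CHAIN_MODE_PBL, 256, 8, &chain);
 *      if (rc)
 *              return rc;
 *      ...
 *      qed_chain_free(cdev, &chain);
 */
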
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
                    u16 src_id, u16 *dst_id)
{
        if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
                u16 min, max;

                min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
                max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
                DP_NOTICE(p_hwfn,
                          "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
                          src_id, min, max);

                return -EINVAL;
        }

        *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

        return 0;
}

int qed_fw_vport(struct qed_hwfn *p_hwfn,
                 u8 src_id, u8 *dst_id)
{
        if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
                u8 min, max;

                min = (u8)RESC_START(p_hwfn, QED_VPORT);
                max = min + RESC_NUM(p_hwfn, QED_VPORT);
                DP_NOTICE(p_hwfn,
                          "vport id [%d] is not valid, available indices [%d - %d]\n",
                          src_id, min, max);

                return -EINVAL;
        }

        *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

        return 0;
}

int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
                   u8 src_id, u8 *dst_id)
{
        if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
                u8 min, max;

                min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
                max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
                DP_NOTICE(p_hwfn,
                          "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
                          src_id, min, max);

                return -EINVAL;
        }

        *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

        return 0;
}

/* Calculate final WFQ values for all vports and configure them.
 * After this configuration each vport will have
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
 */
static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
                                             struct qed_ptt *p_ptt,
                                             u32 min_pf_rate)
{
        struct init_qm_vport_params *vport_params;
        int i;

        vport_params = p_hwfn->qm_info.qm_vport_params;

        for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
                u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;

                vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
                                            min_pf_rate;
                qed_init_vport_wfq(p_hwfn, p_ptt,
                                   vport_params[i].first_tx_pq_id,
                                   vport_params[i].vport_wfq);
        }
}

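/* Example of the scaling above: with min_pf_rate == 10000 Mbps and a vport
 * whose min_speed == 2500 Mbps, vport_wfq becomes
 * (2500 * QED_WFQ_UNIT) / 10000, i.e. a quarter of the WFQ scale - which
 * matches the "approx min rate" formula quoted before the function.
 */
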
static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
                                       u32 min_pf_rate)
{
        int i;

        for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
                p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
}

static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
                                           struct qed_ptt *p_ptt,
                                           u32 min_pf_rate)
{
        struct init_qm_vport_params *vport_params;
        int i;

        vport_params = p_hwfn->qm_info.qm_vport_params;

        for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
                qed_init_wfq_default_param(p_hwfn, min_pf_rate);
                qed_init_vport_wfq(p_hwfn, p_ptt,
                                   vport_params[i].first_tx_pq_id,
                                   vport_params[i].vport_wfq);
        }
}

/* This function performs several validations for WFQ
 * configuration and the required min rate for a given vport:
 * 1. req_rate must be greater than one percent of min_pf_rate.
 * 2. req_rate should not cause other vports [not explicitly configured
 *    for WFQ] to get less than one percent of min_pf_rate.
 * 3. total_req_min_rate [the sum of all vports' min rates] shouldn't
 *    exceed min_pf_rate.
 */
static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
                              u16 vport_id, u32 req_rate,
                              u32 min_pf_rate)
{
        u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
        int non_requested_count = 0, req_count = 0, i, num_vports;

        num_vports = p_hwfn->qm_info.num_vports;

        /* Accounting for the vports which are configured for WFQ explicitly */
        for (i = 0; i < num_vports; i++) {
                u32 tmp_speed;

                if ((i != vport_id) &&
                    p_hwfn->qm_info.wfq_data[i].configured) {
                        req_count++;
                        tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
                        total_req_min_rate += tmp_speed;
                }
        }

        /* Include current vport data as well */
        req_count++;
        total_req_min_rate += req_rate;
        non_requested_count = num_vports - req_count;

        if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
                           vport_id, req_rate, min_pf_rate);
                return -EINVAL;
        }

        if (num_vports > QED_WFQ_UNIT) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Number of vports is greater than %d\n",
                           QED_WFQ_UNIT);
                return -EINVAL;
        }

        if (total_req_min_rate > min_pf_rate) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
                           total_req_min_rate, min_pf_rate);
                return -EINVAL;
        }

        total_left_rate = min_pf_rate - total_req_min_rate;

        left_rate_per_vp = total_left_rate / non_requested_count;
        if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                           "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
                           left_rate_per_vp, min_pf_rate);
                return -EINVAL;
        }

        p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
        p_hwfn->qm_info.wfq_data[vport_id].configured = true;

        for (i = 0; i < num_vports; i++) {
                if (p_hwfn->qm_info.wfq_data[i].configured)
                        continue;

                p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
        }

        return 0;
}

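/* Worked example of the checks above, assuming min_pf_rate == 10000 Mbps
 * and num_vports == 4: a request of 4000 Mbps for one vport passes the
 * per-vport and total-rate checks, and the remaining 6000 Mbps is spread
 * as 2000 Mbps over each of the three unconfigured vports, which must in
 * turn stay above the same min_pf_rate / QED_WFQ_UNIT floor.
 */
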
static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
                                                 struct qed_ptt *p_ptt,
                                                 u32 min_pf_rate)
{
        bool use_wfq = false;
        int rc = 0;
        u16 i;

        /* Validate all pre-configured vports for wfq */
        for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
                u32 rate;

                if (!p_hwfn->qm_info.wfq_data[i].configured)
                        continue;

                rate = p_hwfn->qm_info.wfq_data[i].min_speed;
                use_wfq = true;

                rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "WFQ validation failed while configuring min rate\n");
                        break;
                }
        }

        if (!rc && use_wfq)
                qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
        else
                qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);

        return rc;
}

/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                __qed_configure_vp_wfq_on_link_change(p_hwfn,
                                                      p_hwfn->p_dpc_ptt,
                                                      min_pf_rate);
        }
}

int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     struct qed_mcp_link_state *p_link,
                                     u8 max_bw)
{
        int rc = 0;

        p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;

        if (!p_link->line_speed && (max_bw != 100))
                return rc;

        p_link->speed = (p_link->line_speed * max_bw) / 100;
        p_hwfn->qm_info.pf_rl = p_link->speed;

        /* Since the limiter also affects Tx-switched traffic, we don't want it
         * to limit such traffic in case there's no actual limit.
         * In that case, set limit to imaginary high boundary.
         */
        if (max_bw == 100)
                p_hwfn->qm_info.pf_rl = 100000;

        rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
                            p_hwfn->qm_info.pf_rl);

        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                   "Configured MAX bandwidth to be %08x Mb/sec\n",
                   p_link->speed);

        return rc;
}

/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
{
        int i, rc = -EINVAL;

        if (max_bw < 1 || max_bw > 100) {
                DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
                return rc;
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
                struct qed_mcp_link_state *p_link;
                struct qed_ptt *p_ptt;

                p_link = &p_lead->mcp_info->link_output;

                p_ptt = qed_ptt_acquire(p_hwfn);
                if (!p_ptt)
                        return -EBUSY;

                rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
                                                      p_link, max_bw);

                qed_ptt_release(p_hwfn, p_ptt);

                if (rc)
                        break;
        }

        return rc;
}

int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
                                     struct qed_ptt *p_ptt,
                                     struct qed_mcp_link_state *p_link,
                                     u8 min_bw)
{
        int rc = 0;

        p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
        p_hwfn->qm_info.pf_wfq = min_bw;

        if (!p_link->line_speed)
                return rc;

        p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;

        rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);

        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                   "Configured MIN bandwidth to be %d Mb/sec\n",
                   p_link->min_pf_rate);

        return rc;
}

/* Main API to configure PF min bandwidth where bw range is [1-100] */
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
{
        int i, rc = -EINVAL;

        if (min_bw < 1 || min_bw > 100) {
                DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
                return rc;
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
                struct qed_mcp_link_state *p_link;
                struct qed_ptt *p_ptt;

                p_link = &p_lead->mcp_info->link_output;

                p_ptt = qed_ptt_acquire(p_hwfn);
                if (!p_ptt)
                        return -EBUSY;

                rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
                                                      p_link, min_bw);
                if (rc) {
                        qed_ptt_release(p_hwfn, p_ptt);
                        return rc;
                }

                if (p_link->min_pf_rate) {
                        u32 min_rate = p_link->min_pf_rate;

                        rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
                                                                   p_ptt,
                                                                   min_rate);
                }

                qed_ptt_release(p_hwfn, p_ptt);
        }

        return rc;
}
1929 qed_ptt_release(p_hwfn, p_ptt);