1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
12 #include <linux/delay.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/errno.h>
15 #include <linux/kernel.h>
16 #include <linux/mutex.h>
17 #include <linux/pci.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/etherdevice.h>
21 #include <linux/qed/qed_chain.h>
22 #include <linux/qed/qed_if.h>
25 #include "qed_dev_api.h"
28 #include "qed_init_ops.h"
31 #include "qed_reg_addr.h"
34 /* API common to all protocols */
36 BAR_ID_0, /* used for GRC */
37 BAR_ID_1 /* Used for doorbells */
40 static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
43 u32 bar_reg = (bar_id == BAR_ID_0 ?
44 PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
45 u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
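/* A non-zero value encodes the BAR size as a power of two in 32kB
 * granularity: size = 1 << (val + 15) bytes, e.g. val == 1 -> 64kB.
 */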
48 return 1 << (val + 15);
50 /* Old MFW initialized the above registers only conditionally */
51 if (p_hwfn->cdev->num_hwfns > 1) {
53 "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
54 return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
57 "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
62 void qed_init_dp(struct qed_dev *cdev,
63 u32 dp_module, u8 dp_level)
67 cdev->dp_level = dp_level;
68 cdev->dp_module = dp_module;
69 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
70 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
72 p_hwfn->dp_level = dp_level;
73 p_hwfn->dp_module = dp_module;
77 void qed_init_struct(struct qed_dev *cdev)
81 for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
82 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
86 p_hwfn->b_active = false;
88 mutex_init(&p_hwfn->dmae_info.mutex);
91 /* hwfn 0 is always active */
92 cdev->hwfns[0].b_active = true;
94 /* set the default cache alignment to 128 bytes */
95 cdev->cache_shift = 7;
98 static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
100 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
102 kfree(qm_info->qm_pq_params);
103 qm_info->qm_pq_params = NULL;
104 kfree(qm_info->qm_vport_params);
105 qm_info->qm_vport_params = NULL;
106 kfree(qm_info->qm_port_params);
107 qm_info->qm_port_params = NULL;
108 kfree(qm_info->wfq_data);
109 qm_info->wfq_data = NULL;
112 void qed_resc_free(struct qed_dev *cdev)
116 kfree(cdev->fw_data);
117 cdev->fw_data = NULL;
119 kfree(cdev->reset_stats);
121 for_each_hwfn(cdev, i) {
122 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
124 kfree(p_hwfn->p_tx_cids);
125 p_hwfn->p_tx_cids = NULL;
126 kfree(p_hwfn->p_rx_cids);
127 p_hwfn->p_rx_cids = NULL;
130 for_each_hwfn(cdev, i) {
131 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
133 qed_cxt_mngr_free(p_hwfn);
134 qed_qm_info_free(p_hwfn);
135 qed_spq_free(p_hwfn);
136 qed_eq_free(p_hwfn, p_hwfn->p_eq);
137 qed_consq_free(p_hwfn, p_hwfn->p_consq);
138 qed_int_free(p_hwfn);
139 qed_dmae_info_free(p_hwfn);
143 static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
145 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
146 struct init_qm_port_params *p_qm_port;
147 u8 num_vports, i, vport_id, num_ports;
148 u16 num_pqs, multi_cos_tcs = 1;
150 memset(qm_info, 0, sizeof(*qm_info));
152 num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
153 num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
155 /* Sanity checking that setup requires legal number of resources */
156 if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
158 "Need too many Physical queues - 0x%04x when only %04x are available\n",
159 num_pqs, RESC_NUM(p_hwfn, QED_PQ));
163 /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
165 qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
166 num_pqs, GFP_KERNEL);
167 if (!qm_info->qm_pq_params)
170 qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
171 num_vports, GFP_KERNEL);
172 if (!qm_info->qm_vport_params)
175 qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
176 MAX_NUM_PORTS, GFP_KERNEL);
177 if (!qm_info->qm_port_params)
180 qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
182 if (!qm_info->wfq_data)
185 vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
187 /* First init per-TC PQs */
188 for (i = 0; i < multi_cos_tcs; i++) {
189 struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
191 params->vport_id = vport_id;
192 params->tc_id = p_hwfn->hw_info.non_offload_tc;
193 params->wrr_group = 1;
196 /* Then init pure-LB PQ */
197 qm_info->pure_lb_pq = i;
198 qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
199 qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
200 qm_info->qm_pq_params[i].wrr_group = 1;
203 qm_info->offload_pq = 0;
204 qm_info->num_pqs = num_pqs;
205 qm_info->num_vports = num_vports;
207 /* Initialize qm port parameters */
208 num_ports = p_hwfn->cdev->num_ports_in_engines;
209 for (i = 0; i < num_ports; i++) {
210 p_qm_port = &qm_info->qm_port_params[i];
211 p_qm_port->active = 1;
212 p_qm_port->num_active_phys_tcs = 4;
213 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
214 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
217 qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
219 qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
221 qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
223 for (i = 0; i < qm_info->num_vports; i++)
224 qm_info->qm_vport_params[i].vport_wfq = 1;
228 qm_info->vport_rl_en = 1;
229 qm_info->vport_wfq_en = 1;
234 DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
235 qed_qm_info_free(p_hwfn);
239 int qed_resc_alloc(struct qed_dev *cdev)
241 struct qed_consq *p_consq;
245 cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
249 /* Allocate Memory for the Queue->CID mapping */
250 for_each_hwfn(cdev, i) {
251 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
252 int tx_size = sizeof(struct qed_hw_cid_data) *
253 RESC_NUM(p_hwfn, QED_L2_QUEUE);
254 int rx_size = sizeof(struct qed_hw_cid_data) *
255 RESC_NUM(p_hwfn, QED_L2_QUEUE);
257 p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
258 if (!p_hwfn->p_tx_cids) {
260 "Failed to allocate memory for Tx Cids\n");
265 p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
266 if (!p_hwfn->p_rx_cids) {
268 "Failed to allocate memory for Rx Cids\n");
274 for_each_hwfn(cdev, i) {
275 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
277 /* First allocate the context manager structure */
278 rc = qed_cxt_mngr_alloc(p_hwfn);
282 /* Set the HW cid/tid numbers (in the context manager)
283 * Must be done prior to any further computations.
285 rc = qed_cxt_set_pf_params(p_hwfn);
289 /* Prepare and process QM requirements */
290 rc = qed_init_qm_info(p_hwfn);
294 /* Compute the ILT client partition */
295 rc = qed_cxt_cfg_ilt_compute(p_hwfn);
299 /* CID map / ILT shadow table / T2
300 * The table sizes are determined by the computations above
302 rc = qed_cxt_tables_alloc(p_hwfn);
306 /* SPQ, must follow ILT because initializes SPQ context */
307 rc = qed_spq_alloc(p_hwfn);
311 /* SP status block allocation */
312 p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
315 rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
320 p_eq = qed_eq_alloc(p_hwfn, 256);
327 p_consq = qed_consq_alloc(p_hwfn);
332 p_hwfn->p_consq = p_consq;
334 /* DMA info initialization */
335 rc = qed_dmae_info_alloc(p_hwfn);
338 "Failed to allocate memory for dmae_info structure\n");
343 cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
344 if (!cdev->reset_stats) {
345 DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
357 void qed_resc_setup(struct qed_dev *cdev)
361 for_each_hwfn(cdev, i) {
362 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
364 qed_cxt_mngr_setup(p_hwfn);
365 qed_spq_setup(p_hwfn);
366 qed_eq_setup(p_hwfn, p_hwfn->p_eq);
367 qed_consq_setup(p_hwfn, p_hwfn->p_consq);
369 /* Read shadow of current MFW mailbox */
370 qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
371 memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
372 p_hwfn->mcp_info->mfw_mb_cur,
373 p_hwfn->mcp_info->mfw_mb_length);
375 qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
379 #define FINAL_CLEANUP_POLL_CNT (100)
380 #define FINAL_CLEANUP_POLL_TIME (10)
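/* Worst case, the poll loop below waits FINAL_CLEANUP_POLL_CNT *
 * FINAL_CLEANUP_POLL_TIME = 100 * 10ms, i.e. roughly one second.
 */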
381 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
382 struct qed_ptt *p_ptt,
385 u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
388 addr = GTT_BAR0_MAP_REG_USDM_RAM +
389 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
391 command |= X_FINAL_CLEANUP_AGG_INT <<
392 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
393 command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
394 command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
395 command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
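/* The command built above requests an aggregated-interrupt style
 * completion: firmware acks final cleanup by writing to the USTORM
 * RAM offset that is polled below.
 */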
397 /* Make sure notification is not set before initiating final cleanup */
398 if (REG_RD(p_hwfn, addr)) {
401 "Unexpected; Found final cleanup notification before initiating final cleanup\n");
402 REG_WR(p_hwfn, addr, 0);
405 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
406 "Sending final cleanup for PFVF[%d] [Command %08x\n]",
409 qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
411 /* Poll until completion */
412 while (!REG_RD(p_hwfn, addr) && count--)
413 msleep(FINAL_CLEANUP_POLL_TIME);
415 if (REG_RD(p_hwfn, addr))
419 "Failed to receive FW final cleanup notification\n");
421 /* Cleanup afterwards */
422 REG_WR(p_hwfn, addr, 0);
427 static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
431 hw_mode = (1 << MODE_BB_B0);
433 switch (p_hwfn->cdev->num_ports_in_engines) {
435 hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
438 hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
441 hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
444 DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
445 p_hwfn->cdev->num_ports_in_engines);
449 switch (p_hwfn->cdev->mf_mode) {
452 hw_mode |= 1 << MODE_MF_SI;
455 hw_mode |= 1 << MODE_MF_SD;
458 DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
459 hw_mode |= 1 << MODE_MF_SI;
462 hw_mode |= 1 << MODE_ASIC;
464 p_hwfn->hw_info.hw_mode = hw_mode;
467 /* Init run time data for all PFs on an engine. */
468 static void qed_init_cau_rt_data(struct qed_dev *cdev)
470 u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
473 for_each_hwfn(cdev, i) {
474 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
475 struct qed_igu_info *p_igu_info;
476 struct qed_igu_block *p_block;
477 struct cau_sb_entry sb_entry;
479 p_igu_info = p_hwfn->hw_info.p_igu_info;
481 for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
483 p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
487 qed_init_cau_sb_entry(p_hwfn, &sb_entry,
488 p_block->function_id,
490 STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
496 static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
497 struct qed_ptt *p_ptt,
500 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
501 struct qed_qm_common_rt_init_params params;
502 struct qed_dev *cdev = p_hwfn->cdev;
505 qed_init_cau_rt_data(cdev);
507 /* Program GTT windows */
508 qed_gtt_init(p_hwfn);
510 if (p_hwfn->mcp_info) {
511 if (p_hwfn->mcp_info->func_info.bandwidth_max)
512 qm_info->pf_rl_en = 1;
513 if (p_hwfn->mcp_info->func_info.bandwidth_min)
514 qm_info->pf_wfq_en = 1;
517 memset(&params, 0, sizeof(params));
518 params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
519 params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
520 params.pf_rl_en = qm_info->pf_rl_en;
521 params.pf_wfq_en = qm_info->pf_wfq_en;
522 params.vport_rl_en = qm_info->vport_rl_en;
523 params.vport_wfq_en = qm_info->vport_wfq_en;
524 params.port_params = qm_info->qm_port_params;
526 qed_qm_common_rt_init(p_hwfn, &params);
528 qed_cxt_hw_init_common(p_hwfn);
530 /* Close gate from NIG to BRB/Storm; By default they are open, but
531 * we close them to prevent NIG from passing data to reset blocks.
532 * Should have been done in the ENGINE phase, but init-tool lacks
533 * proper port-pretend capabilities.
535 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
536 qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
537 qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
538 qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
539 qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
540 qed_port_unpretend(p_hwfn, p_ptt);
542 rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
546 qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
547 qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
549 /* Disable relaxed ordering in the PCI config space */
550 qed_wr(p_hwfn, p_ptt, 0x20b4,
551 qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
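/* Assumption: 0x20b4 maps the PCIe Device Control register through
 * this window; bit 4 (0x10), cleared above, is its Enable Relaxed
 * Ordering bit.
 */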
556 static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
557 struct qed_ptt *p_ptt,
562 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
567 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
568 struct qed_ptt *p_ptt,
569 struct qed_tunn_start_params *p_tunn,
572 enum qed_int_mode int_mode,
573 bool allow_npar_tx_switch)
575 u8 rel_pf_id = p_hwfn->rel_pf_id;
578 if (p_hwfn->mcp_info) {
579 struct qed_mcp_function_info *p_info;
581 p_info = &p_hwfn->mcp_info->func_info;
582 if (p_info->bandwidth_min)
583 p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
585 /* Update rate limit once we actually have a link */
586 p_hwfn->qm_info.pf_rl = 100000;
589 qed_cxt_hw_init_pf(p_hwfn);
591 qed_int_igu_init_rt(p_hwfn);
593 /* Set VLAN in NIG if needed */
594 if (hw_mode & (1 << MODE_MF_SD)) {
595 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
596 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
597 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
598 p_hwfn->hw_info.ovlan);
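/* (MODE_MF_SD is the switch-dependent multi-function mode, where each
 *  PF is tagged with an outer VLAN; MODE_MF_SI below is the
 *  switch-independent mode, which classifies by MAC instead.)
 */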
601 /* Enable classification by MAC if needed */
602 if (hw_mode & (1 << MODE_MF_SI)) {
603 DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
604 "Configuring TAGMAC_CLS_TYPE\n");
606 NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
609 /* Protocol Configuration */
610 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
611 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
612 STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
614 /* Clean up the chip from a previous driver, if such remains exist */
615 rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
619 /* PF Init sequence */
620 rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
624 /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
625 rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
629 /* Pure runtime initializations - directly to the HW */
630 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
633 /* enable interrupts */
634 qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
636 /* send function start command */
637 rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
639 DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
644 static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
645 struct qed_ptt *p_ptt,
648 u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
650 /* Change PF in PXP */
651 qed_wr(p_hwfn, p_ptt,
652 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
654 /* wait until value is set - try for 1 second every 50us */
655 for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
656 val = qed_rd(p_hwfn, p_ptt,
657 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
661 usleep_range(50, 60);
664 if (val != set_val) {
666 "PFID_ENABLE_MASTER wasn't changed after a second\n");
673 static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
674 struct qed_ptt *p_main_ptt)
676 /* Read shadow of current MFW mailbox */
677 qed_mcp_read_mb(p_hwfn, p_main_ptt);
678 memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
679 p_hwfn->mcp_info->mfw_mb_cur,
680 p_hwfn->mcp_info->mfw_mb_length);
683 int qed_hw_init(struct qed_dev *cdev,
684 struct qed_tunn_start_params *p_tunn,
686 enum qed_int_mode int_mode,
687 bool allow_npar_tx_switch,
688 const u8 *bin_fw_data)
690 u32 load_code, param;
693 rc = qed_init_fw_data(cdev, bin_fw_data);
697 for_each_hwfn(cdev, i) {
698 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
700 /* Enable DMAE in PXP */
701 rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
703 qed_calc_hw_mode(p_hwfn);
705 rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
708 DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
712 qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
714 DP_VERBOSE(p_hwfn, QED_MSG_SP,
715 "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
718 p_hwfn->first_on_engine = (load_code ==
719 FW_MSG_CODE_DRV_LOAD_ENGINE);
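/* The MFW serializes loading: the first PF on the engine also runs
 * the engine- and port-level init phases, the first PF on a port runs
 * the port phase, and every PF runs its own PF phase (in the full
 * source the cases below fall through).
 */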
722 case FW_MSG_CODE_DRV_LOAD_ENGINE:
723 rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
724 p_hwfn->hw_info.hw_mode);
728 case FW_MSG_CODE_DRV_LOAD_PORT:
729 rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
730 p_hwfn->hw_info.hw_mode);
735 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
736 rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
737 p_tunn, p_hwfn->hw_info.hw_mode,
738 b_hw_start, int_mode,
739 allow_npar_tx_switch);
748 "init phase failed for loadcode 0x%x (rc %d)\n",
751 /* ACK mfw regardless of success or failure of initialization */
752 mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
753 DRV_MSG_CODE_LOAD_DONE,
754 0, &load_code, ¶m);
758 DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
762 p_hwfn->hw_init_done = true;
768 #define QED_HW_STOP_RETRY_LIMIT (10)
769 static inline void qed_hw_timers_stop(struct qed_dev *cdev,
770 struct qed_hwfn *p_hwfn,
771 struct qed_ptt *p_ptt)
776 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
777 qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
779 for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
780 if ((!qed_rd(p_hwfn, p_ptt,
781 TM_REG_PF_SCAN_ACTIVE_CONN)) &&
782 (!qed_rd(p_hwfn, p_ptt,
783 TM_REG_PF_SCAN_ACTIVE_TASK)))
786 /* Dependent on number of connection/tasks, possibly
787 * 1ms sleep is required between polls
789 usleep_range(1000, 2000);
792 if (i < QED_HW_STOP_RETRY_LIMIT)
796 "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
797 (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
798 (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
801 void qed_hw_timers_stop_all(struct qed_dev *cdev)
805 for_each_hwfn(cdev, j) {
806 struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
807 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
809 qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
813 int qed_hw_stop(struct qed_dev *cdev)
818 for_each_hwfn(cdev, j) {
819 struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
820 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
822 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
824 /* mark the hw as uninitialized... */
825 p_hwfn->hw_init_done = false;
827 rc = qed_sp_pf_stop(p_hwfn);
830 "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
832 qed_wr(p_hwfn, p_ptt,
833 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
835 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
836 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
837 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
838 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
839 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
841 qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
843 /* Disable Attention Generation */
844 qed_int_igu_disable_int(p_hwfn, p_ptt);
846 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
847 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
849 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
851 /* Need to wait 1ms to guarantee SBs are cleared */
852 usleep_range(1000, 2000);
855 /* Disable DMAE in PXP - in CMT, this should only be done for
856 * first hw-function, and only after all transactions have
857 * stopped for all active hw-functions.
859 t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
860 cdev->hwfns[0].p_main_ptt,
868 void qed_hw_stop_fastpath(struct qed_dev *cdev)
872 for_each_hwfn(cdev, j) {
873 struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
874 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
878 "Shutting down the fastpath\n");
880 qed_wr(p_hwfn, p_ptt,
881 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
883 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
884 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
885 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
886 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
887 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
889 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
891 /* Need to wait 1ms to guarantee SBs are cleared */
892 usleep_range(1000, 2000);
896 void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
898 /* Re-open incoming traffic */
899 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
900 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
903 static int qed_reg_assert(struct qed_hwfn *hwfn,
904 struct qed_ptt *ptt, u32 reg,
907 u32 assert_val = qed_rd(hwfn, ptt, reg);
909 if (assert_val != expected) {
910 DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
918 int qed_hw_reset(struct qed_dev *cdev)
921 u32 unload_resp, unload_param;
924 for_each_hwfn(cdev, i) {
925 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
927 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
929 /* Check for incorrect states */
930 qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
931 QM_REG_USG_CNT_PF_TX, 0);
932 qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
933 QM_REG_USG_CNT_PF_OTHER, 0);
935 /* Disable PF in HW blocks */
936 qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
937 qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
938 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
939 TCFC_REG_STRONG_ENABLE_PF, 0);
940 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
941 CCFC_REG_STRONG_ENABLE_PF, 0);
943 /* Send unload command to MCP */
944 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
945 DRV_MSG_CODE_UNLOAD_REQ,
946 DRV_MB_PARAM_UNLOAD_WOL_MCP,
947 &unload_resp, &unload_param);
949 DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
950 unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
953 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
954 DRV_MSG_CODE_UNLOAD_DONE,
955 0, &unload_resp, &unload_param);
957 DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
965 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
966 static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
968 qed_ptt_pool_free(p_hwfn);
969 kfree(p_hwfn->hw_info.p_igu_info);
972 /* Setup bar access */
973 static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
975 /* clear indirect access */
976 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
977 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
978 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
979 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
981 /* Clean Previous errors if such exist */
982 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
983 PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
984 1 << p_hwfn->abs_pf_id);
986 /* enable internal target-read */
987 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
988 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
991 static void get_function_id(struct qed_hwfn *p_hwfn)
994 p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
996 p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
998 p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
999 p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1000 PXP_CONCRETE_FID_PFID);
1001 p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1002 PXP_CONCRETE_FID_PORT);
1005 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
1007 u32 *feat_num = p_hwfn->hw_info.feat_num;
1008 int num_features = 1;
1010 feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
1012 RESC_NUM(p_hwfn, QED_L2_QUEUE));
1013 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1014 "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
1015 feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
1019 static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1021 u32 *resc_start = p_hwfn->hw_info.resc_start;
1022 u32 *resc_num = p_hwfn->hw_info.resc_num;
1023 struct qed_sb_cnt_info sb_cnt_info;
1026 num_funcs = MAX_NUM_PFS_BB;
1028 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
1029 qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
1031 resc_num[QED_SB] = min_t(u32,
1032 (MAX_SB_PER_PATH_BB / num_funcs),
1033 sb_cnt_info.sb_cnt);
1034 resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
1035 resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
1036 resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
1037 resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
1038 resc_num[QED_RL] = 8;
1039 resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
1040 resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
1042 resc_num[QED_ILT] = 950;
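/* Each PF gets an equal share of every resource; its slice starts at
 * resc_num * rel_pf_id, so slices are contiguous and non-overlapping.
 */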
1044 for (i = 0; i < QED_MAX_RESC; i++)
1045 resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
1047 qed_hw_set_feat(p_hwfn);
1049 DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1050 "The numbers for each resource are:\n"
1051 "SB = %d start = %d\n"
1052 "L2_QUEUE = %d start = %d\n"
1053 "VPORT = %d start = %d\n"
1054 "PQ = %d start = %d\n"
1055 "RL = %d start = %d\n"
1056 "MAC = %d start = %d\n"
1057 "VLAN = %d start = %d\n"
1058 "ILT = %d start = %d\n",
1059 p_hwfn->hw_info.resc_num[QED_SB],
1060 p_hwfn->hw_info.resc_start[QED_SB],
1061 p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
1062 p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
1063 p_hwfn->hw_info.resc_num[QED_VPORT],
1064 p_hwfn->hw_info.resc_start[QED_VPORT],
1065 p_hwfn->hw_info.resc_num[QED_PQ],
1066 p_hwfn->hw_info.resc_start[QED_PQ],
1067 p_hwfn->hw_info.resc_num[QED_RL],
1068 p_hwfn->hw_info.resc_start[QED_RL],
1069 p_hwfn->hw_info.resc_num[QED_MAC],
1070 p_hwfn->hw_info.resc_start[QED_MAC],
1071 p_hwfn->hw_info.resc_num[QED_VLAN],
1072 p_hwfn->hw_info.resc_start[QED_VLAN],
1073 p_hwfn->hw_info.resc_num[QED_ILT],
1074 p_hwfn->hw_info.resc_start[QED_ILT]);
1077 static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
1078 struct qed_ptt *p_ptt)
1080 u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
1081 u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
1082 struct qed_mcp_link_params *link;
1084 /* Read global nvm_cfg address */
1085 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1087 /* Verify MCP has initialized it */
1088 if (!nvm_cfg_addr) {
1089 DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1093 /* Read nvm_cfg1 (notice this is just the offset, not the offsize (TBD)) */
1094 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1096 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1097 offsetof(struct nvm_cfg1, glob) +
1098 offsetof(struct nvm_cfg1_glob, core_cfg);
1100 core_cfg = qed_rd(p_hwfn, p_ptt, addr);
1102 switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1103 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1104 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
1105 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
1107 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
1108 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
1110 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
1111 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
1113 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
1114 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
1116 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
1117 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
1119 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
1120 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
1122 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
1123 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
1125 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
1126 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
1128 case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
1129 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
1132 DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
1137 /* Read default link configuration */
1138 link = &p_hwfn->mcp_info->link_input;
1139 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1140 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
1141 link_temp = qed_rd(p_hwfn, p_ptt,
1143 offsetof(struct nvm_cfg1_port, speed_cap_mask));
1144 link->speed.advertised_speeds =
1145 link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
1147 p_hwfn->mcp_info->link_capabilities.speed_capabilities =
1148 link->speed.advertised_speeds;
1150 link_temp = qed_rd(p_hwfn, p_ptt,
1152 offsetof(struct nvm_cfg1_port, link_settings));
1153 switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
1154 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
1155 case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
1156 link->speed.autoneg = true;
1158 case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
1159 link->speed.forced_speed = 1000;
1161 case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
1162 link->speed.forced_speed = 10000;
1164 case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
1165 link->speed.forced_speed = 25000;
1167 case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
1168 link->speed.forced_speed = 40000;
1170 case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1171 link->speed.forced_speed = 50000;
1173 case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
1174 link->speed.forced_speed = 100000;
1177 DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
1181 link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
1182 link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
1183 link->pause.autoneg = !!(link_temp &
1184 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
1185 link->pause.forced_rx = !!(link_temp &
1186 NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
1187 link->pause.forced_tx = !!(link_temp &
1188 NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
1189 link->loopback_mode = 0;
1191 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1192 "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
1193 link->speed.forced_speed, link->speed.advertised_speeds,
1194 link->speed.autoneg, link->pause.autoneg);
1196 /* Read Multi-function information from shmem */
1197 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1198 offsetof(struct nvm_cfg1, glob) +
1199 offsetof(struct nvm_cfg1_glob, generic_cont0);
1201 generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
1203 mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
1204 NVM_CFG1_GLOB_MF_MODE_OFFSET;
1207 case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
1208 p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
1210 case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
1211 p_hwfn->cdev->mf_mode = QED_MF_NPAR;
1213 case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
1214 p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
1217 DP_INFO(p_hwfn, "Multi function mode is %08x\n",
1218 p_hwfn->cdev->mf_mode);
1220 /* Read device capabilities information from shmem */
1221 addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1222 offsetof(struct nvm_cfg1, glob) +
1223 offsetof(struct nvm_cfg1_glob, device_capabilities);
1225 device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
1226 if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
1227 __set_bit(QED_DEV_CAP_ETH,
1228 &p_hwfn->hw_info.device_capabilities);
1230 return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
1234 qed_get_hw_info(struct qed_hwfn *p_hwfn,
1235 struct qed_ptt *p_ptt,
1236 enum qed_pci_personality personality)
1241 /* Read the port mode */
1242 port_mode = qed_rd(p_hwfn, p_ptt,
1243 CNIG_REG_NW_PORT_MODE_BB_B0);
1245 if (port_mode < 3) {
1246 p_hwfn->cdev->num_ports_in_engines = 1;
1247 } else if (port_mode <= 5) {
1248 p_hwfn->cdev->num_ports_in_engines = 2;
1250 DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
1251 p_hwfn->cdev->num_ports_in_engines);
1253 /* Default num_ports_in_engines to a sane value */
1254 p_hwfn->cdev->num_ports_in_engines = 1;
1257 qed_hw_get_nvm_info(p_hwfn, p_ptt);
1259 rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1263 if (qed_mcp_is_init(p_hwfn))
1264 ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1265 p_hwfn->mcp_info->func_info.mac);
1267 eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1269 if (qed_mcp_is_init(p_hwfn)) {
1270 if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1271 p_hwfn->hw_info.ovlan =
1272 p_hwfn->mcp_info->func_info.ovlan;
1274 qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1277 if (qed_mcp_is_init(p_hwfn)) {
1278 enum qed_pci_personality protocol;
1280 protocol = p_hwfn->mcp_info->func_info.protocol;
1281 p_hwfn->hw_info.personality = protocol;
1284 qed_hw_get_resc(p_hwfn);
1289 static int qed_get_dev_info(struct qed_dev *cdev)
1291 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1294 /* Read Vendor Id / Device Id */
1295 pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
1297 pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
1299 cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1300 MISCS_REG_CHIP_NUM);
1301 cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1302 MISCS_REG_CHIP_REV);
1303 MASK_FIELD(CHIP_REV, cdev->chip_rev);
1305 cdev->type = QED_DEV_TYPE_BB;
1306 /* Learn number of HW-functions */
1307 tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1308 MISCS_REG_CMT_ENABLED_FOR_PAIR);
1310 if (tmp & (1 << p_hwfn->rel_pf_id)) {
1311 DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1312 cdev->num_hwfns = 2;
1314 cdev->num_hwfns = 1;
1317 cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1318 MISCS_REG_CHIP_TEST_REG) >> 4;
1319 MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
1320 cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1321 MISCS_REG_CHIP_METAL);
1322 MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1324 DP_INFO(cdev->hwfns,
1325 "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1326 cdev->chip_num, cdev->chip_rev,
1327 cdev->chip_bond_id, cdev->chip_metal);
1329 if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
1330 DP_NOTICE(cdev->hwfns,
1331 "The chip type/rev (BB A0) is not supported!\n");
1338 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
1339 void __iomem *p_regview,
1340 void __iomem *p_doorbells,
1341 enum qed_pci_personality personality)
1345 /* Split PCI bars evenly between hwfns */
1346 p_hwfn->regview = p_regview;
1347 p_hwfn->doorbells = p_doorbells;
1349 /* Validate that chip access is feasible */
1350 if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
1352 "Reading the ME register returns all Fs; Preventing further chip access\n");
1356 get_function_id(p_hwfn);
1358 /* Allocate PTT pool */
1359 rc = qed_ptt_pool_alloc(p_hwfn);
1361 DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
1365 /* Allocate the main PTT */
1366 p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
1368 /* First hwfn learns basic information, e.g., number of hwfns */
1369 if (!p_hwfn->my_id) {
1370 rc = qed_get_dev_info(p_hwfn->cdev);
1375 qed_hw_hwfn_prepare(p_hwfn);
1377 /* Initialize MCP structure */
1378 rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
1380 DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
1384 /* Read the device configuration information from the HW and SHMEM */
1385 rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
1387 DP_NOTICE(p_hwfn, "Failed to get HW information\n");
1391 /* Allocate the init RT array and initialize the init-ops engine */
1392 rc = qed_init_alloc(p_hwfn);
1394 DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
1400 qed_mcp_free(p_hwfn);
1402 qed_hw_hwfn_free(p_hwfn);
1407 int qed_hw_prepare(struct qed_dev *cdev,
1410 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1413 /* Store the precompiled init data ptrs */
1414 qed_init_iro_array(cdev);
1416 /* Initialize the first hwfn - will learn number of hwfns */
1417 rc = qed_hw_prepare_single(p_hwfn,
1419 cdev->doorbells, personality);
1423 personality = p_hwfn->hw_info.personality;
1425 /* Initialize the rest of the hwfns */
1426 if (cdev->num_hwfns > 1) {
1427 void __iomem *p_regview, *p_doorbell;
1430 /* adjust bar offset for second engine */
1431 addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
1434 /* adjust doorbell bar offset for second engine */
1435 addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
1438 /* prepare second hw function */
1439 rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
1440 p_doorbell, personality);
1442 /* in case of error, need to free the previously
1443 * initialized hwfn 0.
1446 qed_init_free(p_hwfn);
1447 qed_mcp_free(p_hwfn);
1448 qed_hw_hwfn_free(p_hwfn);
1455 void qed_hw_remove(struct qed_dev *cdev)
1459 for_each_hwfn(cdev, i) {
1460 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1462 qed_init_free(p_hwfn);
1463 qed_hw_hwfn_free(p_hwfn);
1464 qed_mcp_free(p_hwfn);
1468 int qed_chain_alloc(struct qed_dev *cdev,
1469 enum qed_chain_use_mode intended_use,
1470 enum qed_chain_mode mode,
1473 struct qed_chain *p_chain)
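/* A chain is a ring of fixed-size elements spread over DMA-coherent
 * pages. In PBL mode an extra table holding one entry per page is also
 * allocated, so the hardware can look pages up indirectly.
 */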
1475 dma_addr_t p_pbl_phys = 0;
1476 void *p_pbl_virt = NULL;
1477 dma_addr_t p_phys = 0;
1478 void *p_virt = NULL;
1482 if (mode == QED_CHAIN_MODE_SINGLE)
1485 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1487 size = page_cnt * QED_CHAIN_PAGE_SIZE;
1488 p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1489 size, &p_phys, GFP_KERNEL);
1491 DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1495 if (mode == QED_CHAIN_MODE_PBL) {
1496 size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1497 p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1501 DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1505 qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1506 (u8)elem_size, intended_use,
1507 p_pbl_phys, p_pbl_virt);
1509 qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1510 (u8)elem_size, intended_use, mode);
1516 dma_free_coherent(&cdev->pdev->dev,
1517 page_cnt * QED_CHAIN_PAGE_SIZE,
1519 dma_free_coherent(&cdev->pdev->dev,
1520 page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1521 p_pbl_virt, p_pbl_phys);
1526 void qed_chain_free(struct qed_dev *cdev,
1527 struct qed_chain *p_chain)
1531 if (!p_chain->p_virt_addr)
1534 if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1535 size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1536 dma_free_coherent(&cdev->pdev->dev, size,
1537 p_chain->pbl.p_virt_table,
1538 p_chain->pbl.p_phys_table);
1541 size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1542 dma_free_coherent(&cdev->pdev->dev, size,
1543 p_chain->p_virt_addr,
1544 p_chain->p_phys_addr);
1547 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1548 u16 src_id, u16 *dst_id)
1550 if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1553 min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1554 max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1556 "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1562 *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1567 int qed_fw_vport(struct qed_hwfn *p_hwfn,
1568 u8 src_id, u8 *dst_id)
1570 if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1573 min = (u8)RESC_START(p_hwfn, QED_VPORT);
1574 max = min + RESC_NUM(p_hwfn, QED_VPORT);
1576 "vport id [%d] is not valid, available indices [%d - %d]\n",
1582 *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1587 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1588 u8 src_id, u8 *dst_id)
1590 if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1593 min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1594 max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
1596 "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1602 *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1607 /* Calculate final WFQ values for all vports and configure them.
1608 * After this configuration each vport will have
1609 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
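/* Illustration (assuming QED_WFQ_UNIT == 100, consistent with the
 * "one percent" checks in qed_init_wfq_param): on a 10000 Mbps PF, a
 * vport with vport_wfq == 20 gets approx 10000 * 20 / 100 = 2000 Mbps.
 */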
1611 static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1612 struct qed_ptt *p_ptt,
1615 struct init_qm_vport_params *vport_params;
1618 vport_params = p_hwfn->qm_info.qm_vport_params;
1620 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1621 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1623 vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
1625 qed_init_vport_wfq(p_hwfn, p_ptt,
1626 vport_params[i].first_tx_pq_id,
1627 vport_params[i].vport_wfq);
1631 static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
1637 for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
1638 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
1641 static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1642 struct qed_ptt *p_ptt,
1645 struct init_qm_vport_params *vport_params;
1648 vport_params = p_hwfn->qm_info.qm_vport_params;
1650 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1651 qed_init_wfq_default_param(p_hwfn, min_pf_rate);
1652 qed_init_vport_wfq(p_hwfn, p_ptt,
1653 vport_params[i].first_tx_pq_id,
1654 vport_params[i].vport_wfq);
1658 /* This function performs several validations of the WFQ configuration
1659 * and of the requested min rate for a given vport (worked example below):
1660 * 1. req_rate must be greater than one percent of min_pf_rate.
1661 * 2. req_rate must not cause the rates of other vports (those not
1662 * explicitly configured for WFQ) to drop below one percent of min_pf_rate.
1663 * 3. total_req_min_rate [sum of all vports' min rates] must not exceed min_pf_rate.
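/* Worked example (illustrative): with min_pf_rate = 10000 Mbps, a
 * req_rate of 50 Mbps fails check 1 (below one percent = 100 Mbps);
 * two vports already configured for 6000 and 5000 Mbps would fail
 * check 3, since 11000 Mbps exceeds min_pf_rate.
 */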
1665 static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
1666 u16 vport_id, u32 req_rate,
1669 u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
1670 int non_requested_count = 0, req_count = 0, i, num_vports;
1672 num_vports = p_hwfn->qm_info.num_vports;
1674 /* Accounting for the vports which are configured for WFQ explicitly */
1675 for (i = 0; i < num_vports; i++) {
1678 if ((i != vport_id) &&
1679 p_hwfn->qm_info.wfq_data[i].configured) {
1681 tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1682 total_req_min_rate += tmp_speed;
1686 /* Include current vport data as well */
1688 total_req_min_rate += req_rate;
1689 non_requested_count = num_vports - req_count;
1691 if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
1692 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1693 "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1694 vport_id, req_rate, min_pf_rate);
1698 if (num_vports > QED_WFQ_UNIT) {
1699 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1700 "Number of vports is greater than %d\n",
1705 if (total_req_min_rate > min_pf_rate) {
1706 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1707 "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
1708 total_req_min_rate, min_pf_rate);
1712 total_left_rate = min_pf_rate - total_req_min_rate;
1714 left_rate_per_vp = total_left_rate / non_requested_count;
1715 if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
1716 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1717 "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1718 left_rate_per_vp, min_pf_rate);
1722 p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
1723 p_hwfn->qm_info.wfq_data[vport_id].configured = true;
1725 for (i = 0; i < num_vports; i++) {
1726 if (p_hwfn->qm_info.wfq_data[i].configured)
1729 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
1735 static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
1736 struct qed_ptt *p_ptt,
1739 bool use_wfq = false;
1743 /* Validate all pre configured vports for wfq */
1744 for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1747 if (!p_hwfn->qm_info.wfq_data[i].configured)
1750 rate = p_hwfn->qm_info.wfq_data[i].min_speed;
1753 rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
1756 "WFQ validation failed while configuring min rate\n");
1762 qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
1764 qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
1769 /* API to configure WFQ from mcp link change */
1770 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
1774 for_each_hwfn(cdev, i) {
1775 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1777 __qed_configure_vp_wfq_on_link_change(p_hwfn,
1783 int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
1784 struct qed_ptt *p_ptt,
1785 struct qed_mcp_link_state *p_link,
1790 p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
1792 if (!p_link->line_speed && (max_bw != 100))
1795 p_link->speed = (p_link->line_speed * max_bw) / 100;
1796 p_hwfn->qm_info.pf_rl = p_link->speed;
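/* e.g. max_bw == 50 on a 40000 Mbps link caps the PF at 20000 Mbps */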
1798 /* Since the limiter also affects Tx-switched traffic, we don't want it
1799 * to limit such traffic in case there's no actual limit.
1800 * In that case, set limit to imaginary high boundary.
1803 p_hwfn->qm_info.pf_rl = 100000;
1805 rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1806 p_hwfn->qm_info.pf_rl);
1808 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1809 "Configured MAX bandwidth to be %08x Mb/sec\n",
1815 /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
1816 int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
1818 int i, rc = -EINVAL;
1820 if (max_bw < 1 || max_bw > 100) {
1821 DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
1825 for_each_hwfn(cdev, i) {
1826 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1827 struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
1828 struct qed_mcp_link_state *p_link;
1829 struct qed_ptt *p_ptt;
1831 p_link = &p_lead->mcp_info->link_output;
1833 p_ptt = qed_ptt_acquire(p_hwfn);
1837 rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
1840 qed_ptt_release(p_hwfn, p_ptt);
1849 int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
1850 struct qed_ptt *p_ptt,
1851 struct qed_mcp_link_state *p_link,
1856 p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
1857 p_hwfn->qm_info.pf_wfq = min_bw;
1859 if (!p_link->line_speed)
1862 p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
1864 rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
1866 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1867 "Configured MIN bandwidth to be %d Mb/sec\n",
1868 p_link->min_pf_rate);
1873 /* Main API to configure PF min bandwidth where bw range is [1-100] */
1874 int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
1876 int i, rc = -EINVAL;
1878 if (min_bw < 1 || min_bw > 100) {
1879 DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
1883 for_each_hwfn(cdev, i) {
1884 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1885 struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
1886 struct qed_mcp_link_state *p_link;
1887 struct qed_ptt *p_ptt;
1889 p_link = &p_lead->mcp_info->link_output;
1891 p_ptt = qed_ptt_acquire(p_hwfn);
1895 rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
1898 qed_ptt_release(p_hwfn, p_ptt);
1902 if (p_link->min_pf_rate) {
1903 u32 min_rate = p_link->min_pf_rate;
1905 rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
1910 qed_ptt_release(p_hwfn, p_ptt);