/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

/* API common to all protocols */
enum BAR_ID {
        BAR_ID_0,       /* used for GRC */
        BAR_ID_1        /* Used for doorbells */
};

static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
                           enum BAR_ID bar_id)
{
        u32 bar_reg = (bar_id == BAR_ID_0 ?
                       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
        u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);

        if (val)
                return 1 << (val + 15);

        /* The old MFW initialized the above register only conditionally */
        if (p_hwfn->cdev->num_hwfns > 1) {
                DP_INFO(p_hwfn,
                        "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
                return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
        } else {
                DP_INFO(p_hwfn,
                        "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
                return 512 * 1024;
        }
}

void qed_init_dp(struct qed_dev *cdev,
                 u32 dp_module, u8 dp_level)
{
        u32 i;

        cdev->dp_level = dp_level;
        cdev->dp_module = dp_module;
        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->dp_level = dp_level;
                p_hwfn->dp_module = dp_module;
        }
}

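/* Illustrative call (the flag values are hypothetical): a probe path could
 * forward module parameters as
 *
 *      qed_init_dp(cdev, QED_MSG_SP | QED_MSG_IOV, QED_LEVEL_VERBOSE);
 *
 * so that every hwfn shares a single global debug mask and level.
 */
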
void qed_init_struct(struct qed_dev *cdev)
{
        u8 i;

        for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->cdev = cdev;
                p_hwfn->my_id = i;
                p_hwfn->b_active = false;

                mutex_init(&p_hwfn->dmae_info.mutex);
        }

        /* hwfn 0 is always active */
        cdev->hwfns[0].b_active = true;

        /* set the default cache alignment to 128 */
        cdev->cache_shift = 7;
}

static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;

        kfree(qm_info->qm_pq_params);
        qm_info->qm_pq_params = NULL;
        kfree(qm_info->qm_vport_params);
        qm_info->qm_vport_params = NULL;
        kfree(qm_info->qm_port_params);
        qm_info->qm_port_params = NULL;
}

void qed_resc_free(struct qed_dev *cdev)
{
        int i;

        kfree(cdev->fw_data);
        cdev->fw_data = NULL;

        kfree(cdev->reset_stats);

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                kfree(p_hwfn->p_tx_cids);
                p_hwfn->p_tx_cids = NULL;
                kfree(p_hwfn->p_rx_cids);
                p_hwfn->p_rx_cids = NULL;
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_cxt_mngr_free(p_hwfn);
                qed_qm_info_free(p_hwfn);
                qed_spq_free(p_hwfn);
                qed_eq_free(p_hwfn, p_hwfn->p_eq);
                qed_consq_free(p_hwfn, p_hwfn->p_consq);
                qed_int_free(p_hwfn);
                qed_dmae_info_free(p_hwfn);
        }
}

static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct init_qm_port_params *p_qm_port;
        u8 num_vports, i, vport_id, num_ports;
        u16 num_pqs, multi_cos_tcs = 1;

        memset(qm_info, 0, sizeof(*qm_info));

        num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
        num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);

        /* Sanity checking that setup requires legal number of resources */
        if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
                DP_ERR(p_hwfn,
                       "Need too many Physical queues - 0x%04x when only %04x are available\n",
                       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
                return -EINVAL;
        }

        /* PQs will be arranged as follows: First per-TC PQ, then pure-LB
         * queue.
         */
        qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
                                        num_pqs, GFP_KERNEL);
        if (!qm_info->qm_pq_params)
                goto alloc_err;

        qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
                                           num_vports, GFP_KERNEL);
        if (!qm_info->qm_vport_params)
                goto alloc_err;

        qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
                                          MAX_NUM_PORTS, GFP_KERNEL);
        if (!qm_info->qm_port_params)
                goto alloc_err;

        vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);

        /* First init per-TC PQs */
        for (i = 0; i < multi_cos_tcs; i++) {
                struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];

                params->vport_id = vport_id;
                params->tc_id = p_hwfn->hw_info.non_offload_tc;
                params->wrr_group = 1;
        }

        /* Then init pure-LB PQ */
        qm_info->pure_lb_pq = i;
        qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
        qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
        qm_info->qm_pq_params[i].wrr_group = 1;
        i++;

        qm_info->offload_pq = 0;
        qm_info->num_pqs = num_pqs;
        qm_info->num_vports = num_vports;

        /* Initialize qm port parameters */
        num_ports = p_hwfn->cdev->num_ports_in_engines;
        for (i = 0; i < num_ports; i++) {
                p_qm_port = &qm_info->qm_port_params[i];
                p_qm_port->active = 1;
                p_qm_port->num_active_phys_tcs = 4;
                p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
                p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
        }

        qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;

        qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);

        qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);

        qm_info->pf_wfq = 0;
        qm_info->pf_rl = 0;
        qm_info->vport_rl_en = 1;

        return 0;

alloc_err:
        DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
        kfree(qm_info->qm_pq_params);
        kfree(qm_info->qm_vport_params);
        kfree(qm_info->qm_port_params);
        return -ENOMEM;
}

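/* Resulting layout (illustrative): with multi_cos_tcs == 1 the array built
 * above is qm_pq_params[0] = per-TC PQ (non_offload_tc) and
 * qm_pq_params[1] = pure-LB PQ (PURE_LB_TC, recorded in pure_lb_pq), both
 * attached to the first VPORT owned by this PF.
 */
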
int qed_resc_alloc(struct qed_dev *cdev)
{
        struct qed_consq *p_consq;
        struct qed_eq *p_eq;
        int i, rc = 0;

        cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
        if (!cdev->fw_data)
                return -ENOMEM;

        /* Allocate Memory for the Queue->CID mapping */
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                int tx_size = sizeof(struct qed_hw_cid_data) *
                                     RESC_NUM(p_hwfn, QED_L2_QUEUE);
                int rx_size = sizeof(struct qed_hw_cid_data) *
                                     RESC_NUM(p_hwfn, QED_L2_QUEUE);

                p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
                if (!p_hwfn->p_tx_cids) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for Tx Cids\n");
                        rc = -ENOMEM;
                        goto alloc_err;
                }

                p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
                if (!p_hwfn->p_rx_cids) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for Rx Cids\n");
                        rc = -ENOMEM;
                        goto alloc_err;
                }
        }

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                /* First allocate the context manager structure */
                rc = qed_cxt_mngr_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Set the HW cid/tid numbers (in the context manager)
                 * Must be done prior to any further computations.
                 */
                rc = qed_cxt_set_pf_params(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Prepare and process QM requirements */
                rc = qed_init_qm_info(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* Compute the ILT client partition */
                rc = qed_cxt_cfg_ilt_compute(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* CID map / ILT shadow table / T2
                 * The table sizes are determined by the computations above
                 */
                rc = qed_cxt_tables_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SPQ, must follow ILT because initializes SPQ context */
                rc = qed_spq_alloc(p_hwfn);
                if (rc)
                        goto alloc_err;

                /* SP status block allocation */
                p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
                                                         RESERVED_PTT_DPC);

                rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
                if (rc)
                        goto alloc_err;

                /* EQ */
                p_eq = qed_eq_alloc(p_hwfn, 256);
                if (!p_eq) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                p_hwfn->p_eq = p_eq;

                p_consq = qed_consq_alloc(p_hwfn);
                if (!p_consq) {
                        rc = -ENOMEM;
                        goto alloc_err;
                }
                p_hwfn->p_consq = p_consq;

                /* DMA info initialization */
                rc = qed_dmae_info_alloc(p_hwfn);
                if (rc) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate memory for dmae_info structure\n");
                        goto alloc_err;
                }
        }

        cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
        if (!cdev->reset_stats) {
                DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
                rc = -ENOMEM;
                goto alloc_err;
        }

        return rc;

alloc_err:
        qed_resc_free(cdev);
        return rc;
}

void qed_resc_setup(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_cxt_mngr_setup(p_hwfn);
                qed_spq_setup(p_hwfn);
                qed_eq_setup(p_hwfn, p_hwfn->p_eq);
                qed_consq_setup(p_hwfn, p_hwfn->p_consq);

                /* Read shadow of current MFW mailbox */
                qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
                memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
                       p_hwfn->mcp_info->mfw_mb_cur,
                       p_hwfn->mcp_info->mfw_mb_length);

                qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
        }
}

#define FINAL_CLEANUP_POLL_CNT          (100)
#define FINAL_CLEANUP_POLL_TIME         (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u16 id)
{
        u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
        int rc = -EBUSY;

        addr = GTT_BAR0_MAP_REG_USDM_RAM +
                USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);

        command |= X_FINAL_CLEANUP_AGG_INT <<
                SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
        command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
        command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
        command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;

        /* Make sure notification is not set before initiating final cleanup */
        if (REG_RD(p_hwfn, addr)) {
                DP_NOTICE(p_hwfn,
                          "Unexpected; Found final cleanup notification before initiating final cleanup\n");
                REG_WR(p_hwfn, addr, 0);
        }

        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
                   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
                   id, command);

        qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);

        /* Poll until completion */
        while (!REG_RD(p_hwfn, addr) && count--)
                msleep(FINAL_CLEANUP_POLL_TIME);

        if (REG_RD(p_hwfn, addr))
                rc = 0;
        else
                DP_NOTICE(p_hwfn,
                          "Failed to receive FW final cleanup notification\n");

        /* Cleanup afterwards */
        REG_WR(p_hwfn, addr, 0);

        return rc;
}

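/* Sketch of the opcode built above (illustrative): for id == 3 the command
 * packs the aggregated-interrupt index, the vector-enable bit, vector bit 3
 * and SDM_COMP_TYPE_AGG_INT into one 32-bit word written to
 * XSDM_REG_OPERATION_GEN; firmware acknowledges by writing a non-zero value
 * into the polled USTORM final-ack slot.
 */
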
static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
{
        int hw_mode = 0;

        hw_mode = (1 << MODE_BB_B0);

        switch (p_hwfn->cdev->num_ports_in_engines) {
        case 1:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
                break;
        case 2:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
                break;
        case 4:
                hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
                break;
        default:
                DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
                          p_hwfn->cdev->num_ports_in_engines);
                return;
        }

        switch (p_hwfn->cdev->mf_mode) {
        case QED_MF_DEFAULT:
        case QED_MF_NPAR:
                hw_mode |= 1 << MODE_MF_SI;
                break;
        case QED_MF_OVLAN:
                hw_mode |= 1 << MODE_MF_SD;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
                hw_mode |= 1 << MODE_MF_SI;
        }

        hw_mode |= 1 << MODE_ASIC;

        p_hwfn->hw_info.hw_mode = hw_mode;
}

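/* Example result (illustrative): a single-port engine in default MF mode
 * yields hw_mode = (1 << MODE_BB_B0) | (1 << MODE_PORTS_PER_ENG_1) |
 * (1 << MODE_MF_SI) | (1 << MODE_ASIC).
 */
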
/* Init run time data for all PFs on an engine. */
static void qed_init_cau_rt_data(struct qed_dev *cdev)
{
        u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
        int i, sb_id;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
                struct qed_igu_info *p_igu_info;
                struct qed_igu_block *p_block;
                struct cau_sb_entry sb_entry;

                p_igu_info = p_hwfn->hw_info.p_igu_info;

                for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
                     sb_id++) {
                        p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
                        if (!p_block->is_pf)
                                continue;

                        qed_init_cau_sb_entry(p_hwfn, &sb_entry,
                                              p_block->function_id,
                                              0, 0);
                        STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
                                         sb_entry);
                }
        }
}

static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
                              struct qed_ptt *p_ptt,
                              int hw_mode)
{
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_common_rt_init_params params;
        struct qed_dev *cdev = p_hwfn->cdev;
        int rc = 0;

        qed_init_cau_rt_data(cdev);

        /* Program GTT windows */
        qed_gtt_init(p_hwfn);

        if (p_hwfn->mcp_info) {
                if (p_hwfn->mcp_info->func_info.bandwidth_max)
                        qm_info->pf_rl_en = 1;
                if (p_hwfn->mcp_info->func_info.bandwidth_min)
                        qm_info->pf_wfq_en = 1;
        }

        memset(&params, 0, sizeof(params));
        params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
        params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
        params.pf_rl_en = qm_info->pf_rl_en;
        params.pf_wfq_en = qm_info->pf_wfq_en;
        params.vport_rl_en = qm_info->vport_rl_en;
        params.vport_wfq_en = qm_info->vport_wfq_en;
        params.port_params = qm_info->qm_port_params;

        qed_qm_common_rt_init(p_hwfn, &params);

        qed_cxt_hw_init_common(p_hwfn);

        /* Close gate from NIG to BRB/Storm; By default they are open, but
         * we close them to prevent NIG from passing data to reset blocks.
         * Should have been done in the ENGINE phase, but init-tool lacks
         * proper port-pretend capabilities.
         */
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
        qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
        qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
        qed_port_unpretend(p_hwfn, p_ptt);

        rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
        if (rc != 0)
                return rc;

        qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
        qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

        /* Disable relaxed ordering in the PCI config space */
        qed_wr(p_hwfn, p_ptt, 0x20b4,
               qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

        return rc;
}

static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
                            struct qed_ptt *p_ptt,
                            int hw_mode)
{
        int rc = 0;

        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
                          hw_mode);
        return rc;
}

static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          struct qed_tunn_start_params *p_tunn,
                          int hw_mode,
                          bool b_hw_start,
                          enum qed_int_mode int_mode,
                          bool allow_npar_tx_switch)
{
        u8 rel_pf_id = p_hwfn->rel_pf_id;
        int rc = 0;

        if (p_hwfn->mcp_info) {
                struct qed_mcp_function_info *p_info;

                p_info = &p_hwfn->mcp_info->func_info;
                if (p_info->bandwidth_min)
                        p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

                /* Update rate limit once we'll actually have a link */
                p_hwfn->qm_info.pf_rl = 100;
        }

        qed_cxt_hw_init_pf(p_hwfn);

        qed_int_igu_init_rt(p_hwfn);

        /* Set VLAN in NIG if needed */
        if (hw_mode & (1 << MODE_MF_SD)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
                STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
                             p_hwfn->hw_info.ovlan);
        }

        /* Enable classification by MAC if needed */
        if (hw_mode & (1 << MODE_MF_SI)) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                           "Configuring TAGMAC_CLS_TYPE\n");
                STORE_RT_REG(p_hwfn,
                             NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
        }

        /* Protocol Configuration */
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
        STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

        /* Cleanup chip from previous driver if such remains exist */
        rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
        if (rc != 0)
                return rc;

        /* PF Init sequence */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
        rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
        if (rc)
                return rc;

        /* Pure runtime initializations - directly to the HW */
        qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

        if (b_hw_start) {
                /* enable interrupts */
                qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

                /* send function start command */
                rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
                if (rc)
                        DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
        }
        return rc;
}

static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt,
                               u8 enable)
{
        u32 delay_idx = 0, val, set_val = enable ? 1 : 0;

        /* Change PF in PXP */
        qed_wr(p_hwfn, p_ptt,
               PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);

        /* wait until value is set - try for 1 second every 50us */
        for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
                val = qed_rd(p_hwfn, p_ptt,
                             PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
                if (val == set_val)
                        break;

                usleep_range(50, 60);
        }

        if (val != set_val) {
                DP_NOTICE(p_hwfn,
                          "PFID_ENABLE_MASTER wasn't changed after a second\n");
                return -EAGAIN;
        }

        return 0;
}

static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_main_ptt)
{
        /* Read shadow of current MFW mailbox */
        qed_mcp_read_mb(p_hwfn, p_main_ptt);
        memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
               p_hwfn->mcp_info->mfw_mb_cur,
               p_hwfn->mcp_info->mfw_mb_length);
}

int qed_hw_init(struct qed_dev *cdev,
                struct qed_tunn_start_params *p_tunn,
                bool b_hw_start,
                enum qed_int_mode int_mode,
                bool allow_npar_tx_switch,
                const u8 *bin_fw_data)
{
        u32 load_code, param;
        int rc, mfw_rc, i;

        rc = qed_init_fw_data(cdev, bin_fw_data);
        if (rc != 0)
                return rc;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                /* Enable DMAE in PXP */
                rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);

                qed_calc_hw_mode(p_hwfn);

                rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
                                      &load_code);
                if (rc) {
                        DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
                        return rc;
                }

                qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);

                DP_VERBOSE(p_hwfn, QED_MSG_SP,
                           "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
                           rc, load_code);

                p_hwfn->first_on_engine = (load_code ==
                                           FW_MSG_CODE_DRV_LOAD_ENGINE);

                switch (load_code) {
                case FW_MSG_CODE_DRV_LOAD_ENGINE:
                        rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
                                                p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;
                /* Fall through */
                case FW_MSG_CODE_DRV_LOAD_PORT:
                        rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
                                              p_hwfn->hw_info.hw_mode);
                        if (rc)
                                break;
                /* Fall through */
                case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                        rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
                                            p_tunn, p_hwfn->hw_info.hw_mode,
                                            b_hw_start, int_mode,
                                            allow_npar_tx_switch);
                        break;
                default:
                        rc = -EINVAL;
                        break;
                }

                if (rc)
                        DP_NOTICE(p_hwfn,
                                  "init phase failed for loadcode 0x%x (rc %d)\n",
                                  load_code, rc);

                /* ACK mfw regardless of success or failure of initialization */
                mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                     DRV_MSG_CODE_LOAD_DONE,
                                     0, &load_code, &param);
                if (rc)
                        return rc;
                if (mfw_rc) {
                        DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
                        return mfw_rc;
                }

                p_hwfn->hw_init_done = true;
        }

        return 0;
}

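/* The MFW load code dictates how deep the init sequence goes: LOAD_ENGINE
 * falls through ENGINE -> PORT -> FUNCTION, LOAD_PORT falls through
 * PORT -> FUNCTION, and LOAD_FUNCTION runs the PF phase alone, so the
 * shared phases execute exactly once per engine/port.
 */
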
#define QED_HW_STOP_RETRY_LIMIT (10)
static inline void qed_hw_timers_stop(struct qed_dev *cdev,
                                      struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt)
{
        int i;

        /* close timers */
        qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
        qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);

        for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
                if ((!qed_rd(p_hwfn, p_ptt,
                             TM_REG_PF_SCAN_ACTIVE_CONN)) &&
                    (!qed_rd(p_hwfn, p_ptt,
                             TM_REG_PF_SCAN_ACTIVE_TASK)))
                        break;

                /* Dependent on number of connection/tasks, possibly
                 * 1ms sleep is required between polls
                 */
                usleep_range(1000, 2000);
        }

        if (i < QED_HW_STOP_RETRY_LIMIT)
                return;

        DP_NOTICE(p_hwfn,
                  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
                  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
                  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}

void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
        int j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
        }
}

int qed_hw_stop(struct qed_dev *cdev)
{
        int rc = 0, t_rc;
        int j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

                /* mark the hw as uninitialized... */
                p_hwfn->hw_init_done = false;

                rc = qed_sp_pf_stop(p_hwfn);
                if (rc)
                        DP_NOTICE(p_hwfn,
                                  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");

                /* close NIG to BRB gate */
                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

                /* close parser */
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

                qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

                /* Disable Attention Generation */
                qed_int_igu_disable_int(p_hwfn, p_ptt);

                qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
                qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

                qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
        }

        /* Disable DMAE in PXP - in CMT, this should only be done for
         * first hw-function, and only after all transactions have
         * stopped for all active hw-functions.
         */
        t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
                                   cdev->hwfns[0].p_main_ptt,
                                   false);
        if (t_rc != 0)
                rc = t_rc;

        return rc;
}

void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
        int j;

        for_each_hwfn(cdev, j) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
                struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN,
                           "Shutting down the fastpath\n");

                /* close NIG to BRB gate */
                qed_wr(p_hwfn, p_ptt,
                       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

                /* close parser */
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
                qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

                qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

                /* Need to wait 1ms to guarantee SBs are cleared */
                usleep_range(1000, 2000);
        }
}

void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
        /* Re-open incoming traffic */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}

static int qed_reg_assert(struct qed_hwfn *hwfn,
                          struct qed_ptt *ptt, u32 reg,
                          bool expected)
{
        u32 assert_val = qed_rd(hwfn, ptt, reg);

        if (assert_val != expected) {
                DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
                          reg, expected);
                return -EINVAL;
        }

        return 0;
}

int qed_hw_reset(struct qed_dev *cdev)
{
        int rc = 0;
        u32 unload_resp, unload_param;
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

                /* Check for incorrect states */
                qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
                               QM_REG_USG_CNT_PF_TX, 0);
                qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
                               QM_REG_USG_CNT_PF_OTHER, 0);

                /* Disable PF in HW blocks */
                qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt,
                       TCFC_REG_STRONG_ENABLE_PF, 0);
                qed_wr(p_hwfn, p_hwfn->p_main_ptt,
                       CCFC_REG_STRONG_ENABLE_PF, 0);

                /* Send unload command to MCP */
                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_UNLOAD_REQ,
                                 DRV_MB_PARAM_UNLOAD_WOL_MCP,
                                 &unload_resp, &unload_param);
                if (rc) {
                        DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
                        unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
                }

                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_UNLOAD_DONE,
                                 0, &unload_resp, &unload_param);
                if (rc) {
                        DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
                        return rc;
                }
        }

        return rc;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
        qed_ptt_pool_free(p_hwfn);
        kfree(p_hwfn->hw_info.p_igu_info);
}

/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
        /* clear indirect access */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
        qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

        /* Clean Previous errors if such exist */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
               1 << p_hwfn->abs_pf_id);

        /* enable internal target-read */
        qed_wr(p_hwfn, p_hwfn->p_main_ptt,
               PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}

static void get_function_id(struct qed_hwfn *p_hwfn)
{
        /* ME Register */
        p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);

        p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

        p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
        p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                      PXP_CONCRETE_FID_PFID);
        p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
                                    PXP_CONCRETE_FID_PORT);
}

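/* Illustrative decode: a concrete_fid of 0x00050000 yields
 * abs_pf_id = (0x00050000 >> 16) & 0xf = 5, while PXP_CONCRETE_FID_PFID
 * and PXP_CONCRETE_FID_PORT extract the relative PF and port fields from
 * the same register value.
 */
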
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
        u32 *feat_num = p_hwfn->hw_info.feat_num;
        int num_features = 1;

        feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
                                                num_features,
                                        RESC_NUM(p_hwfn, QED_L2_QUEUE));
        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
                   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
                   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
                   num_features);
}

static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
        u32 *resc_start = p_hwfn->hw_info.resc_start;
        u32 *resc_num = p_hwfn->hw_info.resc_num;
        struct qed_sb_cnt_info sb_cnt_info;
        int num_funcs, i;

        num_funcs = MAX_NUM_PFS_BB;

        memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
        qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);

        resc_num[QED_SB] = min_t(u32,
                                 (MAX_SB_PER_PATH_BB / num_funcs),
                                 sb_cnt_info.sb_cnt);
        resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
        resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
        resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
        resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
        resc_num[QED_RL] = 8;
        resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
        resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
                             num_funcs;
        resc_num[QED_ILT] = 950;

        for (i = 0; i < QED_MAX_RESC; i++)
                resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

        qed_hw_set_feat(p_hwfn);

        DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
                   "The numbers for each resource are:\n"
                   "SB = %d start = %d\n"
                   "L2_QUEUE = %d start = %d\n"
                   "VPORT = %d start = %d\n"
                   "PQ = %d start = %d\n"
                   "RL = %d start = %d\n"
                   "MAC = %d start = %d\n"
                   "VLAN = %d start = %d\n"
                   "ILT = %d start = %d\n",
                   p_hwfn->hw_info.resc_num[QED_SB],
                   p_hwfn->hw_info.resc_start[QED_SB],
                   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
                   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
                   p_hwfn->hw_info.resc_num[QED_VPORT],
                   p_hwfn->hw_info.resc_start[QED_VPORT],
                   p_hwfn->hw_info.resc_num[QED_PQ],
                   p_hwfn->hw_info.resc_start[QED_PQ],
                   p_hwfn->hw_info.resc_num[QED_RL],
                   p_hwfn->hw_info.resc_start[QED_RL],
                   p_hwfn->hw_info.resc_num[QED_MAC],
                   p_hwfn->hw_info.resc_start[QED_MAC],
                   p_hwfn->hw_info.resc_num[QED_VLAN],
                   p_hwfn->hw_info.resc_start[QED_VLAN],
                   p_hwfn->hw_info.resc_num[QED_ILT],
                   p_hwfn->hw_info.resc_start[QED_ILT]);
}

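/* Illustrative split (actual values come from the generated HW headers):
 * every resource is divided evenly between the MAX_NUM_PFS_BB functions
 * and PF n starts at n * resc_num[res]; e.g. with resc_num[QED_ILT] = 950,
 * rel_pf_id 2 owns ILT lines 1900..2849.
 */
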
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt)
{
        u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
        u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
        struct qed_mcp_link_params *link;

        /* Read global nvm_cfg address */
        nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

        /* Verify MCP has initialized it */
        if (!nvm_cfg_addr) {
                DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
                return -EINVAL;
        }

        /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
        nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, core_cfg);

        core_cfg = qed_rd(p_hwfn, p_ptt, addr);

        switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
                NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
                break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
                p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
                          core_cfg);
                break;
        }

        /* Read default link configuration */
        link = &p_hwfn->mcp_info->link_input;
        port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
                        offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, speed_cap_mask));
        link->speed.advertised_speeds =
                link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

        p_hwfn->mcp_info->link_capabilities.speed_capabilities =
                link->speed.advertised_speeds;

        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
                           offsetof(struct nvm_cfg1_port, link_settings));
        switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
                NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
        case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
                link->speed.autoneg = true;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
                link->speed.forced_speed = 1000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
                link->speed.forced_speed = 10000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
                link->speed.forced_speed = 25000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
                link->speed.forced_speed = 40000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
                link->speed.forced_speed = 50000;
                break;
        case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
                link->speed.forced_speed = 100000;
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
                          link_temp);
        }

        link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
        link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
        link->pause.autoneg = !!(link_temp &
                                 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
        link->pause.forced_rx = !!(link_temp &
                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
        link->pause.forced_tx = !!(link_temp &
                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
        link->loopback_mode = 0;

        DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
                   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
                   link->speed.forced_speed, link->speed.advertised_speeds,
                   link->speed.autoneg, link->pause.autoneg);

        /* Read Multi-function information from shmem */
        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, generic_cont0);

        generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

        mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
                  NVM_CFG1_GLOB_MF_MODE_OFFSET;

        switch (mf_mode) {
        case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
                p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
                break;
        case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
                p_hwfn->cdev->mf_mode = QED_MF_NPAR;
                break;
        case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
                p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
                break;
        }
        DP_INFO(p_hwfn, "Multi function mode is %08x\n",
                p_hwfn->cdev->mf_mode);

        /* Read device capabilities information from shmem */
        addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
               offsetof(struct nvm_cfg1, glob) +
               offsetof(struct nvm_cfg1_glob, device_capabilities);

        device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
        if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
                __set_bit(QED_DEV_CAP_ETH,
                          &p_hwfn->hw_info.device_capabilities);

        return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
                enum qed_pci_personality personality)
{
        u32 port_mode;
        int rc;

        /* Read the port mode */
        port_mode = qed_rd(p_hwfn, p_ptt,
                           CNIG_REG_NW_PORT_MODE_BB_B0);

        if (port_mode < 3) {
                p_hwfn->cdev->num_ports_in_engines = 1;
        } else if (port_mode <= 5) {
                p_hwfn->cdev->num_ports_in_engines = 2;
        } else {
                DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
                          port_mode);

                /* Default num_ports_in_engines to something */
                p_hwfn->cdev->num_ports_in_engines = 1;
        }

        qed_hw_get_nvm_info(p_hwfn, p_ptt);

        rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
        if (rc)
                return rc;

        if (qed_mcp_is_init(p_hwfn))
                ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
                                p_hwfn->mcp_info->func_info.mac);
        else
                eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

        if (qed_mcp_is_init(p_hwfn)) {
                if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
                        p_hwfn->hw_info.ovlan =
                                p_hwfn->mcp_info->func_info.ovlan;

                qed_mcp_cmd_port_init(p_hwfn, p_ptt);
        }

        if (qed_mcp_is_init(p_hwfn)) {
                enum qed_pci_personality protocol;

                protocol = p_hwfn->mcp_info->func_info.protocol;
                p_hwfn->hw_info.personality = protocol;
        }

        qed_hw_get_resc(p_hwfn);

        return rc;
}

static int qed_get_dev_info(struct qed_dev *cdev)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        u32 tmp;

        /* Read Vendor Id / Device Id */
        pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
                             &cdev->vendor_id);
        pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
                             &cdev->device_id);
        cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                     MISCS_REG_CHIP_NUM);
        cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                     MISCS_REG_CHIP_REV);
        MASK_FIELD(CHIP_REV, cdev->chip_rev);

        cdev->type = QED_DEV_TYPE_BB;
        /* Learn number of HW-functions */
        tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                     MISCS_REG_CMT_ENABLED_FOR_PAIR);

        if (tmp & (1 << p_hwfn->rel_pf_id)) {
                DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
                cdev->num_hwfns = 2;
        } else {
                cdev->num_hwfns = 1;
        }

        cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                    MISCS_REG_CHIP_TEST_REG) >> 4;
        MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
        cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
                                       MISCS_REG_CHIP_METAL);
        MASK_FIELD(CHIP_METAL, cdev->chip_metal);

        DP_INFO(cdev->hwfns,
                "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
                cdev->chip_num, cdev->chip_rev,
                cdev->chip_bond_id, cdev->chip_metal);

        if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
                DP_NOTICE(cdev->hwfns,
                          "The chip type/rev (BB A0) is not supported!\n");
                return -EINVAL;
        }

        return 0;
}

static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                                 void __iomem *p_regview,
                                 void __iomem *p_doorbells,
                                 enum qed_pci_personality personality)
{
        int rc = 0;

        /* Split PCI bars evenly between hwfns */
        p_hwfn->regview = p_regview;
        p_hwfn->doorbells = p_doorbells;

        /* Validate that chip access is feasible */
        if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
                DP_ERR(p_hwfn,
                       "Reading the ME register returns all Fs; Preventing further chip access\n");
                return -EINVAL;
        }

        get_function_id(p_hwfn);

        /* Allocate PTT pool */
        rc = qed_ptt_pool_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
                goto err0;
        }

        /* Allocate the main PTT */
        p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

        /* First hwfn learns basic information, e.g., number of hwfns */
        if (!p_hwfn->my_id) {
                rc = qed_get_dev_info(p_hwfn->cdev);
                if (rc != 0)
                        goto err1;
        }

        qed_hw_hwfn_prepare(p_hwfn);

        /* Initialize MCP structure */
        rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
                goto err1;
        }

        /* Read the device configuration information from the HW and SHMEM */
        rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to get HW information\n");
                goto err2;
        }

        /* Allocate the init RT array and initialize the init-ops engine */
        rc = qed_init_alloc(p_hwfn);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
                goto err2;
        }

        return rc;
err2:
        qed_mcp_free(p_hwfn);
err1:
        qed_hw_hwfn_free(p_hwfn);
err0:
        return rc;
}

int qed_hw_prepare(struct qed_dev *cdev,
                   int personality)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        int rc;

        /* Store the precompiled init data ptrs */
        qed_init_iro_array(cdev);

        /* Initialize the first hwfn - will learn number of hwfns */
        rc = qed_hw_prepare_single(p_hwfn,
                                   cdev->regview,
                                   cdev->doorbells, personality);
        if (rc)
                return rc;

        personality = p_hwfn->hw_info.personality;

        /* Initialize the rest of the hwfns */
        if (cdev->num_hwfns > 1) {
                void __iomem *p_regview, *p_doorbell;
                u8 __iomem *addr;

                /* adjust bar offset for second engine */
                addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
                p_regview = addr;

                /* adjust doorbell bar offset for second engine */
                addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
                p_doorbell = addr;

                /* prepare second hw function */
                rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
                                           p_doorbell, personality);

                /* in case of error, need to free the previously
                 * initialized hwfn 0.
                 */
                if (rc) {
                        qed_init_free(p_hwfn);
                        qed_mcp_free(p_hwfn);
                        qed_hw_hwfn_free(p_hwfn);
                }
        }

        return rc;
}

void qed_hw_remove(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                qed_init_free(p_hwfn);
                qed_hw_hwfn_free(p_hwfn);
                qed_mcp_free(p_hwfn);
        }
}

int qed_chain_alloc(struct qed_dev *cdev,
                    enum qed_chain_use_mode intended_use,
                    enum qed_chain_mode mode,
                    u16 num_elems,
                    size_t elem_size,
                    struct qed_chain *p_chain)
{
        dma_addr_t p_pbl_phys = 0;
        void *p_pbl_virt = NULL;
        dma_addr_t p_phys = 0;
        void *p_virt = NULL;
        u16 page_cnt = 0;
        size_t size;

        if (mode == QED_CHAIN_MODE_SINGLE)
                page_cnt = 1;
        else
                page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

        size = page_cnt * QED_CHAIN_PAGE_SIZE;
        p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                    size, &p_phys, GFP_KERNEL);
        if (!p_virt) {
                DP_NOTICE(cdev, "Failed to allocate chain mem\n");
                goto nomem;
        }

        if (mode == QED_CHAIN_MODE_PBL) {
                size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
                p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                                size, &p_pbl_phys,
                                                GFP_KERNEL);
                if (!p_pbl_virt) {
                        DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
                        goto nomem;
                }

                qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
                                   (u8)elem_size, intended_use,
                                   p_pbl_phys, p_pbl_virt);
        } else {
                qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
                               (u8)elem_size, intended_use, mode);
        }

        return 0;

nomem:
        dma_free_coherent(&cdev->pdev->dev,
                          page_cnt * QED_CHAIN_PAGE_SIZE,
                          p_virt, p_phys);
        dma_free_coherent(&cdev->pdev->dev,
                          page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
                          p_pbl_virt, p_pbl_phys);

        return -ENOMEM;
}

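/* Example call (the ring field is hypothetical): an Rx BD ring of 8192
 * elements in PBL mode could be allocated as
 *
 *      rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *                           QED_CHAIN_MODE_PBL, 8192,
 *                           sizeof(struct eth_rx_bd), &rxq->rx_bd_ring);
 *
 * and released later with qed_chain_free(cdev, &rxq->rx_bd_ring).
 */
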
void qed_chain_free(struct qed_dev *cdev,
                    struct qed_chain *p_chain)
{
        size_t size;

        if (!p_chain->p_virt_addr)
                return;

        if (p_chain->mode == QED_CHAIN_MODE_PBL) {
                size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
                dma_free_coherent(&cdev->pdev->dev, size,
                                  p_chain->pbl.p_virt_table,
                                  p_chain->pbl.p_phys_table);
        }

        size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
        dma_free_coherent(&cdev->pdev->dev, size,
                          p_chain->p_virt_addr,
                          p_chain->p_phys_addr);
}

int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
                    u16 src_id, u16 *dst_id)
{
        if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
                u16 min, max;

                min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
                max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
                DP_NOTICE(p_hwfn,
                          "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
                          src_id, min, max);

                return -EINVAL;
        }

        *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

        return 0;
}

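/* Example (illustrative): if this PF owns 16 L2 queues starting at
 * absolute index 64, qed_fw_l2_queue(p_hwfn, 3, &dst) stores 67 in dst,
 * while src_id >= 16 fails with -EINVAL.
 */
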
int qed_fw_vport(struct qed_hwfn *p_hwfn,
                 u8 src_id, u8 *dst_id)
{
        if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
                u8 min, max;

                min = (u8)RESC_START(p_hwfn, QED_VPORT);
                max = min + RESC_NUM(p_hwfn, QED_VPORT);
                DP_NOTICE(p_hwfn,
                          "vport id [%d] is not valid, available indices [%d - %d]\n",
                          src_id, min, max);

                return -EINVAL;
        }

        *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

        return 0;
}

int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
                   u8 src_id, u8 *dst_id)
{
        if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
                u8 min, max;

                min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
                max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
                DP_NOTICE(p_hwfn,
                          "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
                          src_id, min, max);

                return -EINVAL;
        }

        *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

        return 0;
}