/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define CHIP_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

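/* Driver<->MFW mailbox flow, in brief: the driver writes the command
 * parameter and then the command word (carrying an incremented sequence
 * number) into the shmem-based mailbox, and polls fw_mb_header until the
 * MFW echoes that sequence number back. An illustrative sketch only:
 *
 *	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
 *	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, cmd | seq_num);
 *	... poll DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header) for seq_num ...
 */
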
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;

	return true;
}

void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

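/* Each mailbox command in flight is tracked by one such element on
 * mcp_info->cmd_list. Elements are allocated under cmd_lock (hence the
 * GFP_ATOMIC below) and removed once the MFW response has been copied
 * back into the element's p_mb_params.
 */
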
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		goto out;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem,
					 p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);
	p_hwfn->mcp_info = NULL;

	return 0;
}

static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the MFW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

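/* Note: MISCS_REG_GENERIC_POR_0 effectively serves as an MCP reset history
 * counter; its value changes whenever the MCP goes through a reset, which
 * is how both qed_mcp_reset() above and qed_mcp_reread_offsets() detect
 * that a reset actually took place.
 */
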
/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}

/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

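/* Note the ordering above: the union data and the parameter are written
 * before the header, since writing the header (with a fresh sequence
 * number) is what signals the MFW that a new command is pending.
 */
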
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params,
		       u32 max_retries, u32 delay)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	int rc = 0;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
		udelay(delay);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		udelay(delay);
		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		return -EAGAIN;
	}

	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
	u32 delay = CHIP_MCP_RESP_ITER_US;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
				      delay);
}

int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}

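/* A minimal (hypothetical) usage sketch for qed_mcp_cmd(), assuming the
 * caller already holds a valid PTT window:
 *
 *	u32 resp = 0, param = 0;
 *	int rc;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
 *			 &resp, &param);
 *	if (rc)
 *		return rc;	... mailbox failure, e.g. -EBUSY / -EAGAIN ...
 *
 * On success, resp holds the FW_MSG_CODE_* response and param holds the
 * command-specific output value.
 */
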
int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	/* nvm_info needs to be updated */
	p_hwfn->nvm_info.valid = false;

	return 0;
}

int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}

static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)

static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

	return config_bitmap;
}

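/* The resulting bitmap is reported to the MFW as part of the load request;
 * it is carried in the drv_ver_1 field (see qed_mcp_load_req() below).
 */
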
struct qed_load_req_in_params {
	u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}

static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				enum qed_drv_role drv_role,
				u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}

enum qed_load_req_force {
	QED_LOAD_REQ_FORCE_NONE,
	QED_LOAD_REQ_FORCE_PF,
	QED_LOAD_REQ_FORCE_ALL,
};

static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
				  enum qed_load_req_force force_cmd,
				  u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case QED_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case QED_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case QED_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params)
{
	struct qed_load_req_out_params out_params;
	struct qed_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = QED_VERSION;
	in_params.drv_ver_1 = qed_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	qed_get_mfw_force_cmd(p_hwfn,
			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	memset(&out_params, 0, sizeof(out_params));
	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
		memset(&out_params, 0, sizeof(out_params));
		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
		if (rc)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (qed_mcp_can_force_load(in_params.drv_role,
					   out_params.exist_drv_role,
					   p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			qed_get_mfw_force_cmd(p_hwfn,
					      QED_LOAD_REQ_FORCE_ALL,
					      &mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			memset(&out_params, 0, sizeof(out_params));
			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
						&out_params);
			if (rc)
				return rc;
		} else {
			DP_NOTICE(p_hwfn,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
			return -EBUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 */
			DP_NOTICE(p_hwfn, "PF is already loaded\n");
			return -EINVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return -EBUSY;
	}

	p_params->load_code = out_params.load_code;

	return 0;
}

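/* The returned load_code tells the caller how much initialization this PF
 * owns: FW_MSG_CODE_DRV_LOAD_ENGINE means it is the first driver instance
 * on the engine, _PORT the first on its port, and _FUNCTION that engine-
 * and port-level init were already performed by another PF.
 */
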
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		/* Fallthrough */
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			   &mcp_resp, &mcp_param);
}

int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = qed_rd(p_hwfn,
			    p_ptt,
			    p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}

static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
	/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
		p_link->link_up = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
					    p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	qed_link_update(p_hwfn);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}

int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;
	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
		if (params->eee.enable)
			phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & QED_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & QED_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}

static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
					FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
					FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}

static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
	    (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
		qed_sp_pf_update_stag(p_hwfn);
	}

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		    &resp, &param);
}

void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 port_cfg, val;

	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		return;

	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			  offsetof(struct public_port, oem_cfg_port));
	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
		OEM_CFG_CHANNEL_TYPE_OFFSET;
	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
		DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);

	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
	if (val == OEM_CFG_SCHED_TYPE_ETS) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
	} else {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
		DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
	}

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
		OEM_CFG_FUNC_TC_OFFSET;
	p_hwfn->ufp_info.tc = (u8)val;
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
		OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
	} else {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
		DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
	}

	DP_NOTICE(p_hwfn,
		  "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
		  p_hwfn->ufp_info.mode,
		  p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
}

static int
qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_mcp_read_ufp_config(p_hwfn, p_ptt);

	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
		p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;

		qed_qm_reconf(p_hwfn, p_ptt);
	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
		/* Merge UFP TC with the dcbx TC data */
		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
					  QED_DCBX_OPERATIONAL_MIB);
	} else {
		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
		return -EINVAL;
	}

	/* update storm FW with negotiation results */
	qed_sp_pf_update_ufp(p_hwfn);

	/* update stag pcp value */
	qed_sp_pf_update_stag(p_hwfn);

	return 0;
}

int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_OEM_CFG_UPDATE:
			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			qed_mcp_update_stag(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_TLV_REQ:
			qed_mfw_tlv_req(p_hwfn);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

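/* The ACK dwords live in shmem immediately after the message dwords, which
 * is why the write address above is mfw_mb_addr plus sizeof(u32) (the
 * length field) plus MFW_DRV_MSG_MAX_DWORDS() worth of message data.
 */
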
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}

int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	/* Read the address of the nvm_cfg */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read the offset of nvm_cfg1 */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		       offsetof(struct nvm_cfg1, glob) +
		       offsetof(struct nvm_cfg1_glob, mbi_version);
	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
			    mbi_ver_addr) &
		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

	return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt *p_ptt;

	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* There wasn't ever a legacy MFW that published iwarp.
	 * So at this point, this is either plain l2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}

static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for command; Returns %08x\n",
			   resp);
		return -EINVAL;
	}

	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
		*p_proto = QED_PCI_ETH_IWARP;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		*p_proto = QED_PCI_ETH_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);

	return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (!IS_ENABLED(CONFIG_QED_RDMA))
			*p_proto = QED_PCI_ETH;
		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_FCOE:
		*p_proto = QED_PCI_FCOE;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
	/* Fallthrough */
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}

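/* The NVM_CFG4 field encodes the flash size as a power of two in Mbit;
 * MCP_BYTES_PER_MBIT_SHIFT (17) converts Mbit to bytes (1 Mbit = 1 << 17
 * bytes). E.g., a field value of 3 yields 1 << (3 + 17) bytes = 1 MiB.
 */
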
static int
qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

static int
qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 num)
{
	u32 resp = 0, param = num, rc_param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
			 param, &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VFs\n", num);
	}

	return rc;
}

int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	if (QED_IS_BB(p_hwfn->cdev))
		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
	else
		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
}

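/* BB-based devices configure MSI-X per VF, and only via the leading hwfn
 * (scaled by the number of engines in CMT mode), whereas AH-based devices
 * configure a single per-PF number of interrupts for its VFs.
 */
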
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct qed_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	__be32 val;
	u32 i;
	int rc;

	memset(&drv_version, 0, sizeof(drv_version));
	drv_version.version = p_ver->version;
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 value, cpu_mode;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
}

int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	u32 mfw_mac[2];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);

	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
	 * in 32-bit granularity.
	 * So the MAC has to be set in native order [and not byte order],
	 * otherwise it would be read incorrectly by MFW after swap.
	 */
	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;

	mb_params.p_data_src = (u8 *)mfw_mac;
	mb_params.data_src_size = 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}

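/* Worked example of the packing above (illustrative): for the MAC
 * 00:0e:1e:11:22:33 the two words become
 *
 *	mfw_mac[0] = 0x000e1e11;	mac[0]..mac[3] in the high bytes
 *	mfw_mac[1] = 0x22330000;	mac[4..5]; low 16 bits stay zero
 *
 * Because the words are built in host order, the 32-bit swap the PCI
 * access performs on LE platforms when writing SHMEM produces exactly
 * the big-endian layout the MFW expects.
 */
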
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

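/* Typical use (sketch): an ethtool "identify" operation blinks the port
 * LED and then hands control back to the firmware:
 *
 *	qed_mcp_set_led(p_hwfn, p_ptt, QED_LED_MODE_ON);
 *	msleep(interval);
 *	qed_mcp_set_led(p_hwfn, p_ptt, QED_LED_MODE_RESTORE);
 *
 * QED_LED_MODE_RESTORE maps to DRV_MB_PARAM_SET_LED_MODE_OPER, i.e.
 * "operational" LED control is returned to the MFW.
 */
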
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_OFFSET),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

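/* Usage sketch: reading a whole image into a caller-supplied buffer.
 * 'image_addr' and 'image_len' are hypothetical values obtained
 * elsewhere (e.g. via the image-attribute helpers further below):
 *
 *	rc = qed_mcp_nvm_read(cdev, image_addr, buf, image_len);
 *
 * Reads are issued in MCP_DRV_NVM_BUF_LEN chunks, and the loop above
 * sleeps roughly once per 4KB so long transfers don't hog the CPU.
 */
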
int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	u32 resp, param;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
			 &resp, &param);
	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

int qed_mcp_nvm_write(struct qed_dev *cdev,
		      u32 cmd, u32 addr, u8 *p_buf, u32 len)
{
	u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	switch (cmd) {
	case QED_PUT_FILE_DATA:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
		break;
	case QED_NVM_WRITE_NVRAM:
		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
		rc = -EINVAL;
		goto out;
	}

	while (buf_idx < len) {
		buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
		nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
			      addr) + buf_idx;
		rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
					&resp, &param, buf_size,
					(u32 *)&p_buf[buf_idx]);
		if (rc) {
			DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_OK &&
		    resp != FW_MSG_CODE_NVM_OK &&
		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
			DP_NOTICE(cdev,
				  "nvm write failed, resp = 0x%08x\n", resp);
			rc = -EINVAL;
			break;
		}

		/* This can be a lengthy process, and it's possible the
		 * scheduler isn't preemptible. Sleep a bit to prevent
		 * CPU hogging.
		 */
		if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
			usleep_range(1000, 2000);

		buf_idx += buf_size;
	}

	cdev->mcp_nvm_resp = resp;
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

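/* Flashing-flow sketch (names are hypothetical): a file-based update
 * first announces the transfer, then streams the payload in chunks:
 *
 *	rc = qed_mcp_nvm_put_file_begin(cdev, addr);
 *	if (!rc)
 *		rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, addr,
 *				       data, data_len);
 *
 * Raw NVRAM writes skip the begin step and use QED_NVM_WRITE_NVRAM with
 * the target offset directly.
 */
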
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, num_images);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
		rc = -EINVAL;

	return rc;
}

int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct bist_nvm_image_att *p_image_att,
				   u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EINVAL;

	return rc;
}

int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
{
	struct qed_nvm_image_info nvm_info;
	struct qed_ptt *p_ptt;
	int rc;
	u32 i;

	if (p_hwfn->nvm_info.valid)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "failed to acquire ptt\n");
		return -EBUSY;
	}

	/* Acquire from MFW the amount of available images */
	nvm_info.num_images = 0;
	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
					     p_ptt, &nvm_info.num_images);
	if (rc == -EOPNOTSUPP) {
		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
		goto out;
	} else if (rc || !nvm_info.num_images) {
		DP_ERR(p_hwfn, "Failed getting number of images\n");
		goto err0;
	}

	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
					   sizeof(struct bist_nvm_image_att),
					   GFP_KERNEL);
	if (!nvm_info.image_att) {
		rc = -ENOMEM;
		goto err0;
	}

	/* Iterate over images and get their attributes */
	for (i = 0; i < nvm_info.num_images; i++) {
		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
						    &nvm_info.image_att[i], i);
		if (rc) {
			DP_ERR(p_hwfn,
			       "Failed getting image index %d attributes\n", i);
			goto err1;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
			   nvm_info.image_att[i].len);
	}
out:
	/* Update hwfn's nvm_info */
	if (nvm_info.num_images) {
		p_hwfn->nvm_info.num_images = nvm_info.num_images;
		kfree(p_hwfn->nvm_info.image_att);
		p_hwfn->nvm_info.image_att = nvm_info.image_att;
		p_hwfn->nvm_info.valid = true;
	}

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;

err1:
	kfree(nvm_info.image_att);
err0:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

static int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  struct qed_nvm_image_att *p_image_att)
{
	enum nvm_image_type type;
	u32 i;

	/* Translate image_id into MFW definitions */
	switch (image_id) {
	case QED_NVM_IMAGE_ISCSI_CFG:
		type = NVM_TYPE_ISCSI_CFG;
		break;
	case QED_NVM_IMAGE_FCOE_CFG:
		type = NVM_TYPE_FCOE_CFG;
		break;
	case QED_NVM_IMAGE_NVM_CFG1:
		type = NVM_TYPE_NVM_CFG1;
		break;
	case QED_NVM_IMAGE_DEFAULT_CFG:
		type = NVM_TYPE_DEFAULT_CFG;
		break;
	case QED_NVM_IMAGE_NVM_META:
		type = NVM_TYPE_META;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
			  image_id);
		return -EINVAL;
	}

	qed_mcp_nvm_info_populate(p_hwfn);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Failed to find nvram image of type %08x\n",
			   image_id);
		return -ENOENT;
	}

	p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	p_image_att->length = p_hwfn->nvm_info.image_att[i].len;

	return 0;
}

int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  u8 *p_buffer, u32 buffer_len)
{
	struct qed_nvm_image_att image_att;
	int rc;

	memset(p_buffer, 0, buffer_len);

	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
	if (rc)
		return rc;

	/* Validate sizes - both the image's and the supplied buffer's */
	if (image_att.length <= 4) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Image [%d] is too small - only %d bytes\n",
			   image_id, image_att.length);
		return -EINVAL;
	}

	if (image_att.length > buffer_len) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_STORAGE,
			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
			   image_id, image_att.length, buffer_len);
		return -ENOMEM;
	}

	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
				p_buffer, image_att.length);
}

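/* Example flow (sketch; 'buf' and 'buf_len' are illustrative): callers
 * request a logical image and receive it whole:
 *
 *	rc = qed_mcp_get_nvm_image(p_hwfn, QED_NVM_IMAGE_ISCSI_CFG,
 *				   buf, buf_len);
 *
 * The buffer is zeroed up front so a shorter image never leaves stale
 * bytes, and a buffer smaller than the stored image fails with -ENOMEM
 * rather than being truncated.
 */
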
static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case QED_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case QED_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case QED_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case QED_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case QED_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case QED_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case QED_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case QED_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case QED_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case QED_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define QED_RESC_ALLOC_VERSION_MAJOR	2
#define QED_RESC_ALLOC_VERSION_MINOR	0
#define QED_RESC_ALLOC_VERSION				     \
	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

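/* The packed value rides in the mailbox param of every resource-alloc
 * message (mb_params.param below), letting the MFW reject requests
 * whose resource-allocation HSI it does not understand. As a worked
 * example, if MAJOR occupies the low bits (shift 0) and MINOR the high
 * half-word (shift 16), version 2.0 packs to 0x00000002; the HSI header
 * holds the authoritative shift values.
 */
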
struct qed_resc_alloc_in_params {
	u32 cmd;
	enum qed_resources res_id;
	u32 resc_max_val;
};

struct qed_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};

static int
qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct qed_resc_alloc_in_params *p_in_params,
			    struct qed_resc_alloc_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	int rc;

	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       qed_hw_get_resc_name(p_in_params->res_id));
		return -EINVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		break;
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = QED_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd,
		   p_in_params->res_id,
		   qed_hw_get_resc_name(p_in_params->res_id),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num,
		   p_out_params->resc_start,
		   p_out_params->vf_resc_num,
		   p_out_params->vf_resc_start, p_out_params->flags);

	return 0;
}

int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return 0;
}

int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return 0;
}

int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			   &mcp_resp, &mcp_param);
}

static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			 p_mcp_resp, p_mcp_param);
	if (rc)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return -EINVAL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		rc = -EINVAL;
	}

	return rc;
}

static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	switch (p_params->timeout) {
	case QED_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case QED_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	int rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
				    DIV_ROUND_UP(p_params->retry_interval,
						 1000);

				msleep(retry_interval_in_ms);
			} else {
				udelay(p_params->retry_interval);
			}
		}

		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return 0;
}

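/* Retry semantics (sketch): with the defaults installed by
 * qed_mcp_resc_lock_default_init() below, a contended lock is retried
 * up to QED_MCP_RESC_LOCK_RETRY_CNT_DFLT times, sleeping roughly
 * retry_interval microseconds (rounded up to whole milliseconds for
 * msleep) between attempts. A return of 0 with !p_params->b_granted
 * means the current owner never released the resource within the retry
 * budget; callers must check b_granted, not just the return code.
 */
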
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent)
{
	if (p_lock) {
		memset(p_lock, 0, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since it's
		 * unexpected another entity would release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock) {
		memset(p_unlock, 0, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}

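/* Lock/unlock pairing sketch (QED_RESC_LOCK_PTP_PORT0 is just an
 * example resource; any enum qed_resc_lock value works the same way):
 *
 *	struct qed_resc_lock_params lock;
 *	struct qed_resc_unlock_params unlock;
 *
 *	qed_mcp_resc_lock_default_init(&lock, &unlock,
 *				       QED_RESC_LOCK_PTP_PORT0, false);
 *	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock);
 *	if (!rc && lock.b_granted) {
 *		... touch the shared resource ...
 *		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */
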
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (!rc)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			   features, &mcp_resp, &mcp_param);
}