// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define GRCBASE_MCP	0xe00000

#define QED_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)	  \
	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr,	  \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))
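
/* Illustrative expansion (not part of the driver itself): a call such as
 * DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, val) resolves to
 *
 *	qed_wr(p_hwfn, p_ptt,
 *	       p_hwfn->mcp_info->drv_mb_addr +
 *	       offsetof(struct public_drv_mb, drv_mb_param), val);
 *
 * i.e. every mailbox field access is a GRC read/write at the driver mailbox
 * base plus the field's offset within struct public_drv_mb.
 */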

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}
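
/* Illustrative sketch (hypothetical values): if the MFW wrote 0x12345678
 * big-endian into message slot i, a little-endian host reads the raw dword
 * as 0x78563412; the be32_to_cpu() above restores 0x12345678 before the
 * message is stored in mfw_mb_cur.
 */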

struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		goto out;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem,
					 p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);
	p_hwfn->mcp_info = NULL;

	return 0;
}

/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
#define QED_MCP_SHMEM_RDY_ITER_MS	50

static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base) {
		DP_NOTICE(p_hwfn,
			  "The address of the MCP scratch-pad is not configured\n");
		return -EINVAL;
	}

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
					    p_info->mfw_mb_addr +
					    offsetof(struct public_mfw_mb,
						     sup_msgs));

	/* The driver can notify that there was an MCP reset, and might read
	 * the SHMEM values before the MFW has completed initializing them.
	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
	 * data ready indication.
	 */
	while (!p_info->mfw_mb_length && --cnt) {
		msleep(msec);
		p_info->mfw_mb_length =
			(u16)qed_rd(p_hwfn, p_ptt,
				    p_info->mfw_mb_addr +
				    offsetof(struct public_mfw_mb, sup_msgs));
	}

	if (!cnt) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return -EBUSY;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
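
/* Illustrative sketch of the offsize encoding (assumed layout, per the
 * SECTION_* macros): an "offsize" dword packs the section offset from the
 * MCP scratch-pad together with the per-PF section size, so that
 * SECTION_ADDR(offsize, pf_id) evaluates to roughly
 *
 *	MCP_REG_SCRATCH + SECTION_OFFSET(offsize) +
 *	pf_id * SECTION_SIZE(offsize)
 *
 * which is how both mfw_mb_addr and drv_mb_addr above are derived.
 */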

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlock */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates
		 * that the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return -EBUSY;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}
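
/* Illustrative sketch of the sequence matching above (hypothetical numbers):
 * if the driver last sent drv_mb_seq == 12, only a fw_mb_header whose
 * FW_MSG_SEQ_NUMBER_MASK bits decode to 12 completes the pending element;
 * any other value means the response is stale and the caller keeps polling
 * (-EAGAIN).
 */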

/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
	u32 delay = QED_MCP_RESP_ITER_US;

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params,
		       u32 max_retries, u32 usecs)
{
	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u16 seq_num;
	int rc = 0;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		qed_mcp_print_cpu_info(p_hwfn, p_ptt);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
			qed_mcp_cmd_set_blocking(p_hwfn, true);

		qed_hw_err_notify(p_hwfn, p_ptt,
				  QED_HW_ERR_MFW_RESP_FAIL, NULL);
		return -EAGAIN;
	}

	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
	u32 usecs = QED_MCP_RESP_ITER_US;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
		max_retries = DIV_ROUND_UP(max_retries, 1000);
		usecs *= 1000;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
				      usecs);
}
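
/* Timing sketch: in the atomic path the wait budget is
 * QED_DRV_MB_MAX_RETRIES * QED_MCP_RESP_ITER_US = 500,000 * 10 us = 5 s of
 * udelay() polling. With QED_MB_FLAG_CAN_SLEEP the code above keeps the same
 * 5 s budget but trades iteration count for interval: 500 retries with a
 * 10 ms msleep() per iteration.
 */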

int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}
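
/* Illustrative caller (a sketch, not from this file): most simple mailbox
 * exchanges reduce to this pattern, e.g. the LOAD_DONE acknowledgment
 * implemented later in this file:
 *
 *	u32 resp = 0, param = 0;
 *	int rc;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0,
 *			 &resp, &param);
 *	if (rc)
 *		return rc;
 *
 * resp then holds the FW_MSG_CODE_* response and param the response param.
 */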

int
qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u32 cmd,
		   u32 param,
		   u32 *o_mcp_resp,
		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	/* nvm_info needs to be updated */
	p_hwfn->nvm_info.valid = false;

	return 0;
}

int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}
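
/* Usage sketch (hypothetical command and param values, assumed from the qed
 * HSI): reading up to MCP_DRV_NVM_BUF_LEN bytes of NVM data in one
 * transaction:
 *
 *	u32 resp, param, read_len;
 *	u32 buf[MCP_DRV_NVM_BUF_LEN / 4];
 *
 *	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM,
 *				addr_and_len, &resp, &param, &read_len, buf);
 *
 * The actual transfer size comes back through o_txn_size (mcp_param), which
 * is why the destination size is set to the maximum before the call.
 */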

static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}
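
/* In the default policy above, only two role transitions may force out an
 * existing driver: OS over PREBOOT, and KDUMP over OS. For example, an OS
 * driver may force-replace a preboot driver, but never another OS driver.
 */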

static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)

static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

	return config_bitmap;
}
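
/* Worked example (hypothetical configuration): a kernel built with QEDE,
 * QED_SRIOV and QED_LL2 only would report 0x1 | 0x2 | 0x40 = 0x43 as its
 * config bitmap (sent as drv_ver_1 in the load request below).
 */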

struct qed_load_req_in_params {
	u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}

static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				  enum qed_drv_role drv_role,
				  u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}

enum qed_load_req_force {
	QED_LOAD_REQ_FORCE_NONE,
	QED_LOAD_REQ_FORCE_PF,
	QED_LOAD_REQ_FORCE_ALL,
};

static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
				  enum qed_load_req_force force_cmd,
				  u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case QED_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case QED_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case QED_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params)
{
	struct qed_load_req_out_params out_params;
	struct qed_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = QED_VERSION;
	in_params.drv_ver_1 = qed_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	qed_get_mfw_force_cmd(p_hwfn,
			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	memset(&out_params, 0, sizeof(out_params));
	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
		memset(&out_params, 0, sizeof(out_params));
		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
		if (rc)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (qed_mcp_can_force_load(in_params.drv_role,
					   out_params.exist_drv_role,
					   p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			qed_get_mfw_force_cmd(p_hwfn,
					      QED_LOAD_REQ_FORCE_ALL,
					      &mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			memset(&out_params, 0, sizeof(out_params));
			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
						&out_params);
			if (rc)
				return rc;
		} else {
			DP_NOTICE(p_hwfn,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
			return -EBUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 */
			DP_NOTICE(p_hwfn,
				  "PF is already loaded\n");
			return -EINVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return -EBUSY;
	}

	p_params->load_code = out_params.load_code;

	return 0;
}

int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			 &param);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return 0;
}

int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	u32 wol_param;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		fallthrough;
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.param = wol_param;
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
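
/* Packing sketch (hypothetical address): for a WoL MAC of aa:bb:cc:dd:ee:ff
 * the code above produces mac_upper = 0x0000aabb and
 * mac_lower = 0xccddeeff.
 */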

static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = qed_rd(p_hwfn,
			    p_ptt,
			    p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr;
	u32 i, size;

	func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
						  FUNC_MF_CFG_MIN_BW);
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
						  FUNC_MF_CFG_MAX_BW);
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					       MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
			qed_read_pf_bandwidth(p_hwfn, &shmem_info);
			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
				   "Virtual link_up = %d\n", p_link->link_up);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
				   "Physical link_up = %d\n", p_link->link_up);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		fallthrough;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
		p_link->link_up = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
					    p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	qed_link_update(p_hwfn, p_ptt);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}

int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by qed, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & QED_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & QED_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}

u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));

	proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
			       path_addr +
			       offsetof(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	qed_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != QED_LEADING_HWFN(cdev))
		return;

	if (cdev->recov_in_prog) {
		DP_NOTICE(p_hwfn,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	cdev->recov_in_prog = true;

	proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);

	qed_schedule_recovery_handler(p_hwfn);
}

static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}

static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
						 FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
		if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
			       p_hwfn->hw_info.ovlan);
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);

			/* Configure DB to add external vlan to EDPM packets */
			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
			       p_hwfn->hw_info.ovlan);
		} else {
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
		}

		qed_sp_pf_update_stag(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		    &resp, &param);
}

static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
		return;

	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
			  "Fan failure was detected on the network interface card and it's going to be shut down.\n");
}

struct qed_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};

static int
qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt,
		  struct qed_mdump_cmd_params *p_mdump_cmd_params)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_INFO(p_hwfn,
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
		rc = -EOPNOTSUPP;
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The mdump command is not supported by the MFW\n");
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mdump_cmd_params mdump_cmd_params;

	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

static int
qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct mdump_retain_data_stc *p_mdump_retain)
{
	struct qed_mdump_cmd_params mdump_cmd_params;
	int rc;

	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
	mdump_cmd_params.p_data_dst = p_mdump_retain;
	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);

	rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		return -EINVAL;
	}

	return 0;
}

static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct mdump_retain_data_stc mdump_retain;
	int rc;

	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
		return;

	rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
	if (rc == 0 && mdump_retain.valid)
		DP_NOTICE(p_hwfn,
			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
			  mdump_retain.epoch,
			  mdump_retain.pf, mdump_retain.status);
	else
		DP_NOTICE(p_hwfn,
			  "The MFW notified that a critical error occurred in the device\n");

	DP_NOTICE(p_hwfn,
		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
	qed_mcp_mdump_ack(p_hwfn, p_ptt);

	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
}

void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 port_cfg, val;

	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		return;

	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			  offsetof(struct public_port, oem_cfg_port));
	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
		OEM_CFG_CHANNEL_TYPE_OFFSET;
	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
		DP_NOTICE(p_hwfn,
			  "Incorrect UFP Channel type %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));

	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
	if (val == OEM_CFG_SCHED_TYPE_ETS) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
	} else {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
		DP_NOTICE(p_hwfn,
			  "Unknown UFP scheduling mode %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));
	}

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
		OEM_CFG_FUNC_TC_OFFSET;
	p_hwfn->ufp_info.tc = (u8)val;
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
		OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
	} else {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
		DP_NOTICE(p_hwfn,
			  "Unknown Host priority control %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));
	}

	DP_NOTICE(p_hwfn,
		  "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
		  p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
		  p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
}

static int
qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_mcp_read_ufp_config(p_hwfn, p_ptt);

	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
		qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
					   p_hwfn->ufp_info.tc);

		qed_qm_reconf(p_hwfn, p_ptt);
	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
		/* Merge UFP TC with the dcbx TC data */
		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
					  QED_DCBX_OPERATIONAL_MIB);
	} else {
		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
		return -EINVAL;
	}

	/* update storm FW with negotiation results */
	qed_sp_pf_update_ufp(p_hwfn);

	/* update stag pcp value */
	qed_sp_pf_update_stag(p_hwfn);

	return 0;
}

int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_OEM_CFG_UPDATE:
			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			qed_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			qed_mcp_update_stag(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			qed_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_TLV_REQ:
			qed_mfw_tlv_req(p_hwfn);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
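
/* MFW mailbox layout assumed by the ACK loop above (a sketch):
 *
 *	mfw_mb_addr + 0         : sup_msgs
 *	mfw_mb_addr + 4         : message dwords [0..N-1]
 *	mfw_mb_addr + 4 + N * 4 : ack dwords [0..N-1]
 *
 * with N == MFW_DRV_MSG_MAX_DWORDS(mfw_mb_length), so each message dword is
 * acknowledged by writing its big-endian value N * 4 bytes further on.
 */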

int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}

int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	/* Read the address of the nvm_cfg */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read the offset of nvm_cfg1 */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		       offsetof(struct nvm_cfg1, glob) +
		       offsetof(struct nvm_cfg1_glob, mbi_version);
	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
			    mbi_ver_addr) &
		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

	return 0;
}

int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_media_type)
{
	*p_media_type = MEDIA_UNSPECIFIED;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (!p_ptt) {
		*p_media_type = MEDIA_UNSPECIFIED;
		return -EINVAL;
	}

	*p_media_type = qed_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port,
					media_type));

	return 0;
}

int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *p_transceiver_state,
				 u32 *p_transceiver_type)
{
	u32 transceiver_info;

	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	transceiver_info = qed_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  offsetof(struct public_port,
					   transceiver_data));

	*p_transceiver_state = (transceiver_info &
				ETH_TRANSCEIVER_STATE_MASK) >>
				ETH_TRANSCEIVER_STATE_OFFSET;

	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		*p_transceiver_type = (transceiver_info &
				       ETH_TRANSCEIVER_TYPE_MASK) >>
				       ETH_TRANSCEIVER_TYPE_OFFSET;
	else
		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;

	return 0;
}

static bool qed_is_transceiver_ready(u32 transceiver_state,
				     u32 transceiver_type)
{
	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
		return true;

	return false;
}
2132 int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
2133 struct qed_ptt *p_ptt, u32 *p_speed_mask)
2135 u32 transceiver_type, transceiver_state;
2138 ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2143 if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
2147 switch (transceiver_type) {
2148 case ETH_TRANSCEIVER_TYPE_1G_LX:
2149 case ETH_TRANSCEIVER_TYPE_1G_SX:
2150 case ETH_TRANSCEIVER_TYPE_1G_PCC:
2151 case ETH_TRANSCEIVER_TYPE_1G_ACC:
2152 case ETH_TRANSCEIVER_TYPE_1000BASET:
2153 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2155 case ETH_TRANSCEIVER_TYPE_10G_SR:
2156 case ETH_TRANSCEIVER_TYPE_10G_LR:
2157 case ETH_TRANSCEIVER_TYPE_10G_LRM:
2158 case ETH_TRANSCEIVER_TYPE_10G_ER:
2159 case ETH_TRANSCEIVER_TYPE_10G_PCC:
2160 case ETH_TRANSCEIVER_TYPE_10G_ACC:
2161 case ETH_TRANSCEIVER_TYPE_4x10G:
2162 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2164 case ETH_TRANSCEIVER_TYPE_40G_LR4:
2165 case ETH_TRANSCEIVER_TYPE_40G_SR4:
2166 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2167 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2168 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2169 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2171 case ETH_TRANSCEIVER_TYPE_100G_AOC:
2172 case ETH_TRANSCEIVER_TYPE_100G_SR4:
2173 case ETH_TRANSCEIVER_TYPE_100G_LR4:
2174 case ETH_TRANSCEIVER_TYPE_100G_ER4:
2175 case ETH_TRANSCEIVER_TYPE_100G_ACC:
2177 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2178 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2180 case ETH_TRANSCEIVER_TYPE_25G_SR:
2181 case ETH_TRANSCEIVER_TYPE_25G_LR:
2182 case ETH_TRANSCEIVER_TYPE_25G_AOC:
2183 case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2184 case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2185 case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2186 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2188 case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2189 case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2190 case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2191 case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2192 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2193 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2194 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2196 case ETH_TRANSCEIVER_TYPE_40G_CR4:
2197 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2198 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2199 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2200 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2202 case ETH_TRANSCEIVER_TYPE_100G_CR4:
2203 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2205 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2206 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2207 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2208 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2209 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2210 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2211 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2213 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2214 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2215 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2217 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2218 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2219 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2220 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2222 case ETH_TRANSCEIVER_TYPE_XLPPI:
2223 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2225 case ETH_TRANSCEIVER_TYPE_10G_BASET:
2226 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2227 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2230 DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2232 *p_speed_mask = 0xff;
2239 int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
2240 struct qed_ptt *p_ptt, u32 *p_board_config)
2242 u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2244 if (IS_VF(p_hwfn->cdev))
2247 if (!qed_mcp_is_init(p_hwfn)) {
2248 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2252 *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2256 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2257 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2258 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2259 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2260 *p_board_config = qed_rd(p_hwfn, p_ptt,
2262 offsetof(struct nvm_cfg1_port,
2268 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2270 qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
2271 enum qed_pci_personality *p_proto)
2273 /* No legacy MFW ever published iWARP support.
2274 * So at this point, this is either plain L2 or RoCE.
2276 if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
2277 *p_proto = QED_PCI_ETH_ROCE;
2279 *p_proto = QED_PCI_ETH;
2281 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2282 "According to Legacy capabilities, L2 personality is %08x\n",
2287 qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
2288 struct qed_ptt *p_ptt,
2289 enum qed_pci_personality *p_proto)
2291 u32 resp = 0, param = 0;
2294 rc = qed_mcp_cmd(p_hwfn, p_ptt,
2295 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2298 if (resp != FW_MSG_CODE_OK) {
2299 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2300 "MFW lacks support for command; Returns %08x\n",
2306 case FW_MB_PARAM_GET_PF_RDMA_NONE:
2307 *p_proto = QED_PCI_ETH;
2309 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2310 *p_proto = QED_PCI_ETH_ROCE;
2312 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2313 *p_proto = QED_PCI_ETH_IWARP;
2315 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2316 *p_proto = QED_PCI_ETH_RDMA;
2320 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2327 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2328 (u32) *p_proto, resp, param);
2333 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
2334 struct public_func *p_info,
2335 struct qed_ptt *p_ptt,
2336 enum qed_pci_personality *p_proto)
2340 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2341 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2342 if (!IS_ENABLED(CONFIG_QED_RDMA))
2343 *p_proto = QED_PCI_ETH;
2344 else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
2345 qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2347 case FUNC_MF_CFG_PROTOCOL_ISCSI:
2348 *p_proto = QED_PCI_ISCSI;
2350 case FUNC_MF_CFG_PROTOCOL_FCOE:
2351 *p_proto = QED_PCI_FCOE;
2353 case FUNC_MF_CFG_PROTOCOL_ROCE:
2354 DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
2363 int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
2364 struct qed_ptt *p_ptt)
2366 struct qed_mcp_function_info *info;
2367 struct public_func shmem_info;
2369 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2370 info = &p_hwfn->mcp_info->func_info;
2372 info->pause_on_host = (shmem_info.config &
2373 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2375 if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2377 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2378 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2382 qed_read_pf_bandwidth(p_hwfn, &shmem_info);
2384 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2385 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2386 info->mac[1] = (u8)(shmem_info.mac_upper);
2387 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2388 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2389 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2390 info->mac[5] = (u8)(shmem_info.mac_lower);
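/* Editorial example: mac_upper = 0x0011 and mac_lower = 0x22334455
 * decode to 00:11:22:33:44:55.
 */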
2392 /* Store the primary MAC for possible later WoL use */
2393 memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
2395 DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
2398 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2399 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2400 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2401 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2403 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2405 info->mtu = (u16)shmem_info.mtu_size;
2407 p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
2408 p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
2409 if (qed_mcp_is_init(p_hwfn)) {
2410 u32 resp = 0, param = 0;
2413 rc = qed_mcp_cmd(p_hwfn, p_ptt,
2414 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2417 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2418 p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
2421 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
2422 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2423 info->pause_on_host, info->protocol,
2424 info->bandwidth_min, info->bandwidth_max,
2425 info->mac[0], info->mac[1], info->mac[2],
2426 info->mac[3], info->mac[4], info->mac[5],
2427 info->wwn_port, info->wwn_node,
2428 info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
2433 struct qed_mcp_link_params
2434 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
2436 if (!p_hwfn || !p_hwfn->mcp_info)
2438 return &p_hwfn->mcp_info->link_input;
2441 struct qed_mcp_link_state
2442 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
2444 if (!p_hwfn || !p_hwfn->mcp_info)
2446 return &p_hwfn->mcp_info->link_output;
2449 struct qed_mcp_link_capabilities
2450 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
2452 if (!p_hwfn || !p_hwfn->mcp_info)
2454 return &p_hwfn->mcp_info->link_capabilities;
2457 int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2459 u32 resp = 0, param = 0;
2462 rc = qed_mcp_cmd(p_hwfn, p_ptt,
2463 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2465 /* Wait for the drain to complete before returning */
2471 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
2472 struct qed_ptt *p_ptt, u32 *p_flash_size)
2476 if (IS_VF(p_hwfn->cdev))
2479 flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2480 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2481 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
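/* Editorial note: the CFG4 field holds log2 of the flash size in Mbit;
 * adding MCP_BYTES_PER_MBIT_SHIFT (17, as 2^17 bytes are one Mbit)
 * converts it to bytes. E.g., a field value of 6 yields
 * 1 << (6 + 17) = 8 MiB, i.e. a 64 Mbit flash.
 */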
2482 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2484 *p_flash_size = flash_size;
2489 int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2491 struct qed_dev *cdev = p_hwfn->cdev;
2493 if (cdev->recov_in_prog) {
2495 "Avoid triggering a recovery since such a process is already in progress\n");
2499 DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
2500 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2505 #define QED_RECOVERY_PROLOG_SLEEP_MS 100
2507 int qed_recovery_prolog(struct qed_dev *cdev)
2509 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2510 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
2513 /* Allow ongoing PCIe transactions to complete */
2514 msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
2516 /* Clear the PF's internal FID_enable in the PXP */
2517 rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
2520 "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
2527 qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
2528 struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2530 u32 resp = 0, param = 0, rc_param = 0;
2533 /* Only the leading hwfn can configure MSI-X, and CMT must be taken into account */
2534 if (!IS_LEAD_HWFN(p_hwfn))
2536 num *= p_hwfn->cdev->num_hwfns;
2538 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2539 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2540 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2541 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2543 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2546 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2547 DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
2550 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2551 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2559 qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
2560 struct qed_ptt *p_ptt, u8 num)
2562 u32 resp = 0, param = num, rc_param = 0;
2565 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2566 param, &resp, &rc_param);
2568 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2569 DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
2572 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2573 "Requested 0x%02x MSI-x interrupts for VFs\n", num);
2579 int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
2580 struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2582 if (QED_IS_BB(p_hwfn->cdev))
2583 return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2585 return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2589 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
2590 struct qed_ptt *p_ptt,
2591 struct qed_mcp_drv_version *p_ver)
2593 struct qed_mcp_mb_params mb_params;
2594 struct drv_version_stc drv_version;
2599 memset(&drv_version, 0, sizeof(drv_version));
2600 drv_version.version = p_ver->version;
2601 for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
2602 val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
2603 *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
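/* Editorial note: the MFW is big-endian, so each 4-byte chunk of the
 * version string is converted to BE above, keeping the characters in
 * order when the MFW reads the name dword by dword.
 */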
2606 memset(&mb_params, 0, sizeof(mb_params));
2607 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2608 mb_params.p_data_src = &drv_version;
2609 mb_params.data_src_size = sizeof(drv_version);
2610 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2612 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2617 /* Wait at most 100 msec in total for the MCP to halt */
2618 #define QED_MCP_HALT_SLEEP_MS 10
2619 #define QED_MCP_HALT_MAX_RETRIES 10
2621 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2623 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2626 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2629 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2634 msleep(QED_MCP_HALT_SLEEP_MS);
2635 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2636 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2638 } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
2640 if (cnt == QED_MCP_HALT_MAX_RETRIES) {
2642 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2643 qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2647 qed_mcp_cmd_set_blocking(p_hwfn, true);
2652 #define QED_MCP_RESUME_SLEEP_MS 10
2654 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2656 u32 cpu_mode, cpu_state;
2658 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2660 cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2661 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2662 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2663 msleep(QED_MCP_RESUME_SLEEP_MS);
2664 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2666 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2668 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2669 cpu_mode, cpu_state);
2673 qed_mcp_cmd_set_blocking(p_hwfn, false);
2678 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
2679 struct qed_ptt *p_ptt,
2680 enum qed_ov_client client)
2682 u32 resp = 0, param = 0;
2687 case QED_OV_CLIENT_DRV:
2688 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2690 case QED_OV_CLIENT_USER:
2691 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2693 case QED_OV_CLIENT_VENDOR_SPEC:
2694 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2697 DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
2701 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2702 drv_mb_param, &resp, &param);
2704 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2709 int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
2710 struct qed_ptt *p_ptt,
2711 enum qed_ov_driver_state drv_state)
2713 u32 resp = 0, param = 0;
2717 switch (drv_state) {
2718 case QED_OV_DRIVER_STATE_NOT_LOADED:
2719 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2721 case QED_OV_DRIVER_STATE_DISABLED:
2722 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2724 case QED_OV_DRIVER_STATE_ACTIVE:
2725 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2728 DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
2732 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2733 drv_mb_param, &resp, &param);
2735 DP_ERR(p_hwfn, "Failed to send driver state\n");
2740 int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
2741 struct qed_ptt *p_ptt, u16 mtu)
2743 u32 resp = 0, param = 0;
2747 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2748 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2749 drv_mb_param, &resp, &param);
2751 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2756 int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
2757 struct qed_ptt *p_ptt, u8 *mac)
2759 struct qed_mcp_mb_params mb_params;
2763 memset(&mb_params, 0, sizeof(mb_params));
2764 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2765 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2766 DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2767 mb_params.param |= MCP_PF_ID(p_hwfn);
2769 /* The MCP is big-endian, and on LE platforms PCI swaps SHMEM accesses
2770 * at 32-bit granularity.
2771 * The MAC therefore must be written in native (CPU) order rather than
2772 * byte order; otherwise the MFW would read it incorrectly after the swap.
2774 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2775 mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
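/* Editorial example: MAC 00:11:22:33:44:55 is sent as
 * mfw_mac[0] = 0x00112233 and mfw_mac[1] = 0x44550000.
 */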
2777 mb_params.p_data_src = (u8 *)mfw_mac;
2778 mb_params.data_src_size = 8;
2779 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2781 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2783 /* Store the primary MAC for possible later WoL use */
2784 memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
2789 int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
2790 struct qed_ptt *p_ptt, enum qed_ov_wol wol)
2792 u32 resp = 0, param = 0;
2796 if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
2797 DP_VERBOSE(p_hwfn, QED_MSG_SP,
2798 "Can't change WoL configuration when WoL isn't supported\n");
2803 case QED_OV_WOL_DEFAULT:
2804 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2806 case QED_OV_WOL_DISABLED:
2807 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2809 case QED_OV_WOL_ENABLED:
2810 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2813 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2817 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2818 drv_mb_param, &resp, &param);
2820 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2822 /* Store the WoL update for a future unload */
2823 p_hwfn->cdev->wol_config = (u8)wol;
2828 int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
2829 struct qed_ptt *p_ptt,
2830 enum qed_ov_eswitch eswitch)
2832 u32 resp = 0, param = 0;
2837 case QED_OV_ESWITCH_NONE:
2838 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2840 case QED_OV_ESWITCH_VEB:
2841 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2843 case QED_OV_ESWITCH_VEPA:
2844 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2847 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2851 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2852 drv_mb_param, &resp, &param);
2854 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2859 int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
2860 struct qed_ptt *p_ptt, enum qed_led_mode mode)
2862 u32 resp = 0, param = 0, drv_mb_param;
2866 case QED_LED_MODE_ON:
2867 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2869 case QED_LED_MODE_OFF:
2870 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2872 case QED_LED_MODE_RESTORE:
2873 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2876 DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
2880 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2881 drv_mb_param, &resp, &param);
2886 int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
2887 struct qed_ptt *p_ptt, u32 mask_parities)
2889 u32 resp = 0, param = 0;
2892 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2893 mask_parities, &resp, &param);
2897 "MCP response failure for mask parities, aborting\n");
2898 } else if (resp != FW_MSG_CODE_OK) {
2900 "MCP did not acknowledge mask parity request. Old MFW?\n");
2907 int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
2909 u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
2910 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2911 u32 resp = 0, resp_param = 0;
2912 struct qed_ptt *p_ptt;
2915 p_ptt = qed_ptt_acquire(p_hwfn);
2919 while (bytes_left > 0) {
2920 bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
2922 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2923 DRV_MSG_CODE_NVM_READ_NVRAM,
2926 DRV_MB_PARAM_NVM_LEN_OFFSET),
2929 (u32 *)(p_buf + offset));
2931 if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
2932 DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
2936 /* This can be a lengthy process, and the scheduler might not be
2937 * preemptible. Sleep roughly once per 4 KiB read to avoid hogging the CPU.
2939 if (bytes_left % 0x1000 <
2940 (bytes_left - read_len) % 0x1000)
2941 usleep_range(1000, 2000);
2944 bytes_left -= read_len;
2947 cdev->mcp_nvm_resp = resp;
2948 qed_ptt_release(p_hwfn, p_ptt);
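/* Editorial sketch: a typical caller of qed_mcp_nvm_read(), allocating a
 * buffer and pulling an arbitrary NVM window. The helper name and the idea
 * of returning the raw buffer are illustrative only.
 */
static __maybe_unused u8 *qed_nvm_read_sketch(struct qed_dev *cdev,
					      u32 addr, u32 len)
{
	u8 *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return NULL;

	if (qed_mcp_nvm_read(cdev, addr, buf, len)) {
		kfree(buf);
		return NULL;
	}

	return buf;
}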
2953 int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
2955 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2956 struct qed_ptt *p_ptt;
2958 p_ptt = qed_ptt_acquire(p_hwfn);
2962 memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
2963 qed_ptt_release(p_hwfn, p_ptt);
2968 int qed_mcp_nvm_write(struct qed_dev *cdev,
2969 u32 cmd, u32 addr, u8 *p_buf, u32 len)
2971 u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
2972 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2973 struct qed_ptt *p_ptt;
2976 p_ptt = qed_ptt_acquire(p_hwfn);
2981 case QED_PUT_FILE_BEGIN:
2982 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2984 case QED_PUT_FILE_DATA:
2985 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2987 case QED_NVM_WRITE_NVRAM:
2988 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2991 DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
2996 buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
2997 while (buf_idx < len) {
2998 if (cmd == QED_PUT_FILE_BEGIN)
3001 nvm_offset = ((buf_size <<
3002 DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
3004 rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3005 &resp, ¶m, buf_size,
3006 (u32 *)&p_buf[buf_idx]);
3008 DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
3009 resp = FW_MSG_CODE_ERROR;
3013 if (resp != FW_MSG_CODE_OK &&
3014 resp != FW_MSG_CODE_NVM_OK &&
3015 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3017 "nvm write failed, resp = 0x%08x\n", resp);
3022 /* This can be a lengthy process, and the scheduler might not be
3023 * preemptible. Sleep roughly once per 4 KiB written to avoid hogging the CPU.
3025 if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
3026 usleep_range(1000, 2000);
3028 /* For an MBI upgrade, the MFW response includes the offset of the
3029 * next buffer to be delivered to the MFW.
3031 if (param && cmd == QED_PUT_FILE_DATA) {
3032 buf_idx = QED_MFW_GET_FIELD(param,
3033 FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
3034 buf_size = QED_MFW_GET_FIELD(param,
3035 FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
3037 buf_idx += buf_size;
3038 buf_size = min_t(u32, (len - buf_idx),
3039 MCP_DRV_NVM_BUF_LEN);
3043 cdev->mcp_nvm_resp = resp;
3045 qed_ptt_release(p_hwfn, p_ptt);
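/* Editorial sketch: a hypothetical image-update sequence built on
 * qed_mcp_nvm_write(). Real callers derive the addr parameter and the
 * chunking from the image file format; both are placeholders here, and
 * error handling is elided.
 */
static int __maybe_unused qed_nvm_update_sketch(struct qed_dev *cdev,
						u8 *p_image, u32 image_len)
{
	int rc;

	/* Announce the transfer, then stream the payload */
	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, 0 /* placeholder */,
			       p_image, image_len);
	if (rc)
		return rc;

	return qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, 0 /* placeholder */,
				 p_image, image_len);
}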
3050 int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3051 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
3053 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
3057 nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
3058 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
3059 nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
3060 DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
3065 while (bytes_left > 0) {
3066 bytes_to_copy = min_t(u32, bytes_left,
3067 MAX_I2C_TRANSACTION_SIZE);
3068 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3069 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3070 nvm_offset |= ((addr + offset) <<
3071 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
3072 DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
3073 nvm_offset |= (bytes_to_copy <<
3074 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
3075 DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
3076 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3077 DRV_MSG_CODE_TRANSCEIVER_READ,
3078 nvm_offset, &resp, &param, &buf_size,
3079 (u32 *)(p_buf + offset));
3082 "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3087 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3089 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3093 bytes_left -= buf_size;
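/* Editorial sketch: dumping the first 128 bytes of a module's A0h page
 * via qed_mcp_phy_sfp_read(). The 0xa0 I2C address follows the SFF-8472
 * convention; the helper name is hypothetical.
 */
static int __maybe_unused qed_sfp_dump_a0_sketch(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u8 *buf)
{
	return qed_mcp_phy_sfp_read(p_hwfn, p_ptt, MFW_PORT(p_hwfn),
				    0xa0, 0 /* offset */, 128, buf);
}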
3099 int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3101 u32 drv_mb_param = 0, rsp, param;
3104 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3105 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3107 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3108 drv_mb_param, &rsp, &param);
3113 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3114 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3120 int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3122 u32 drv_mb_param, rsp, param;
3125 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3126 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3128 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3129 drv_mb_param, &rsp, &param);
3134 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3135 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3141 int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
3142 struct qed_ptt *p_ptt,
3145 u32 drv_mb_param = 0, rsp;
3148 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3149 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3151 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3152 drv_mb_param, &rsp, num_images);
3156 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3162 int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
3163 struct qed_ptt *p_ptt,
3164 struct bist_nvm_image_att *p_image_att,
3167 u32 buf_size = 0, param, resp = 0, resp_param = 0;
3170 param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3171 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
3172 param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
3174 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3175 DRV_MSG_CODE_BIST_TEST, param,
3178 (u32 *)p_image_att);
3182 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3183 (p_image_att->return_code != 1))
3189 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
3191 struct qed_nvm_image_info nvm_info;
3192 struct qed_ptt *p_ptt;
3196 if (p_hwfn->nvm_info.valid)
3199 p_ptt = qed_ptt_acquire(p_hwfn);
3201 DP_ERR(p_hwfn, "failed to acquire ptt\n");
3205 /* Query the MFW for the number of available images */
3206 nvm_info.num_images = 0;
3207 rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
3208 p_ptt, &nvm_info.num_images);
3209 if (rc == -EOPNOTSUPP) {
3210 DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
3212 } else if (rc || !nvm_info.num_images) {
3213 DP_ERR(p_hwfn, "Failed getting number of images\n");
3217 nvm_info.image_att = kmalloc_array(nvm_info.num_images,
3218 sizeof(struct bist_nvm_image_att),
3220 if (!nvm_info.image_att) {
3225 /* Iterate over images and get their attributes */
3226 for (i = 0; i < nvm_info.num_images; i++) {
3227 rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
3228 &nvm_info.image_att[i], i);
3231 "Failed getting image index %d attributes\n", i);
3235 DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
3236 nvm_info.image_att[i].len);
3239 /* Update hwfn's nvm_info */
3240 if (nvm_info.num_images) {
3241 p_hwfn->nvm_info.num_images = nvm_info.num_images;
3242 kfree(p_hwfn->nvm_info.image_att);
3243 p_hwfn->nvm_info.image_att = nvm_info.image_att;
3244 p_hwfn->nvm_info.valid = true;
3247 qed_ptt_release(p_hwfn, p_ptt);
3251 kfree(nvm_info.image_att);
3253 qed_ptt_release(p_hwfn, p_ptt);
3257 void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
3259 kfree(p_hwfn->nvm_info.image_att);
3260 p_hwfn->nvm_info.image_att = NULL;
3261 p_hwfn->nvm_info.valid = false;
3265 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3266 enum qed_nvm_images image_id,
3267 struct qed_nvm_image_att *p_image_att)
3269 enum nvm_image_type type;
3272 /* Translate image_id into MFW definitions */
3274 case QED_NVM_IMAGE_ISCSI_CFG:
3275 type = NVM_TYPE_ISCSI_CFG;
3277 case QED_NVM_IMAGE_FCOE_CFG:
3278 type = NVM_TYPE_FCOE_CFG;
3280 case QED_NVM_IMAGE_MDUMP:
3281 type = NVM_TYPE_MDUMP;
3283 case QED_NVM_IMAGE_NVM_CFG1:
3284 type = NVM_TYPE_NVM_CFG1;
3286 case QED_NVM_IMAGE_DEFAULT_CFG:
3287 type = NVM_TYPE_DEFAULT_CFG;
3289 case QED_NVM_IMAGE_NVM_META:
3290 type = NVM_TYPE_META;
3293 DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
3298 qed_mcp_nvm_info_populate(p_hwfn);
3299 for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3300 if (type == p_hwfn->nvm_info.image_att[i].image_type)
3302 if (i == p_hwfn->nvm_info.num_images) {
3303 DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3304 "Failed to find nvram image of type %08x\n",
3309 p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
3310 p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
3315 int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
3316 enum qed_nvm_images image_id,
3317 u8 *p_buffer, u32 buffer_len)
3319 struct qed_nvm_image_att image_att;
3322 memset(p_buffer, 0, buffer_len);
3324 rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
3328 /* Validate sizes - both the image's and the supplied buffer's */
3329 if (image_att.length <= 4) {
3330 DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3331 "Image [%d] is too small - only %d bytes\n",
3332 image_id, image_att.length);
3336 if (image_att.length > buffer_len) {
3339 "Image [%d] is too big - %08x bytes where only %08x are available\n",
3340 image_id, image_att.length, buffer_len);
3344 return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
3345 p_buffer, image_att.length);
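/* Editorial sketch: fetching a whole NVM image with the helpers above.
 * A real caller would size the buffer from qed_mcp_get_nvm_image_att()
 * first; the fixed image id and helper name are just examples.
 */
static int __maybe_unused qed_get_cfg_image_sketch(struct qed_hwfn *p_hwfn,
						   u8 *buf, u32 buf_len)
{
	return qed_mcp_get_nvm_image(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG,
				     buf, buf_len);
}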
3348 static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
3350 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3354 mfw_res_id = RESOURCE_NUM_SB_E;
3357 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3360 mfw_res_id = RESOURCE_NUM_VPORT_E;
3363 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3366 mfw_res_id = RESOURCE_NUM_PQ_E;
3369 mfw_res_id = RESOURCE_NUM_RL_E;
3373 /* Each VFC resource can accommodate both a MAC and a VLAN */
3374 mfw_res_id = RESOURCE_VFC_FILTER_E;
3377 mfw_res_id = RESOURCE_ILT_E;
3379 case QED_LL2_RAM_QUEUE:
3380 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3382 case QED_LL2_CTX_QUEUE:
3383 mfw_res_id = RESOURCE_LL2_CQS_E;
3385 case QED_RDMA_CNQ_RAM:
3387 /* CNQ/CMDQS are the same resource */
3388 mfw_res_id = RESOURCE_CQS_E;
3390 case QED_RDMA_STATS_QUEUE:
3391 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3394 mfw_res_id = RESOURCE_BDQ_E;
3403 #define QED_RESC_ALLOC_VERSION_MAJOR 2
3404 #define QED_RESC_ALLOC_VERSION_MINOR 0
3405 #define QED_RESC_ALLOC_VERSION \
3406 ((QED_RESC_ALLOC_VERSION_MAJOR << \
3407 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3408 (QED_RESC_ALLOC_VERSION_MINOR << \
3409 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3411 struct qed_resc_alloc_in_params {
3413 enum qed_resources res_id;
3417 struct qed_resc_alloc_out_params {
3428 qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
3429 struct qed_ptt *p_ptt,
3430 struct qed_resc_alloc_in_params *p_in_params,
3431 struct qed_resc_alloc_out_params *p_out_params)
3433 struct qed_mcp_mb_params mb_params;
3434 struct resource_info mfw_resc_info;
3437 memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
3439 mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
3440 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3442 "Failed to match resource %d [%s] with the MFW resources\n",
3443 p_in_params->res_id,
3444 qed_hw_get_resc_name(p_in_params->res_id));
3448 switch (p_in_params->cmd) {
3449 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3450 mfw_resc_info.size = p_in_params->resc_max_val;
3452 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3455 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3460 memset(&mb_params, 0, sizeof(mb_params));
3461 mb_params.cmd = p_in_params->cmd;
3462 mb_params.param = QED_RESC_ALLOC_VERSION;
3463 mb_params.p_data_src = &mfw_resc_info;
3464 mb_params.data_src_size = sizeof(mfw_resc_info);
3465 mb_params.p_data_dst = mb_params.p_data_src;
3466 mb_params.data_dst_size = mb_params.data_src_size;
3470 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3472 p_in_params->res_id,
3473 qed_hw_get_resc_name(p_in_params->res_id),
3474 QED_MFW_GET_FIELD(mb_params.param,
3475 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3476 QED_MFW_GET_FIELD(mb_params.param,
3477 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3478 p_in_params->resc_max_val);
3480 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3484 p_out_params->mcp_resp = mb_params.mcp_resp;
3485 p_out_params->mcp_param = mb_params.mcp_param;
3486 p_out_params->resc_num = mfw_resc_info.size;
3487 p_out_params->resc_start = mfw_resc_info.offset;
3488 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3489 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3490 p_out_params->flags = mfw_resc_info.flags;
3494 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3495 QED_MFW_GET_FIELD(p_out_params->mcp_param,
3496 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3497 QED_MFW_GET_FIELD(p_out_params->mcp_param,
3498 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3499 p_out_params->resc_num,
3500 p_out_params->resc_start,
3501 p_out_params->vf_resc_num,
3502 p_out_params->vf_resc_start, p_out_params->flags);
3508 qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
3509 struct qed_ptt *p_ptt,
3510 enum qed_resources res_id,
3511 u32 resc_max_val, u32 *p_mcp_resp)
3513 struct qed_resc_alloc_out_params out_params;
3514 struct qed_resc_alloc_in_params in_params;
3517 memset(&in_params, 0, sizeof(in_params));
3518 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3519 in_params.res_id = res_id;
3520 in_params.resc_max_val = resc_max_val;
3521 memset(&out_params, 0, sizeof(out_params));
3522 rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3527 *p_mcp_resp = out_params.mcp_resp;
3533 qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
3534 struct qed_ptt *p_ptt,
3535 enum qed_resources res_id,
3536 u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
3538 struct qed_resc_alloc_out_params out_params;
3539 struct qed_resc_alloc_in_params in_params;
3542 memset(&in_params, 0, sizeof(in_params));
3543 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3544 in_params.res_id = res_id;
3545 memset(&out_params, 0, sizeof(out_params));
3546 rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3551 *p_mcp_resp = out_params.mcp_resp;
3553 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3554 *p_resc_num = out_params.resc_num;
3555 *p_resc_start = out_params.resc_start;
3561 int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3563 u32 mcp_resp, mcp_param;
3565 return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3566 &mcp_resp, &mcp_param);
3569 static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
3570 struct qed_ptt *p_ptt,
3571 u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
3575 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3576 p_mcp_resp, p_mcp_param);
3580 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3582 "The resource command is unsupported by the MFW\n");
3586 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3587 u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3590 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3599 __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3600 struct qed_ptt *p_ptt,
3601 struct qed_resc_lock_params *p_params)
3603 u32 param = 0, mcp_resp, mcp_param;
3607 switch (p_params->timeout) {
3608 case QED_MCP_RESC_LOCK_TO_DEFAULT:
3609 opcode = RESOURCE_OPCODE_REQ;
3610 p_params->timeout = 0;
3612 case QED_MCP_RESC_LOCK_TO_NONE:
3613 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3614 p_params->timeout = 0;
3617 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3621 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3622 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3623 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3627 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3628 param, p_params->timeout, opcode, p_params->resource);
3630 /* Attempt to acquire the resource */
3631 rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3635 /* Analyze the response */
3636 p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3637 opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3641 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3642 mcp_param, opcode, p_params->owner);
3645 case RESOURCE_OPCODE_GNT:
3646 p_params->b_granted = true;
3648 case RESOURCE_OPCODE_BUSY:
3649 p_params->b_granted = false;
3653 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3662 qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3663 struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
3669 /* No need for an interval before the first iteration */
3671 if (p_params->sleep_b4_retry) {
3672 u16 retry_interval_in_ms =
3673 DIV_ROUND_UP(p_params->retry_interval,
3676 msleep(retry_interval_in_ms);
3678 udelay(p_params->retry_interval);
3682 rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3686 if (p_params->b_granted)
3688 } while (retry_cnt++ < p_params->retry_num);
3694 qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
3695 struct qed_ptt *p_ptt,
3696 struct qed_resc_unlock_params *p_params)
3698 u32 param = 0, mcp_resp, mcp_param;
3702 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3703 : RESOURCE_OPCODE_RELEASE;
3704 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3705 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3707 DP_VERBOSE(p_hwfn, QED_MSG_SP,
3708 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3709 param, opcode, p_params->resource);
3711 /* Attempt to release the resource */
3712 rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3716 /* Analyze the response */
3717 opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3719 DP_VERBOSE(p_hwfn, QED_MSG_SP,
3720 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3724 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3726 "Resource unlock request for an already released resource [%d]\n",
3727 p_params->resource);
3729 case RESOURCE_OPCODE_RELEASED:
3730 p_params->b_released = true;
3732 case RESOURCE_OPCODE_WRONG_OWNER:
3733 p_params->b_released = false;
3737 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3745 void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
3746 struct qed_resc_unlock_params *p_unlock,
3748 resource, bool b_is_permanent)
3751 memset(p_lock, 0, sizeof(*p_lock));
3753 /* Permanent resources don't require aging, and there's no point
3754 * in trying to acquire them more than once, since it's not
3755 * expected that another entity would ever release them.
3757 if (b_is_permanent) {
3758 p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
3760 p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3761 p_lock->retry_interval =
3762 QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3763 p_lock->sleep_b4_retry = true;
3766 p_lock->resource = resource;
3770 memset(p_unlock, 0, sizeof(*p_unlock));
3771 p_unlock->resource = resource;
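/* Editorial sketch: the intended lock/unlock pairing around a shared
 * resource, using the defaults initialized above. The resource id is
 * passed through untyped here purely for illustration.
 */
static int __maybe_unused qed_resc_lock_sketch(struct qed_hwfn *p_hwfn,
					       struct qed_ptt *p_ptt,
					       u8 resource)
{
	struct qed_resc_unlock_params unlock;
	struct qed_resc_lock_params lock;
	int rc;

	qed_mcp_resc_lock_default_init(&lock, &unlock, resource, false);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock);
	if (rc || !lock.b_granted)
		return rc ? rc : -EBUSY;

	/* ... critical section touching the shared resource ... */

	return qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
}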
3775 bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
3777 return !!(p_hwfn->mcp_info->capabilities &
3778 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3781 int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3786 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3787 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3789 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
3790 "MFW supported features: %08x\n",
3791 p_hwfn->mcp_info->capabilities);
3796 int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3798 u32 mcp_resp, mcp_param, features;
3800 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3801 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
3803 return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3804 features, &mcp_resp, &mcp_param);
3807 int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3809 struct qed_mcp_mb_params mb_params = {0};
3810 struct qed_dev *cdev = p_hwfn->cdev;
3811 u8 fir_valid, l2_valid;
3814 mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
3815 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3819 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3821 "The get_engine_config command is unsupported by the MFW\n");
3825 fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3826 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
3829 QED_MFW_GET_FIELD(mb_params.mcp_param,
3830 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
3832 l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3833 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
3835 cdev->l2_affin_hint =
3836 QED_MFW_GET_FIELD(mb_params.mcp_param,
3837 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
3840 "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
3841 fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
3846 int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3848 struct qed_mcp_mb_params mb_params = {0};
3849 struct qed_dev *cdev = p_hwfn->cdev;
3852 mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
3853 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3857 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3859 "The get_ppfid_bitmap command is unsupported by the MFW\n");
3863 cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
3864 FW_MB_PARAM_PPFID_BITMAP);
3866 DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
3867 cdev->ppfid_bitmap);
3872 int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3873 u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3876 u32 mb_param = 0, resp, param;
3879 QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3880 if (flags & QED_NVM_CFG_OPTION_INIT)
3881 QED_MFW_SET_FIELD(mb_param,
3882 DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3883 if (flags & QED_NVM_CFG_OPTION_FREE)
3884 QED_MFW_SET_FIELD(mb_param,
3885 DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3886 if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3887 QED_MFW_SET_FIELD(mb_param,
3888 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3889 QED_MFW_SET_FIELD(mb_param,
3890 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
3894 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3895 DRV_MSG_CODE_GET_NVM_CFG_OPTION,
3896 mb_param, &resp, &param, p_len, (u32 *)p_buf);
3901 int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3902 u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3905 u32 mb_param = 0, resp, param;
3907 QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3908 if (flags & QED_NVM_CFG_OPTION_ALL)
3909 QED_MFW_SET_FIELD(mb_param,
3910 DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
3911 if (flags & QED_NVM_CFG_OPTION_INIT)
3912 QED_MFW_SET_FIELD(mb_param,
3913 DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3914 if (flags & QED_NVM_CFG_OPTION_COMMIT)
3915 QED_MFW_SET_FIELD(mb_param,
3916 DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
3917 if (flags & QED_NVM_CFG_OPTION_FREE)
3918 QED_MFW_SET_FIELD(mb_param,
3919 DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3920 if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3921 QED_MFW_SET_FIELD(mb_param,
3922 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3923 QED_MFW_SET_FIELD(mb_param,
3924 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
3928 return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3929 DRV_MSG_CODE_SET_NVM_CFG_OPTION,
3930 mb_param, &resp, &param, len, (u32 *)p_buf);
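/* Editorial sketch: reading back a single NVM config option for a given
 * entity. The option id and entity id are placeholders, the helper name
 * is hypothetical, and the length parameter is assumed to be a u32
 * pointer as suggested by the read command above.
 */
static int __maybe_unused qed_nvm_cfg_rd_sketch(struct qed_hwfn *p_hwfn,
						struct qed_ptt *p_ptt,
						u8 *buf, u32 *len)
{
	return qed_mcp_nvm_get_cfg(p_hwfn, p_ptt, 1 /* option id */,
				   0 /* entity id */,
				   QED_NVM_CFG_OPTION_ENTITY_SEL, buf, len);
}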
3933 #define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN
3934 #define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32)
3935 #define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
3936 (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
3939 __qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
3940 struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
3942 struct qed_mcp_mb_params mb_params;
3945 if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
3947 "Debug data size is %d while it should not exceed %d\n",
3948 size, QED_MCP_DBG_DATA_MAX_SIZE);
3952 memset(&mb_params, 0, sizeof(mb_params));
3953 mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
3954 SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
3955 mb_params.p_data_src = p_buf;
3956 mb_params.data_src_size = size;
3957 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3961 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3963 "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
3965 } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
3966 DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
3968 } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
3970 "Failed to send debug data to the MFW [resp 0x%08x]\n",
3971 mb_params.mcp_resp);
3978 enum qed_mcp_dbg_data_type {
3979 QED_MCP_DBG_DATA_TYPE_RAW,
3982 /* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
3983 #define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0
3984 #define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff
3985 #define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12
3986 #define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000
3987 #define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20
3988 #define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
3989 #define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28
3990 #define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000
3992 #define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1
3993 #define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
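/* Editorial example: a single-chunk RAW message from PF 2 with S/N 5
 * carries header 0x20300005 - PF=2 in [31:28], flags=FIRST|LAST (0x3)
 * in [27:20], type=RAW (0) in [19:12], S/N=5 in [11:0].
 */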
3996 qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
3997 struct qed_ptt *p_ptt,
3998 enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
4000 u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
4001 u32 tmp_size = size, *p_header, *p_payload;
4006 p_header = (u32 *)raw_data;
4007 p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
4009 seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
4011 /* First chunk is marked as 'first' */
4012 flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4015 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
4016 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
4017 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4018 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
4020 while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
4021 memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
4022 rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4023 QED_MCP_DBG_DATA_MAX_SIZE);
4027 /* Clear the 'first' marking after sending the first chunk */
4028 if (p_tmp_buf == p_buf) {
4029 flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4030 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
4034 p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4035 tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4038 /* Last chunk is marked as 'last' */
4039 flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
4040 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4041 memcpy(p_payload, p_tmp_buf, tmp_size);
4043 /* Casting the remaining size to u8 is OK since at this point it is <= 32 */
4044 return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4045 (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
4050 qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
4051 struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
4053 return qed_mcp_send_debug_data(p_hwfn, p_ptt,
4054 QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);