// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define GRCBASE_MCP	0xe00000

#define QED_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
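/* With a QED_MCP_RESP_ITER_US (10 usec) polling interval, the 500 * 1000
 * command retries above add up to the 5 sec budget, and the 50 * 1000 reset
 * retries add up to the 500 msec budget noted in the comments.
 */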
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT	17
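/* 2^17 = 131072 bytes, which approximates the 125000 bytes carried in one
 * Mbit; shifting a rate in Mbit/s left by this value converts it to a byte
 * count without a multiplication.
 */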
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;

	return true;
}
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; need to force it to CPU */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}
struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};
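/* The driver keeps each command it posts to the MFW mailbox on
 * mcp_info->cmd_list until a response carrying a matching sequence number
 * arrives. At most one command is pending at any time; the element is added
 * under cmd_lock before the command is written to the mailbox, and deleted
 * under the same lock once the response has been consumed.
 */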
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		goto out;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}
/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem,
					 p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);
	p_hwfn->mcp_info = NULL;

	return 0;
}
/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
#define QED_MCP_SHMEM_RDY_ITER_MS	50
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base) {
		DP_NOTICE(p_hwfn,
			  "The address of the MCP scratch-pad is not configured\n");
		return -EINVAL;
	}

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
					    p_info->mfw_mb_addr +
					    offsetof(struct public_mfw_mb,
						     sup_msgs));

	/* The driver can notify that there was an MCP reset, and might read
	 * the SHMEM values before the MFW has completed initializing them.
	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
	 * data ready indication.
	 */
	while (!p_info->mfw_mb_length && --cnt) {
		msleep(msec);
		p_info->mfw_mb_length =
			(u16)qed_rd(p_hwfn, p_ptt,
				    p_info->mfw_mb_addr +
				    offsetof(struct public_mfw_mb, sup_msgs));
	}

	if (!p_info->mfw_mb_length) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return -EBUSY;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
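/* mcp_hist snapshots MISCS_REG_GENERIC_POR_0 at init time; this register
 * changes across an MCP reset, so comparing the stored value against a
 * fresh read (see qed_mcp_reread_offsets()) tells the driver whether the
 * SHMEM offsets must be re-loaded before the next command is sent.
 */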
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		return -ENOMEM;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}
static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return -EBUSY;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}
/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}
/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}
/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}
static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}
static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
	u32 delay = QED_MCP_RESP_ITER_US;

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}
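/* The send flow below is split into two polling loops: the first waits for
 * the mailbox to become free (no pending command, or the pending command
 * completes during the iteration), and the second waits for the MFW
 * response to the command that was just sent. Both loops poll with
 * udelay()/msleep() according to the CAN_SLEEP flag, and both give up after
 * max_retries iterations.
 */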
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params,
		       u32 max_retries, u32 usecs)
{
	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u16 seq_num;
	int rc = 0;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		qed_mcp_print_cpu_info(p_hwfn, p_ptt);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
			qed_mcp_cmd_set_blocking(p_hwfn, true);

		qed_hw_err_notify(p_hwfn, p_ptt,
				  QED_HW_ERR_MFW_RESP_FAIL, NULL);
		return -EAGAIN;
	}

	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
	u32 usecs = QED_MCP_RESP_ITER_US;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
		max_retries = DIV_ROUND_UP(max_retries, 1000);
		usecs *= 1000;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
				      usecs);
}
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}
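/* Illustrative only - a minimal sketch of how callers drive the mailbox
 * through qed_mcp_cmd(); see qed_mcp_load_done() below for a real user:
 *
 *	u32 resp = 0, param = 0;
 *	int rc;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0,
 *			 &resp, &param);
 *	if (rc)
 *		return rc;
 *	// resp holds the FW_MSG_CODE_* response, param the response param
 */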
static int
qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u32 cmd,
		   u32 param,
		   u32 *o_mcp_resp,
		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	/* nvm_info needs to be updated */
	p_hwfn->nvm_info.valid = false;

	return rc;
}
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}
static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}
static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}
#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)
static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

	return config_bitmap;
}
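/* The resulting bitmap is reported to the MFW as drv_ver_1 in the load
 * request (see qed_mcp_load_req() below), which lets the MFW record which
 * qed sub-modules were compiled into the running kernel.
 */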
struct qed_load_req_in_params {
	u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};
static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}
static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				enum qed_drv_role drv_role,
				u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}
enum qed_load_req_force {
	QED_LOAD_REQ_FORCE_NONE,
	QED_LOAD_REQ_FORCE_PF,
	QED_LOAD_REQ_FORCE_ALL,
};
static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
				  enum qed_load_req_force force_cmd,
				  u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case QED_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case QED_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case QED_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}
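/* Load request negotiation, as implemented below: a default-HSI request is
 * sent first; if the MFW answers REFUSED_HSI_1 the request is resent with
 * HSI version 1, and if it answers REFUSED_REQUIRES_FORCE the request is
 * resent with LOAD_REQ_FORCE_ALL - but only when qed_mcp_can_force_load()
 * allows it; otherwise the load request is cancelled.
 */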
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params)
{
	struct qed_load_req_out_params out_params;
	struct qed_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = QED_VERSION;
	in_params.drv_ver_1 = qed_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	qed_get_mfw_force_cmd(p_hwfn,
			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	memset(&out_params, 0, sizeof(out_params));
	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
		memset(&out_params, 0, sizeof(out_params));
		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
		if (rc)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (qed_mcp_can_force_load(in_params.drv_role,
					   out_params.exist_drv_role,
					   p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			qed_get_mfw_force_cmd(p_hwfn,
					      QED_LOAD_REQ_FORCE_ALL,
					      &mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			memset(&out_params, 0, sizeof(out_params));
			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
						&out_params);
			if (rc)
				return rc;
		} else {
			DP_NOTICE(p_hwfn,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
			return -EBUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 */
			DP_NOTICE(p_hwfn,
				  "PF is already loaded\n");
			return -EINVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return -EBUSY;
	}

	p_params->load_code = out_params.load_code;

	return 0;
}
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			 &param);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return 0;
}
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	u32 wol_param;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		fallthrough;
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.param = wol_param;
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}
static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = qed_rd(p_hwfn,
			    p_ptt,
			    p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr;
	u32 i, size;

	func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));

	return size;
}
static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
						  FUNC_MF_CFG_MIN_BW);
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
						  FUNC_MF_CFG_MAX_BW);
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					       MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
			qed_read_pf_bandwidth(p_hwfn, &shmem_info);
			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
				   "Virtual link_up = %d\n", p_link->link_up);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
				   "Physical link_up = %d\n", p_link->link_up);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		fallthrough;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
		p_link->link_up = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
					    p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	qed_link_update(p_hwfn, p_ptt);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}
int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	u32 cmd;
	int rc = 0;

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by qed, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & QED_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & QED_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}
u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));

	proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
			       path_addr +
			       offsetof(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}
static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	qed_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != QED_LEADING_HWFN(cdev))
		return;

	if (cdev->recov_in_prog) {
		DP_NOTICE(p_hwfn,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	cdev->recov_in_prog = true;

	proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);

	qed_schedule_recovery_handler(p_hwfn);
}
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}
static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
		if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
			       p_hwfn->hw_info.ovlan);
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);

			/* Configure DB to add external vlan to EDPM packets */
			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
			       p_hwfn->hw_info.ovlan);
		} else {
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
		}

		qed_sp_pf_update_stag(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		    &resp, &param);
}
static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
		return;

	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
			  "Fan failure was detected on the network interface card and it's going to be shut down.\n");
}
struct qed_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};
static int
qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt,
		  struct qed_mdump_cmd_params *p_mdump_cmd_params)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_INFO(p_hwfn,
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
		rc = -EOPNOTSUPP;
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The mdump command is not supported by the MFW\n");
		rc = -EOPNOTSUPP;
	}

	return rc;
}
static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mdump_cmd_params mdump_cmd_params;

	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static int
qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct mdump_retain_data_stc *p_mdump_retain)
{
	struct qed_mdump_cmd_params mdump_cmd_params;
	int rc;

	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
	mdump_cmd_params.p_data_dst = p_mdump_retain;
	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);

	rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
	if (rc)
		return rc;

	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
		DP_INFO(p_hwfn,
			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
			mdump_cmd_params.mcp_resp);
		return -EINVAL;
	}

	return 0;
}
static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct mdump_retain_data_stc mdump_retain;
	int rc;

	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
		return;

	rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
	if (rc == 0 && mdump_retain.valid)
		DP_NOTICE(p_hwfn,
			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
			  mdump_retain.epoch,
			  mdump_retain.pf, mdump_retain.status);
	else
		DP_NOTICE(p_hwfn,
			  "The MFW notified that a critical error occurred in the device\n");

	DP_NOTICE(p_hwfn,
		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
	qed_mcp_mdump_ack(p_hwfn, p_ptt);

	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
}
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 port_cfg, val;

	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		return;

	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			  offsetof(struct public_port, oem_cfg_port));
	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
		OEM_CFG_CHANNEL_TYPE_OFFSET;
	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
		DP_NOTICE(p_hwfn,
			  "Incorrect UFP Channel type %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));

	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
	if (val == OEM_CFG_SCHED_TYPE_ETS) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
	} else {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
		DP_NOTICE(p_hwfn,
			  "Unknown UFP scheduling mode %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));
	}

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
		OEM_CFG_FUNC_TC_OFFSET;
	p_hwfn->ufp_info.tc = (u8)val;
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
		OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
	} else {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
		DP_NOTICE(p_hwfn,
			  "Unknown Host priority control %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));
	}

	DP_NOTICE(p_hwfn,
		  "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
		  p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
		  p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
}
static int
qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_mcp_read_ufp_config(p_hwfn, p_ptt);

	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
		qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
					   p_hwfn->ufp_info.tc);

		qed_qm_reconf(p_hwfn, p_ptt);
	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
		/* Merge UFP TC with the dcbx TC data */
		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
					  QED_DCBX_OPERATIONAL_MIB);
	} else {
		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
		return -EINVAL;
	}

	/* update storm FW with negotiation results */
	qed_sp_pf_update_ufp(p_hwfn);

	/* update stag pcp value */
	qed_sp_pf_update_stag(p_hwfn);

	return 0;
}
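/* MFW-to-driver notifications are delivered as a block of dwords in the MFW
 * mailbox. qed_mcp_handle_events() re-reads that block, compares each dword
 * against the shadow copy taken on the previous run to find new events,
 * dispatches a handler per changed entry, and finally writes the current
 * values back to the ack area so the MFW knows they were processed.
 */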
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_OEM_CFG_UPDATE:
			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			qed_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			qed_mcp_update_stag(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			qed_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_TLV_REQ:
			qed_mfw_tlv_req(p_hwfn);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	/* Read the address of the nvm_cfg */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read the offset of nvm_cfg1 */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		       offsetof(struct nvm_cfg1, glob) +
		       offsetof(struct nvm_cfg1_glob, mbi_version);
	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
			    mbi_ver_addr) &
		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

	return 0;
}
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_media_type)
{
	*p_media_type = MEDIA_UNSPECIFIED;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (!p_ptt) {
		*p_media_type = MEDIA_UNSPECIFIED;
		return -EINVAL;
	}

	*p_media_type = qed_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port,
					media_type));

	return 0;
}
int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *p_transceiver_state,
				 u32 *p_transceiver_type)
{
	u32 transceiver_info;

	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	transceiver_info = qed_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  offsetof(struct public_port,
					   transceiver_data));

	*p_transceiver_state = (transceiver_info &
				ETH_TRANSCEIVER_STATE_MASK) >>
				ETH_TRANSCEIVER_STATE_OFFSET;

	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		*p_transceiver_type = (transceiver_info &
				       ETH_TRANSCEIVER_TYPE_MASK) >>
				       ETH_TRANSCEIVER_TYPE_OFFSET;
	else
		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;

	return 0;
}
static bool qed_is_transceiver_ready(u32 transceiver_state,
				     u32 transceiver_type)
{
	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
		return true;

	return false;
}
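/* Map the transceiver type reported by the MFW to the set of NVM-config
 * speed capabilities the driver may advertise; a module is only consulted
 * once qed_is_transceiver_ready() confirms it is present, not updating,
 * and of a known type.
 */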
2131 int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
2132 struct qed_ptt *p_ptt, u32 *p_speed_mask)
2134 u32 transceiver_type, transceiver_state;
2137 ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2142 if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
2146 switch (transceiver_type) {
2147 case ETH_TRANSCEIVER_TYPE_1G_LX:
2148 case ETH_TRANSCEIVER_TYPE_1G_SX:
2149 case ETH_TRANSCEIVER_TYPE_1G_PCC:
2150 case ETH_TRANSCEIVER_TYPE_1G_ACC:
2151 case ETH_TRANSCEIVER_TYPE_1000BASET:
2152 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2154 case ETH_TRANSCEIVER_TYPE_10G_SR:
2155 case ETH_TRANSCEIVER_TYPE_10G_LR:
2156 case ETH_TRANSCEIVER_TYPE_10G_LRM:
2157 case ETH_TRANSCEIVER_TYPE_10G_ER:
2158 case ETH_TRANSCEIVER_TYPE_10G_PCC:
2159 case ETH_TRANSCEIVER_TYPE_10G_ACC:
2160 case ETH_TRANSCEIVER_TYPE_4x10G:
2161 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2163 case ETH_TRANSCEIVER_TYPE_40G_LR4:
2164 case ETH_TRANSCEIVER_TYPE_40G_SR4:
2165 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2166 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2167 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2168 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2170 case ETH_TRANSCEIVER_TYPE_100G_AOC:
2171 case ETH_TRANSCEIVER_TYPE_100G_SR4:
2172 case ETH_TRANSCEIVER_TYPE_100G_LR4:
2173 case ETH_TRANSCEIVER_TYPE_100G_ER4:
2174 case ETH_TRANSCEIVER_TYPE_100G_ACC:
2176 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2177 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2179 case ETH_TRANSCEIVER_TYPE_25G_SR:
2180 case ETH_TRANSCEIVER_TYPE_25G_LR:
2181 case ETH_TRANSCEIVER_TYPE_25G_AOC:
2182 case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2183 case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2184 case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2185 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2187 case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2188 case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2189 case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2190 case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2191 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2192 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2193 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2195 case ETH_TRANSCEIVER_TYPE_40G_CR4:
2196 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2197 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2198 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2199 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2201 case ETH_TRANSCEIVER_TYPE_100G_CR4:
2202 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2204 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2205 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2206 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2207 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2208 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2209 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2210 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2212 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2213 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2214 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2216 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2217 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2218 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2219 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2221 case ETH_TRANSCEIVER_TYPE_XLPPI:
2222 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2224 case ETH_TRANSCEIVER_TYPE_10G_BASET:
2225 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2226 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2229 DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2231 *p_speed_mask = 0xff;
2238 int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
2239 struct qed_ptt *p_ptt, u32 *p_board_config)
2241 u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2243 if (IS_VF(p_hwfn->cdev))
2246 if (!qed_mcp_is_init(p_hwfn)) {
2247 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2251 *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2255 nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2256 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2257 port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2258 offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2259 *p_board_config = qed_rd(p_hwfn, p_ptt,
2261 offsetof(struct nvm_cfg1_port,
2267 /* An old MFW has a single global configuration for all PFs regarding RDMA support */
2269 qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
2270 enum qed_pci_personality *p_proto)
2272 /* There was never a legacy MFW that published iwarp support,
2273 * so at this point this is either plain L2 or RoCE.
2275 if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
2276 *p_proto = QED_PCI_ETH_ROCE;
2278 *p_proto = QED_PCI_ETH;
2280 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2281 "According to Legacy capabilities, L2 personality is %08x\n",
2286 qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
2287 struct qed_ptt *p_ptt,
2288 enum qed_pci_personality *p_proto)
2290 u32 resp = 0, param = 0;
2293 rc = qed_mcp_cmd(p_hwfn, p_ptt,
2294 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2297 if (resp != FW_MSG_CODE_OK) {
2298 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2299 "MFW lacks support for command; Returns %08x\n",
2305 case FW_MB_PARAM_GET_PF_RDMA_NONE:
2306 *p_proto = QED_PCI_ETH;
2308 case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2309 *p_proto = QED_PCI_ETH_ROCE;
2311 case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2312 *p_proto = QED_PCI_ETH_IWARP;
2314 case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2315 *p_proto = QED_PCI_ETH_RDMA;
2319 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2326 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2327 (u32) *p_proto, resp, param);
2332 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
2333 struct public_func *p_info,
2334 struct qed_ptt *p_ptt,
2335 enum qed_pci_personality *p_proto)
2339 switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2340 case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2341 if (!IS_ENABLED(CONFIG_QED_RDMA))
2342 *p_proto = QED_PCI_ETH;
2343 else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
2344 qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2346 case FUNC_MF_CFG_PROTOCOL_ISCSI:
2347 *p_proto = QED_PCI_ISCSI;
2349 case FUNC_MF_CFG_PROTOCOL_FCOE:
2350 *p_proto = QED_PCI_FCOE;
2352 case FUNC_MF_CFG_PROTOCOL_ROCE:
2353 DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
2362 int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
2363 struct qed_ptt *p_ptt)
2365 struct qed_mcp_function_info *info;
2366 struct public_func shmem_info;
2368 qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2369 info = &p_hwfn->mcp_info->func_info;
2371 info->pause_on_host = (shmem_info.config &
2372 FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2374 if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2376 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2377 (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2381 qed_read_pf_bandwidth(p_hwfn, &shmem_info);
2383 if (shmem_info.mac_upper || shmem_info.mac_lower) {
2384 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2385 info->mac[1] = (u8)(shmem_info.mac_upper);
2386 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2387 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2388 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2389 info->mac[5] = (u8)(shmem_info.mac_lower);
2391 /* Store the primary MAC for possible later WoL use */
2392 memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
2394 DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
2397 info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2398 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2399 info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2400 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2402 info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2404 info->mtu = (u16)shmem_info.mtu_size;
2406 p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
2407 p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
2408 if (qed_mcp_is_init(p_hwfn)) {
2409 u32 resp = 0, param = 0;
2412 rc = qed_mcp_cmd(p_hwfn, p_ptt,
2413 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2416 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2417 p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
2420 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
2421 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2422 info->pause_on_host, info->protocol,
2423 info->bandwidth_min, info->bandwidth_max,
2424 info->mac[0], info->mac[1], info->mac[2],
2425 info->mac[3], info->mac[4], info->mac[5],
2426 info->wwn_port, info->wwn_node,
2427 info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
2432 struct qed_mcp_link_params
2433 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
2435 if (!p_hwfn || !p_hwfn->mcp_info)
2437 return &p_hwfn->mcp_info->link_input;
2440 struct qed_mcp_link_state
2441 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
2443 if (!p_hwfn || !p_hwfn->mcp_info)
2445 return &p_hwfn->mcp_info->link_output;
2448 struct qed_mcp_link_capabilities
2449 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
2451 if (!p_hwfn || !p_hwfn->mcp_info)
2453 return &p_hwfn->mcp_info->link_capabilities;
2456 int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2458 u32 resp = 0, param = 0;
2461 rc = qed_mcp_cmd(p_hwfn, p_ptt,
2462 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2464 /* Wait for the drain to complete before returning */
2470 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
2471 struct qed_ptt *p_ptt, u32 *p_flash_size)
2475 if (IS_VF(p_hwfn->cdev))
2478 flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2479 flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2480 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2481 flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
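/* Illustrative example (not part of the original driver): the CFG4 field
 * encodes the flash size as a power of two in Mbit, and adding
 * MCP_BYTES_PER_MBIT_SHIFT converts that to bytes; a field value of 3 thus
 * yields 1 << (3 + MCP_BYTES_PER_MBIT_SHIFT) = 8 Mbit = 1 MiB of flash.
 */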
2483 *p_flash_size = flash_size;
2488 int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2490 struct qed_dev *cdev = p_hwfn->cdev;
2492 if (cdev->recov_in_prog) {
2494 "Avoid triggering a recovery since such a process is already in progress\n");
2498 DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
2499 qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2504 #define QED_RECOVERY_PROLOG_SLEEP_MS 100
2506 int qed_recovery_prolog(struct qed_dev *cdev)
2508 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2509 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
2512 /* Allow ongoing PCIe transactions to complete */
2513 msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
2515 /* Clear the PF's internal FID_enable in the PXP */
2516 rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
2519 "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
2526 qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
2527 struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2529 u32 resp = 0, param = 0, rc_param = 0;
2532 /* Only the leader hwfn can configure MSI-X, and CMT must be taken into account */
2533 if (!IS_LEAD_HWFN(p_hwfn))
2535 num *= p_hwfn->cdev->num_hwfns;
2537 param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2538 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2539 param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2540 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
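/* Illustrative example (not part of the original driver): on a two-hwfn CMT
 * device a request of num = 4 for vf_id = 3 is first doubled to 8 SBs, and
 * both values are then packed into the VF_ID and SB_NUM fields of 'param'
 * above.
 */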
2542 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2545 if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2546 DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
2549 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2550 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2558 qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
2559 struct qed_ptt *p_ptt, u8 num)
2561 u32 resp = 0, param = num, rc_param = 0;
2564 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2565 param, &resp, &rc_param);
2567 if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2568 DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
2571 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2572 "Requested 0x%02x MSI-x interrupts for VFs\n", num);
2578 int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
2579 struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2581 if (QED_IS_BB(p_hwfn->cdev))
2582 return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2584 return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2588 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
2589 struct qed_ptt *p_ptt,
2590 struct qed_mcp_drv_version *p_ver)
2592 struct qed_mcp_mb_params mb_params;
2593 struct drv_version_stc drv_version;
2598 memset(&drv_version, 0, sizeof(drv_version));
2599 drv_version.version = p_ver->version;
2600 for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
2601 val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
2602 *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
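/* The cpu_to_be32() above swaps each 4-byte group of the name on LE hosts,
 * presumably so the BE MCP reads the string in the right order through its
 * 32-bit SHMEM accesses (the same rationale as the MAC packing in
 * qed_mcp_ov_update_mac() below).
 */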
2605 memset(&mb_params, 0, sizeof(mb_params));
2606 mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2607 mb_params.p_data_src = &drv_version;
2608 mb_params.data_src_size = sizeof(drv_version);
2609 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2611 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2616 /* Wait at most ~100 msec (10 retries x 10 msec) for the MCP to halt */
2617 #define QED_MCP_HALT_SLEEP_MS 10
2618 #define QED_MCP_HALT_MAX_RETRIES 10
2620 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2622 u32 resp = 0, param = 0, cpu_state, cnt = 0;
2625 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2628 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2633 msleep(QED_MCP_HALT_SLEEP_MS);
2634 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2635 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2637 } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
2639 if (cnt == QED_MCP_HALT_MAX_RETRIES) {
2641 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2642 qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2646 qed_mcp_cmd_set_blocking(p_hwfn, true);
2651 #define QED_MCP_RESUME_SLEEP_MS 10
2653 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2655 u32 cpu_mode, cpu_state;
2657 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2659 cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2660 cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2661 qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2662 msleep(QED_MCP_RESUME_SLEEP_MS);
2663 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2665 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2667 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2668 cpu_mode, cpu_state);
2672 qed_mcp_cmd_set_blocking(p_hwfn, false);
2677 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
2678 struct qed_ptt *p_ptt,
2679 enum qed_ov_client client)
2681 u32 resp = 0, param = 0;
2686 case QED_OV_CLIENT_DRV:
2687 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2689 case QED_OV_CLIENT_USER:
2690 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2692 case QED_OV_CLIENT_VENDOR_SPEC:
2693 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2696 DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
2700 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2701 drv_mb_param, &resp, &param);
2703 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2708 int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
2709 struct qed_ptt *p_ptt,
2710 enum qed_ov_driver_state drv_state)
2712 u32 resp = 0, param = 0;
2716 switch (drv_state) {
2717 case QED_OV_DRIVER_STATE_NOT_LOADED:
2718 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2720 case QED_OV_DRIVER_STATE_DISABLED:
2721 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2723 case QED_OV_DRIVER_STATE_ACTIVE:
2724 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2727 DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
2731 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2732 drv_mb_param, &resp, &param);
2734 DP_ERR(p_hwfn, "Failed to send driver state\n");
2739 int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
2740 struct qed_ptt *p_ptt, u16 mtu)
2742 u32 resp = 0, param = 0;
2746 drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2747 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2748 drv_mb_param, &resp, &param);
2750 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2755 int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
2756 struct qed_ptt *p_ptt, u8 *mac)
2758 struct qed_mcp_mb_params mb_params;
2762 memset(&mb_params, 0, sizeof(mb_params));
2763 mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2764 mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2765 DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2766 mb_params.param |= MCP_PF_ID(p_hwfn);
2768 /* The MCP is BE, and on LE platforms PCI swaps accesses to SHMEM
2769 * at 32-bit granularity.
2770 * The MAC therefore has to be written in native order [and not byte
2771 * order]; otherwise the MFW would read it incorrectly after the swap.
2773 mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2774 mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
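/* Illustrative example: for MAC 00:11:22:33:44:55 the packing above yields
 * mfw_mac[0] = 0x00112233 and mfw_mac[1] = 0x44550000.
 */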
2776 mb_params.p_data_src = (u8 *)mfw_mac;
2777 mb_params.data_src_size = 8;
2778 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2780 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2782 /* Store the primary MAC for possible later WoL use */
2783 memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
2788 int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
2789 struct qed_ptt *p_ptt, enum qed_ov_wol wol)
2791 u32 resp = 0, param = 0;
2795 if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
2796 DP_VERBOSE(p_hwfn, QED_MSG_SP,
2797 "Can't change WoL configuration when WoL isn't supported\n");
2802 case QED_OV_WOL_DEFAULT:
2803 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2805 case QED_OV_WOL_DISABLED:
2806 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2808 case QED_OV_WOL_ENABLED:
2809 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2812 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2816 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2817 drv_mb_param, &resp, &param);
2819 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2821 /* Store the WoL update for a future unload */
2822 p_hwfn->cdev->wol_config = (u8)wol;
2827 int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
2828 struct qed_ptt *p_ptt,
2829 enum qed_ov_eswitch eswitch)
2831 u32 resp = 0, param = 0;
2836 case QED_OV_ESWITCH_NONE:
2837 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2839 case QED_OV_ESWITCH_VEB:
2840 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2842 case QED_OV_ESWITCH_VEPA:
2843 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2846 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2850 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2851 drv_mb_param, &resp, &param);
2853 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2858 int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
2859 struct qed_ptt *p_ptt, enum qed_led_mode mode)
2861 u32 resp = 0, param = 0, drv_mb_param;
2865 case QED_LED_MODE_ON:
2866 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2868 case QED_LED_MODE_OFF:
2869 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2871 case QED_LED_MODE_RESTORE:
2872 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2875 DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
2879 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2880 drv_mb_param, &resp, &param);
2885 int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
2886 struct qed_ptt *p_ptt, u32 mask_parities)
2888 u32 resp = 0, param = 0;
2891 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2892 mask_parities, &resp, &param);
2896 "MCP response failure for mask parities, aborting\n");
2897 } else if (resp != FW_MSG_CODE_OK) {
2899 "MCP did not acknowledge mask parity request. Old MFW?\n");
2906 int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
2908 u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
2909 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2910 u32 resp = 0, resp_param = 0;
2911 struct qed_ptt *p_ptt;
2914 p_ptt = qed_ptt_acquire(p_hwfn);
2918 while (bytes_left > 0) {
2919 bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
2921 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2922 DRV_MSG_CODE_NVM_READ_NVRAM,
2925 DRV_MB_PARAM_NVM_LEN_OFFSET),
2928 (u32 *)(p_buf + offset));
2930 if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
2931 DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
2935 /* This can be a lengthy process, and it's possible the scheduler isn't
2936 * preemptible. Sleep once per 4 KiB boundary crossed to prevent CPU hogging.
2938 if (bytes_left % 0x1000 <
2939 (bytes_left - read_len) % 0x1000)
2940 usleep_range(1000, 2000);
2943 bytes_left -= read_len;
2946 cdev->mcp_nvm_resp = resp;
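/* The response cached above is what qed_mcp_nvm_resp() later returns */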
2947 qed_ptt_release(p_hwfn, p_ptt);
2952 int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
2954 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2955 struct qed_ptt *p_ptt;
2957 p_ptt = qed_ptt_acquire(p_hwfn);
2961 memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
2962 qed_ptt_release(p_hwfn, p_ptt);
2967 int qed_mcp_nvm_write(struct qed_dev *cdev,
2968 u32 cmd, u32 addr, u8 *p_buf, u32 len)
2970 u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
2971 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2972 struct qed_ptt *p_ptt;
2975 p_ptt = qed_ptt_acquire(p_hwfn);
2980 case QED_PUT_FILE_BEGIN:
2981 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2983 case QED_PUT_FILE_DATA:
2984 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2986 case QED_NVM_WRITE_NVRAM:
2987 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2990 DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
2995 buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
2996 while (buf_idx < len) {
2997 if (cmd == QED_PUT_FILE_BEGIN)
3000 nvm_offset = ((buf_size <<
3001 DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
3003 rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3004 &resp, &param, buf_size,
3005 (u32 *)&p_buf[buf_idx]);
3007 DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
3008 resp = FW_MSG_CODE_ERROR;
3012 if (resp != FW_MSG_CODE_OK &&
3013 resp != FW_MSG_CODE_NVM_OK &&
3014 resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3016 "nvm write failed, resp = 0x%08x\n", resp);
3021 /* This can be a lengthy process, and it's possible the scheduler isn't
3022 * preemptible. Sleep a bit to prevent CPU hogging.
3024 if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
3025 usleep_range(1000, 2000);
3027 /* For MBI upgrade, MFW response includes the next buffer offset
3028 * to be delivered to MFW.
3030 if (param && cmd == QED_PUT_FILE_DATA) {
3031 buf_idx = QED_MFW_GET_FIELD(param,
3032 FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
3033 buf_size = QED_MFW_GET_FIELD(param,
3034 FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
3036 buf_idx += buf_size;
3037 buf_size = min_t(u32, (len - buf_idx),
3038 MCP_DRV_NVM_BUF_LEN);
3042 cdev->mcp_nvm_resp = resp;
3044 qed_ptt_release(p_hwfn, p_ptt);
3049 int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3050 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
3052 u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
3056 nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
3057 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
3058 nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
3059 DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
3064 while (bytes_left > 0) {
3065 bytes_to_copy = min_t(u32, bytes_left,
3066 MAX_I2C_TRANSACTION_SIZE);
3067 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3068 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3069 nvm_offset |= ((addr + offset) <<
3070 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
3071 DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
3072 nvm_offset |= (bytes_to_copy <<
3073 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
3074 DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
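/* At this point 'nvm_offset' carries four packed fields: the port and I2C
 * address set once before the loop, plus the per-chunk byte offset and
 * transfer size re-encoded on every iteration.
 */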
3075 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3076 DRV_MSG_CODE_TRANSCEIVER_READ,
3077 nvm_offset, &resp, &param, &buf_size,
3078 (u32 *)(p_buf + offset));
3081 "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3086 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3088 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3092 bytes_left -= buf_size;
3098 int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3100 u32 drv_mb_param = 0, rsp, param;
3103 drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3104 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3106 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3107 drv_mb_param, &rsp, &param);
3112 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3113 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3119 int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3121 u32 drv_mb_param, rsp, param;
3124 drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3125 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3127 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3128 drv_mb_param, &rsp, &param);
3133 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3134 (param != DRV_MB_PARAM_BIST_RC_PASSED))
3140 int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
3141 struct qed_ptt *p_ptt,
3144 u32 drv_mb_param = 0, rsp;
3147 drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3148 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3150 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3151 drv_mb_param, &rsp, num_images);
3155 if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3161 int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
3162 struct qed_ptt *p_ptt,
3163 struct bist_nvm_image_att *p_image_att,
3166 u32 buf_size = 0, param, resp = 0, resp_param = 0;
3169 param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3170 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
3171 param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
3173 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3174 DRV_MSG_CODE_BIST_TEST, param,
3177 (u32 *)p_image_att);
3181 if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3182 (p_image_att->return_code != 1))
3188 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
3190 struct qed_nvm_image_info nvm_info;
3191 struct qed_ptt *p_ptt;
3195 if (p_hwfn->nvm_info.valid)
3198 p_ptt = qed_ptt_acquire(p_hwfn);
3200 DP_ERR(p_hwfn, "failed to acquire ptt\n");
3204 /* Query the MFW for the number of available images */
3205 nvm_info.num_images = 0;
3206 rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
3207 p_ptt, &nvm_info.num_images);
3208 if (rc == -EOPNOTSUPP) {
3209 DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
3211 } else if (rc || !nvm_info.num_images) {
3212 DP_ERR(p_hwfn, "Failed getting number of images\n");
3216 nvm_info.image_att = kmalloc_array(nvm_info.num_images,
3217 sizeof(struct bist_nvm_image_att),
3219 if (!nvm_info.image_att) {
3224 /* Iterate over images and get their attributes */
3225 for (i = 0; i < nvm_info.num_images; i++) {
3226 rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
3227 &nvm_info.image_att[i], i);
3230 "Failed getting image index %d attributes\n", i);
3234 DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
3235 nvm_info.image_att[i].len);
3238 /* Update hwfn's nvm_info */
3239 if (nvm_info.num_images) {
3240 p_hwfn->nvm_info.num_images = nvm_info.num_images;
3241 kfree(p_hwfn->nvm_info.image_att);
3242 p_hwfn->nvm_info.image_att = nvm_info.image_att;
3243 p_hwfn->nvm_info.valid = true;
3246 qed_ptt_release(p_hwfn, p_ptt);
3250 kfree(nvm_info.image_att);
3252 qed_ptt_release(p_hwfn, p_ptt);
3257 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3258 enum qed_nvm_images image_id,
3259 struct qed_nvm_image_att *p_image_att)
3261 enum nvm_image_type type;
3264 /* Translate image_id into MFW definitions */
3266 case QED_NVM_IMAGE_ISCSI_CFG:
3267 type = NVM_TYPE_ISCSI_CFG;
3269 case QED_NVM_IMAGE_FCOE_CFG:
3270 type = NVM_TYPE_FCOE_CFG;
3272 case QED_NVM_IMAGE_MDUMP:
3273 type = NVM_TYPE_MDUMP;
3275 case QED_NVM_IMAGE_NVM_CFG1:
3276 type = NVM_TYPE_NVM_CFG1;
3278 case QED_NVM_IMAGE_DEFAULT_CFG:
3279 type = NVM_TYPE_DEFAULT_CFG;
3281 case QED_NVM_IMAGE_NVM_META:
3282 type = NVM_TYPE_META;
3285 DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
3290 qed_mcp_nvm_info_populate(p_hwfn);
3291 for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3292 if (type == p_hwfn->nvm_info.image_att[i].image_type)
3294 if (i == p_hwfn->nvm_info.num_images) {
3295 DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3296 "Failed to find nvram image of type %08x\n",
3301 p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
3302 p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
3307 int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
3308 enum qed_nvm_images image_id,
3309 u8 *p_buffer, u32 buffer_len)
3311 struct qed_nvm_image_att image_att;
3314 memset(p_buffer, 0, buffer_len);
3316 rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
3320 /* Validate sizes - both the image's and the supplied buffer's */
3321 if (image_att.length <= 4) {
3322 DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3323 "Image [%d] is too small - only %d bytes\n",
3324 image_id, image_att.length);
3328 if (image_att.length > buffer_len) {
3331 "Image [%d] is too big - %08x bytes where only %08x are available\n",
3332 image_id, image_att.length, buffer_len);
3336 return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
3337 p_buffer, image_att.length);
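/* Usage sketch (illustrative, not part of the original driver): read the
 * mdump image into a caller-supplied buffer. The buffer and its size are
 * assumptions of this example.
 *
 *	u8 buf[4096];
 *	int rc;
 *
 *	rc = qed_mcp_get_nvm_image(p_hwfn, QED_NVM_IMAGE_MDUMP,
 *				   buf, sizeof(buf));
 *	if (rc)
 *		DP_NOTICE(p_hwfn, "Failed to read mdump image, rc = %d\n", rc);
 */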
3340 static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
3342 enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3346 mfw_res_id = RESOURCE_NUM_SB_E;
3349 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3352 mfw_res_id = RESOURCE_NUM_VPORT_E;
3355 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3358 mfw_res_id = RESOURCE_NUM_PQ_E;
3361 mfw_res_id = RESOURCE_NUM_RL_E;
3365 /* Each VFC resource can accommodate both a MAC and a VLAN */
3366 mfw_res_id = RESOURCE_VFC_FILTER_E;
3369 mfw_res_id = RESOURCE_ILT_E;
3371 case QED_LL2_RAM_QUEUE:
3372 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3374 case QED_LL2_CTX_QUEUE:
3375 mfw_res_id = RESOURCE_LL2_CQS_E;
3377 case QED_RDMA_CNQ_RAM:
3379 /* CNQ/CMDQS are the same resource */
3380 mfw_res_id = RESOURCE_CQS_E;
3382 case QED_RDMA_STATS_QUEUE:
3383 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3386 mfw_res_id = RESOURCE_BDQ_E;
3395 #define QED_RESC_ALLOC_VERSION_MAJOR 2
3396 #define QED_RESC_ALLOC_VERSION_MINOR 0
3397 #define QED_RESC_ALLOC_VERSION \
3398 ((QED_RESC_ALLOC_VERSION_MAJOR << \
3399 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3400 (QED_RESC_ALLOC_VERSION_MINOR << \
3401 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3403 struct qed_resc_alloc_in_params {
3405 enum qed_resources res_id;
3409 struct qed_resc_alloc_out_params {
3420 qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
3421 struct qed_ptt *p_ptt,
3422 struct qed_resc_alloc_in_params *p_in_params,
3423 struct qed_resc_alloc_out_params *p_out_params)
3425 struct qed_mcp_mb_params mb_params;
3426 struct resource_info mfw_resc_info;
3429 memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
3431 mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
3432 if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3434 "Failed to match resource %d [%s] with the MFW resources\n",
3435 p_in_params->res_id,
3436 qed_hw_get_resc_name(p_in_params->res_id));
3440 switch (p_in_params->cmd) {
3441 case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3442 mfw_resc_info.size = p_in_params->resc_max_val;
3444 case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3447 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3452 memset(&mb_params, 0, sizeof(mb_params));
3453 mb_params.cmd = p_in_params->cmd;
3454 mb_params.param = QED_RESC_ALLOC_VERSION;
3455 mb_params.p_data_src = &mfw_resc_info;
3456 mb_params.data_src_size = sizeof(mfw_resc_info);
3457 mb_params.p_data_dst = mb_params.p_data_src;
3458 mb_params.data_dst_size = mb_params.data_src_size;
3462 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3464 p_in_params->res_id,
3465 qed_hw_get_resc_name(p_in_params->res_id),
3466 QED_MFW_GET_FIELD(mb_params.param,
3467 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3468 QED_MFW_GET_FIELD(mb_params.param,
3469 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3470 p_in_params->resc_max_val);
3472 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3476 p_out_params->mcp_resp = mb_params.mcp_resp;
3477 p_out_params->mcp_param = mb_params.mcp_param;
3478 p_out_params->resc_num = mfw_resc_info.size;
3479 p_out_params->resc_start = mfw_resc_info.offset;
3480 p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3481 p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3482 p_out_params->flags = mfw_resc_info.flags;
3486 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3487 QED_MFW_GET_FIELD(p_out_params->mcp_param,
3488 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3489 QED_MFW_GET_FIELD(p_out_params->mcp_param,
3490 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3491 p_out_params->resc_num,
3492 p_out_params->resc_start,
3493 p_out_params->vf_resc_num,
3494 p_out_params->vf_resc_start, p_out_params->flags);
3500 qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
3501 struct qed_ptt *p_ptt,
3502 enum qed_resources res_id,
3503 u32 resc_max_val, u32 *p_mcp_resp)
3505 struct qed_resc_alloc_out_params out_params;
3506 struct qed_resc_alloc_in_params in_params;
3509 memset(&in_params, 0, sizeof(in_params));
3510 in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3511 in_params.res_id = res_id;
3512 in_params.resc_max_val = resc_max_val;
3513 memset(&out_params, 0, sizeof(out_params));
3514 rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3519 *p_mcp_resp = out_params.mcp_resp;
3525 qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
3526 struct qed_ptt *p_ptt,
3527 enum qed_resources res_id,
3528 u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
3530 struct qed_resc_alloc_out_params out_params;
3531 struct qed_resc_alloc_in_params in_params;
3534 memset(&in_params, 0, sizeof(in_params));
3535 in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3536 in_params.res_id = res_id;
3537 memset(&out_params, 0, sizeof(out_params));
3538 rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3543 *p_mcp_resp = out_params.mcp_resp;
3545 if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3546 *p_resc_num = out_params.resc_num;
3547 *p_resc_start = out_params.resc_start;
3553 int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3555 u32 mcp_resp, mcp_param;
3557 return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3558 &mcp_resp, &mcp_param);
3561 static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
3562 struct qed_ptt *p_ptt,
3563 u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
3567 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3568 p_mcp_resp, p_mcp_param);
3572 if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3574 "The resource command is unsupported by the MFW\n");
3578 if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3579 u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3582 "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3591 __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3592 struct qed_ptt *p_ptt,
3593 struct qed_resc_lock_params *p_params)
3595 u32 param = 0, mcp_resp, mcp_param;
3599 switch (p_params->timeout) {
3600 case QED_MCP_RESC_LOCK_TO_DEFAULT:
3601 opcode = RESOURCE_OPCODE_REQ;
3602 p_params->timeout = 0;
3604 case QED_MCP_RESC_LOCK_TO_NONE:
3605 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3606 p_params->timeout = 0;
3609 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3613 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3614 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3615 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3619 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3620 param, p_params->timeout, opcode, p_params->resource);
3622 /* Attempt to acquire the resource */
3623 rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3627 /* Analyze the response */
3628 p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3629 opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3633 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3634 mcp_param, opcode, p_params->owner);
3637 case RESOURCE_OPCODE_GNT:
3638 p_params->b_granted = true;
3640 case RESOURCE_OPCODE_BUSY:
3641 p_params->b_granted = false;
3645 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3654 qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3655 struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
3661 /* No need for an interval before the first iteration */
3663 if (p_params->sleep_b4_retry) {
3664 u16 retry_interval_in_ms =
3665 DIV_ROUND_UP(p_params->retry_interval,
3668 msleep(retry_interval_in_ms);
3670 udelay(p_params->retry_interval);
3674 rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3678 if (p_params->b_granted)
3680 } while (retry_cnt++ < p_params->retry_num);
3686 qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
3687 struct qed_ptt *p_ptt,
3688 struct qed_resc_unlock_params *p_params)
3690 u32 param = 0, mcp_resp, mcp_param;
3694 opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3695 : RESOURCE_OPCODE_RELEASE;
3696 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3697 QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3699 DP_VERBOSE(p_hwfn, QED_MSG_SP,
3700 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3701 param, opcode, p_params->resource);
3703 /* Attempt to release the resource */
3704 rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3708 /* Analyze the response */
3709 opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3711 DP_VERBOSE(p_hwfn, QED_MSG_SP,
3712 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3716 case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3718 "Resource unlock request for an already released resource [%d]\n",
3719 p_params->resource);
3721 case RESOURCE_OPCODE_RELEASED:
3722 p_params->b_released = true;
3724 case RESOURCE_OPCODE_WRONG_OWNER:
3725 p_params->b_released = false;
3729 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3737 void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
3738 struct qed_resc_unlock_params *p_unlock,
3740 resource, bool b_is_permanent)
3743 memset(p_lock, 0, sizeof(*p_lock));
3745 /* Permanent resources don't require aging, and there's no point
3746 * in trying to acquire them more than once, since no other entity
3747 * is expected to release them.
3749 if (b_is_permanent) {
3750 p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
3752 p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3753 p_lock->retry_interval =
3754 QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3755 p_lock->sleep_b4_retry = true;
3758 p_lock->resource = resource;
3762 memset(p_unlock, 0, sizeof(*p_unlock));
3763 p_unlock->resource = resource;
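/* Usage sketch (illustrative, not part of the original driver): take and
 * release a non-permanent lock using the defaults initialized above. The
 * resource id QED_RESC_LOCK_PTP_PORT0 is an assumption of this example.
 *
 *	struct qed_resc_unlock_params unlock_params;
 *	struct qed_resc_lock_params lock_params;
 *	int rc;
 *
 *	qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
 *				       QED_RESC_LOCK_PTP_PORT0, false);
 *	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
 *	if (!rc && lock_params.b_granted) {
 *		// ... access the MFW-arbitrated resource ...
 *		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 *	}
 */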
3767 bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
3769 return !!(p_hwfn->mcp_info->capabilities &
3770 FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3773 int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3778 rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3779 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3781 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
3782 "MFW supported features: %08x\n",
3783 p_hwfn->mcp_info->capabilities);
3788 int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3790 u32 mcp_resp, mcp_param, features;
3792 features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3793 DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
3795 return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3796 features, &mcp_resp, &mcp_param);
3799 int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3801 struct qed_mcp_mb_params mb_params = {0};
3802 struct qed_dev *cdev = p_hwfn->cdev;
3803 u8 fir_valid, l2_valid;
3806 mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
3807 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3811 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3813 "The get_engine_config command is unsupported by the MFW\n");
3817 fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3818 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
3821 QED_MFW_GET_FIELD(mb_params.mcp_param,
3822 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
3824 l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3825 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
3827 cdev->l2_affin_hint =
3828 QED_MFW_GET_FIELD(mb_params.mcp_param,
3829 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
3832 "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
3833 fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
3838 int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3840 struct qed_mcp_mb_params mb_params = {0};
3841 struct qed_dev *cdev = p_hwfn->cdev;
3844 mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
3845 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3849 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3851 "The get_ppfid_bitmap command is unsupported by the MFW\n");
3855 cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
3856 FW_MB_PARAM_PPFID_BITMAP);
3858 DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
3859 cdev->ppfid_bitmap);
3864 int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3865 u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3868 u32 mb_param = 0, resp, param;
3871 QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3872 if (flags & QED_NVM_CFG_OPTION_INIT)
3873 QED_MFW_SET_FIELD(mb_param,
3874 DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3875 if (flags & QED_NVM_CFG_OPTION_FREE)
3876 QED_MFW_SET_FIELD(mb_param,
3877 DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3878 if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3879 QED_MFW_SET_FIELD(mb_param,
3880 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3881 QED_MFW_SET_FIELD(mb_param,
3882 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
3886 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3887 DRV_MSG_CODE_GET_NVM_CFG_OPTION,
3888 mb_param, &resp, &param, p_len, (u32 *)p_buf);
3893 int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3894 u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3897 u32 mb_param = 0, resp, param;
3899 QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3900 if (flags & QED_NVM_CFG_OPTION_ALL)
3901 QED_MFW_SET_FIELD(mb_param,
3902 DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
3903 if (flags & QED_NVM_CFG_OPTION_INIT)
3904 QED_MFW_SET_FIELD(mb_param,
3905 DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3906 if (flags & QED_NVM_CFG_OPTION_COMMIT)
3907 QED_MFW_SET_FIELD(mb_param,
3908 DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
3909 if (flags & QED_NVM_CFG_OPTION_FREE)
3910 QED_MFW_SET_FIELD(mb_param,
3911 DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3912 if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3913 QED_MFW_SET_FIELD(mb_param,
3914 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3915 QED_MFW_SET_FIELD(mb_param,
3916 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
3920 return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3921 DRV_MSG_CODE_SET_NVM_CFG_OPTION,
3922 mb_param, &resp, &param, len, (u32 *)p_buf);
3925 #define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN
3926 #define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32)
3927 #define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
3928 (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
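/* With the 32-byte MCP_DRV_NVM_BUF_LEN (see the "<= 32" note near the end of
 * this file), each debug-data chunk carries a 4-byte header followed by up to
 * 28 payload bytes.
 */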
3931 __qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
3932 struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
3934 struct qed_mcp_mb_params mb_params;
3937 if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
3939 "Debug data size is %d while it should not exceed %d\n",
3940 size, QED_MCP_DBG_DATA_MAX_SIZE);
3944 memset(&mb_params, 0, sizeof(mb_params));
3945 mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
3946 SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
3947 mb_params.p_data_src = p_buf;
3948 mb_params.data_src_size = size;
3949 rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3953 if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3955 "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
3957 } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
3958 DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
3960 } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
3962 "Failed to send debug data to the MFW [resp 0x%08x]\n",
3963 mb_params.mcp_resp);
3970 enum qed_mcp_dbg_data_type {
3971 QED_MCP_DBG_DATA_TYPE_RAW,
3974 /* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
3975 #define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0
3976 #define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff
3977 #define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12
3978 #define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000
3979 #define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20
3980 #define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
3981 #define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28
3982 #define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000
3984 #define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1
3985 #define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
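/* Illustrative example: PF 2 sending the first chunk of a RAW-type (0)
 * message with S/N 5 composes the header
 * (2 << 28) | (0x1 << 20) | (0 << 12) | 5 = 0x20100005.
 */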
3988 qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
3989 struct qed_ptt *p_ptt,
3990 enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
3992 u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
3993 u32 tmp_size = size, *p_header, *p_payload;
3998 p_header = (u32 *)raw_data;
3999 p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
4001 seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
4003 /* First chunk is marked as 'first' */
4004 flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4007 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
4008 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
4009 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4010 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
4012 while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
4013 memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
4014 rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4015 QED_MCP_DBG_DATA_MAX_SIZE);
4019 /* Clear the 'first' marking after sending the first chunk */
4020 if (p_tmp_buf == p_buf) {
4021 flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4022 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
4026 p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4027 tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4030 /* Last chunk is marked as 'last' */
4031 flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
4032 SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4033 memcpy(p_payload, p_tmp_buf, tmp_size);
4035 /* Casting the remaining size to u8 is OK since at this point it is <= 32 */
4036 return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4037 (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
4042 qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
4043 struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
4045 return qed_mcp_send_debug_data(p_hwfn, p_ptt,
4046 QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
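/* Usage sketch (illustrative, not part of the original driver): push a raw
 * driver blob to the MFW. 'buf' and 'len' are assumptions of this example.
 *
 *	rc = qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len);
 *	if (rc)
 *		DP_NOTICE(p_hwfn, "Failed to send debug data, rc = %d\n", rc);
 */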