qed: add support for multi-rate transceivers
[linux-2.6-microblaze.git] / drivers / net / ethernet / qlogic / qed / qed_mcp.c
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015-2017  QLogic Corporation
4  * Copyright (c) 2019-2020 Marvell International Ltd.
5  */
6
7 #include <linux/types.h>
8 #include <asm/byteorder.h>
9 #include <linux/delay.h>
10 #include <linux/errno.h>
11 #include <linux/kernel.h>
12 #include <linux/slab.h>
13 #include <linux/spinlock.h>
14 #include <linux/string.h>
15 #include <linux/etherdevice.h>
16 #include "qed.h"
17 #include "qed_cxt.h"
18 #include "qed_dcbx.h"
19 #include "qed_hsi.h"
20 #include "qed_hw.h"
21 #include "qed_mcp.h"
22 #include "qed_reg_addr.h"
23 #include "qed_sriov.h"
24
25 #define GRCBASE_MCP     0xe00000
26
27 #define QED_MCP_RESP_ITER_US    10
28
29 #define QED_DRV_MB_MAX_RETRIES  (500 * 1000)    /* Account for 5 sec */
30 #define QED_MCP_RESET_RETRIES   (50 * 1000)     /* Account for 500 msec */
31
32 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)           \
33         qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
34                _val)
35
36 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
37         qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
38
39 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
40         DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
41                      offsetof(struct public_drv_mb, _field), _val)
42
43 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field)         \
44         DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
45                      offsetof(struct public_drv_mb, _field))
46
47 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
48                   DRV_ID_PDA_COMP_VER_SHIFT)
49
50 #define MCP_BYTES_PER_MBIT_SHIFT 17
51
52 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
53 {
54         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
55                 return false;
56         return true;
57 }
58
59 void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
60 {
61         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
62                                         PUBLIC_PORT);
63         u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
64
65         p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
66                                                    MFW_PORT(p_hwfn));
67         DP_VERBOSE(p_hwfn, QED_MSG_SP,
68                    "port_addr = 0x%x, port_id 0x%02x\n",
69                    p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
70 }
71
72 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
73 {
74         u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
75         u32 tmp, i;
76
77         if (!p_hwfn->mcp_info->public_base)
78                 return;
79
80         for (i = 0; i < length; i++) {
81                 tmp = qed_rd(p_hwfn, p_ptt,
82                              p_hwfn->mcp_info->mfw_mb_addr +
83                              (i << 2) + sizeof(u32));
84
85                 /* The MB data is actually BE; Need to force it to cpu */
86                 ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
87                         be32_to_cpu((__force __be32)tmp);
88         }
89 }
90
91 struct qed_mcp_cmd_elem {
92         struct list_head list;
93         struct qed_mcp_mb_params *p_mb_params;
94         u16 expected_seq_num;
95         bool b_is_completed;
96 };
97
98 /* Must be called while cmd_lock is acquired */
99 static struct qed_mcp_cmd_elem *
100 qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
101                      struct qed_mcp_mb_params *p_mb_params,
102                      u16 expected_seq_num)
103 {
104         struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
105
106         p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
107         if (!p_cmd_elem)
108                 goto out;
109
110         p_cmd_elem->p_mb_params = p_mb_params;
111         p_cmd_elem->expected_seq_num = expected_seq_num;
112         list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
113 out:
114         return p_cmd_elem;
115 }
116
117 /* Must be called while cmd_lock is acquired */
118 static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
119                                  struct qed_mcp_cmd_elem *p_cmd_elem)
120 {
121         list_del(&p_cmd_elem->list);
122         kfree(p_cmd_elem);
123 }
124
125 /* Must be called while cmd_lock is acquired */
126 static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
127                                                      u16 seq_num)
128 {
129         struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
130
131         list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
132                 if (p_cmd_elem->expected_seq_num == seq_num)
133                         return p_cmd_elem;
134         }
135
136         return NULL;
137 }
138
139 int qed_mcp_free(struct qed_hwfn *p_hwfn)
140 {
141         if (p_hwfn->mcp_info) {
142                 struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
143
144                 kfree(p_hwfn->mcp_info->mfw_mb_cur);
145                 kfree(p_hwfn->mcp_info->mfw_mb_shadow);
146
147                 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
148                 list_for_each_entry_safe(p_cmd_elem,
149                                          p_tmp,
150                                          &p_hwfn->mcp_info->cmd_list, list) {
151                         qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
152                 }
153                 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
154         }
155
156         kfree(p_hwfn->mcp_info);
157         p_hwfn->mcp_info = NULL;
158
159         return 0;
160 }
161
162 /* Maximum of 1 sec to wait for the SHMEM ready indication */
163 #define QED_MCP_SHMEM_RDY_MAX_RETRIES   20
164 #define QED_MCP_SHMEM_RDY_ITER_MS       50
165
166 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
167 {
168         struct qed_mcp_info *p_info = p_hwfn->mcp_info;
169         u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
170         u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
171         u32 drv_mb_offsize, mfw_mb_offsize;
172         u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
173
174         p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
175         if (!p_info->public_base) {
176                 DP_NOTICE(p_hwfn,
177                           "The address of the MCP scratch-pad is not configured\n");
178                 return -EINVAL;
179         }
180
181         p_info->public_base |= GRCBASE_MCP;
182
183         /* Get the MFW MB address and number of supported messages */
184         mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
185                                 SECTION_OFFSIZE_ADDR(p_info->public_base,
186                                                      PUBLIC_MFW_MB));
187         p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
188         p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
189                                             p_info->mfw_mb_addr +
190                                             offsetof(struct public_mfw_mb,
191                                                      sup_msgs));
192
193         /* The driver can notify that there was an MCP reset, and might read the
194          * SHMEM values before the MFW has completed initializing them.
195          * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
196          * data ready indication.
197          */
198         while (!p_info->mfw_mb_length && --cnt) {
199                 msleep(msec);
200                 p_info->mfw_mb_length =
201                         (u16)qed_rd(p_hwfn, p_ptt,
202                                     p_info->mfw_mb_addr +
203                                     offsetof(struct public_mfw_mb, sup_msgs));
204         }
205
206         if (!cnt) {
207                 DP_NOTICE(p_hwfn,
208                           "Failed to get the SHMEM ready notification after %d msec\n",
209                           QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
210                 return -EBUSY;
211         }
212
213         /* Calculate the driver and MFW mailbox address */
214         drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
215                                 SECTION_OFFSIZE_ADDR(p_info->public_base,
216                                                      PUBLIC_DRV_MB));
217         p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
218         DP_VERBOSE(p_hwfn, QED_MSG_SP,
219                    "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
220                    drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
221
222         /* Get the current driver mailbox sequence before sending
223          * the first command
224          */
225         p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
226                              DRV_MSG_SEQ_NUMBER_MASK;
227
228         /* Get current FW pulse sequence */
229         p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
230                                 DRV_PULSE_SEQ_MASK;
231
232         p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
233
234         return 0;
235 }
236
237 int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
238 {
239         struct qed_mcp_info *p_info;
240         u32 size;
241
242         /* Allocate mcp_info structure */
243         p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
244         if (!p_hwfn->mcp_info)
245                 goto err;
246         p_info = p_hwfn->mcp_info;
247
248         /* Initialize the MFW spinlock */
249         spin_lock_init(&p_info->cmd_lock);
250         spin_lock_init(&p_info->link_lock);
251
252         INIT_LIST_HEAD(&p_info->cmd_list);
253
254         if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
255                 DP_NOTICE(p_hwfn, "MCP is not initialized\n");
256                 /* Do not free mcp_info here, since public_base indicate that
257                  * the MCP is not initialized
258                  */
259                 return 0;
260         }
261
262         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
263         p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
264         p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
265         if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
266                 goto err;
267
268         return 0;
269
270 err:
271         qed_mcp_free(p_hwfn);
272         return -ENOMEM;
273 }
274
275 static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
276                                    struct qed_ptt *p_ptt)
277 {
278         u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
279
280         /* Use MCP history register to check if MCP reset occurred between init
281          * time and now.
282          */
283         if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
284                 DP_VERBOSE(p_hwfn,
285                            QED_MSG_SP,
286                            "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
287                            p_hwfn->mcp_info->mcp_hist, generic_por_0);
288
289                 qed_load_mcp_offsets(p_hwfn, p_ptt);
290                 qed_mcp_cmd_port_init(p_hwfn, p_ptt);
291         }
292 }
293
294 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
295 {
296         u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
297         int rc = 0;
298
299         if (p_hwfn->mcp_info->b_block_cmd) {
300                 DP_NOTICE(p_hwfn,
301                           "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
302                 return -EBUSY;
303         }
304
305         /* Ensure that only a single thread is accessing the mailbox */
306         spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
307
308         org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
309
310         /* Set drv command along with the updated sequence */
311         qed_mcp_reread_offsets(p_hwfn, p_ptt);
312         seq = ++p_hwfn->mcp_info->drv_mb_seq;
313         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
314
315         do {
316                 /* Wait for MFW response */
317                 udelay(delay);
318                 /* Give the FW up to 500 second (50*1000*10usec) */
319         } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
320                                               MISCS_REG_GENERIC_POR_0)) &&
321                  (cnt++ < QED_MCP_RESET_RETRIES));
322
323         if (org_mcp_reset_seq !=
324             qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
325                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
326                            "MCP was reset after %d usec\n", cnt * delay);
327         } else {
328                 DP_ERR(p_hwfn, "Failed to reset MCP\n");
329                 rc = -EAGAIN;
330         }
331
332         spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
333
334         return rc;
335 }
336
337 /* Must be called while cmd_lock is acquired */
338 static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
339 {
340         struct qed_mcp_cmd_elem *p_cmd_elem;
341
342         /* There is at most one pending command at a certain time, and if it
343          * exists - it is placed at the HEAD of the list.
344          */
345         if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
346                 p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
347                                               struct qed_mcp_cmd_elem, list);
348                 return !p_cmd_elem->b_is_completed;
349         }
350
351         return false;
352 }
353
354 /* Must be called while cmd_lock is acquired */
355 static int
356 qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
357 {
358         struct qed_mcp_mb_params *p_mb_params;
359         struct qed_mcp_cmd_elem *p_cmd_elem;
360         u32 mcp_resp;
361         u16 seq_num;
362
363         mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
364         seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
365
366         /* Return if no new non-handled response has been received */
367         if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
368                 return -EAGAIN;
369
370         p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
371         if (!p_cmd_elem) {
372                 DP_ERR(p_hwfn,
373                        "Failed to find a pending mailbox cmd that expects sequence number %d\n",
374                        seq_num);
375                 return -EINVAL;
376         }
377
378         p_mb_params = p_cmd_elem->p_mb_params;
379
380         /* Get the MFW response along with the sequence number */
381         p_mb_params->mcp_resp = mcp_resp;
382
383         /* Get the MFW param */
384         p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
385
386         /* Get the union data */
387         if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
388                 u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
389                                       offsetof(struct public_drv_mb,
390                                                union_data);
391                 qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
392                                 union_data_addr, p_mb_params->data_dst_size);
393         }
394
395         p_cmd_elem->b_is_completed = true;
396
397         return 0;
398 }
399
400 /* Must be called while cmd_lock is acquired */
401 static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
402                                     struct qed_ptt *p_ptt,
403                                     struct qed_mcp_mb_params *p_mb_params,
404                                     u16 seq_num)
405 {
406         union drv_union_data union_data;
407         u32 union_data_addr;
408
409         /* Set the union data */
410         union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
411                           offsetof(struct public_drv_mb, union_data);
412         memset(&union_data, 0, sizeof(union_data));
413         if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
414                 memcpy(&union_data, p_mb_params->p_data_src,
415                        p_mb_params->data_src_size);
416         qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
417                       sizeof(union_data));
418
419         /* Set the drv param */
420         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
421
422         /* Set the drv command along with the sequence number */
423         DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
424
425         DP_VERBOSE(p_hwfn, QED_MSG_SP,
426                    "MFW mailbox: command 0x%08x param 0x%08x\n",
427                    (p_mb_params->cmd | seq_num), p_mb_params->param);
428 }
429
430 static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
431 {
432         p_hwfn->mcp_info->b_block_cmd = block_cmd;
433
434         DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
435                 block_cmd ? "Block" : "Unblock");
436 }
437
438 static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
439                                    struct qed_ptt *p_ptt)
440 {
441         u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
442         u32 delay = QED_MCP_RESP_ITER_US;
443
444         cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
445         cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
446         cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
447         udelay(delay);
448         cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
449         udelay(delay);
450         cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
451
452         DP_NOTICE(p_hwfn,
453                   "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
454                   cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
455 }
456
457 static int
458 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
459                        struct qed_ptt *p_ptt,
460                        struct qed_mcp_mb_params *p_mb_params,
461                        u32 max_retries, u32 usecs)
462 {
463         u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
464         struct qed_mcp_cmd_elem *p_cmd_elem;
465         u16 seq_num;
466         int rc = 0;
467
468         /* Wait until the mailbox is non-occupied */
469         do {
470                 /* Exit the loop if there is no pending command, or if the
471                  * pending command is completed during this iteration.
472                  * The spinlock stays locked until the command is sent.
473                  */
474
475                 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
476
477                 if (!qed_mcp_has_pending_cmd(p_hwfn))
478                         break;
479
480                 rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
481                 if (!rc)
482                         break;
483                 else if (rc != -EAGAIN)
484                         goto err;
485
486                 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
487
488                 if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
489                         msleep(msecs);
490                 else
491                         udelay(usecs);
492         } while (++cnt < max_retries);
493
494         if (cnt >= max_retries) {
495                 DP_NOTICE(p_hwfn,
496                           "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
497                           p_mb_params->cmd, p_mb_params->param);
498                 return -EAGAIN;
499         }
500
501         /* Send the mailbox command */
502         qed_mcp_reread_offsets(p_hwfn, p_ptt);
503         seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
504         p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
505         if (!p_cmd_elem) {
506                 rc = -ENOMEM;
507                 goto err;
508         }
509
510         __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
511         spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
512
513         /* Wait for the MFW response */
514         do {
515                 /* Exit the loop if the command is already completed, or if the
516                  * command is completed during this iteration.
517                  * The spinlock stays locked until the list element is removed.
518                  */
519
520                 if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
521                         msleep(msecs);
522                 else
523                         udelay(usecs);
524
525                 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
526
527                 if (p_cmd_elem->b_is_completed)
528                         break;
529
530                 rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
531                 if (!rc)
532                         break;
533                 else if (rc != -EAGAIN)
534                         goto err;
535
536                 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
537         } while (++cnt < max_retries);
538
539         if (cnt >= max_retries) {
540                 DP_NOTICE(p_hwfn,
541                           "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
542                           p_mb_params->cmd, p_mb_params->param);
543                 qed_mcp_print_cpu_info(p_hwfn, p_ptt);
544
545                 spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
546                 qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
547                 spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
548
549                 if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
550                         qed_mcp_cmd_set_blocking(p_hwfn, true);
551
552                 qed_hw_err_notify(p_hwfn, p_ptt,
553                                   QED_HW_ERR_MFW_RESP_FAIL, NULL);
554                 return -EAGAIN;
555         }
556
557         qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
558         spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
559
560         DP_VERBOSE(p_hwfn,
561                    QED_MSG_SP,
562                    "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
563                    p_mb_params->mcp_resp,
564                    p_mb_params->mcp_param,
565                    (cnt * usecs) / 1000, (cnt * usecs) % 1000);
566
567         /* Clear the sequence number from the MFW response */
568         p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
569
570         return 0;
571
572 err:
573         spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
574         return rc;
575 }
576
577 static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
578                                  struct qed_ptt *p_ptt,
579                                  struct qed_mcp_mb_params *p_mb_params)
580 {
581         size_t union_data_size = sizeof(union drv_union_data);
582         u32 max_retries = QED_DRV_MB_MAX_RETRIES;
583         u32 usecs = QED_MCP_RESP_ITER_US;
584
585         /* MCP not initialized */
586         if (!qed_mcp_is_init(p_hwfn)) {
587                 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
588                 return -EBUSY;
589         }
590
591         if (p_hwfn->mcp_info->b_block_cmd) {
592                 DP_NOTICE(p_hwfn,
593                           "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
594                           p_mb_params->cmd, p_mb_params->param);
595                 return -EBUSY;
596         }
597
598         if (p_mb_params->data_src_size > union_data_size ||
599             p_mb_params->data_dst_size > union_data_size) {
600                 DP_ERR(p_hwfn,
601                        "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
602                        p_mb_params->data_src_size,
603                        p_mb_params->data_dst_size, union_data_size);
604                 return -EINVAL;
605         }
606
607         if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
608                 max_retries = DIV_ROUND_UP(max_retries, 1000);
609                 usecs *= 1000;
610         }
611
612         return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
613                                       usecs);
614 }
615
616 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
617                 struct qed_ptt *p_ptt,
618                 u32 cmd,
619                 u32 param,
620                 u32 *o_mcp_resp,
621                 u32 *o_mcp_param)
622 {
623         struct qed_mcp_mb_params mb_params;
624         int rc;
625
626         memset(&mb_params, 0, sizeof(mb_params));
627         mb_params.cmd = cmd;
628         mb_params.param = param;
629
630         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
631         if (rc)
632                 return rc;
633
634         *o_mcp_resp = mb_params.mcp_resp;
635         *o_mcp_param = mb_params.mcp_param;
636
637         return 0;
638 }
639
640 static int
641 qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
642                    struct qed_ptt *p_ptt,
643                    u32 cmd,
644                    u32 param,
645                    u32 *o_mcp_resp,
646                    u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
647 {
648         struct qed_mcp_mb_params mb_params;
649         int rc;
650
651         memset(&mb_params, 0, sizeof(mb_params));
652         mb_params.cmd = cmd;
653         mb_params.param = param;
654         mb_params.p_data_src = i_buf;
655         mb_params.data_src_size = (u8)i_txn_size;
656         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
657         if (rc)
658                 return rc;
659
660         *o_mcp_resp = mb_params.mcp_resp;
661         *o_mcp_param = mb_params.mcp_param;
662
663         /* nvm_info needs to be updated */
664         p_hwfn->nvm_info.valid = false;
665
666         return 0;
667 }
668
669 int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
670                        struct qed_ptt *p_ptt,
671                        u32 cmd,
672                        u32 param,
673                        u32 *o_mcp_resp,
674                        u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
675 {
676         struct qed_mcp_mb_params mb_params;
677         u8 raw_data[MCP_DRV_NVM_BUF_LEN];
678         int rc;
679
680         memset(&mb_params, 0, sizeof(mb_params));
681         mb_params.cmd = cmd;
682         mb_params.param = param;
683         mb_params.p_data_dst = raw_data;
684
685         /* Use the maximal value since the actual one is part of the response */
686         mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
687
688         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
689         if (rc)
690                 return rc;
691
692         *o_mcp_resp = mb_params.mcp_resp;
693         *o_mcp_param = mb_params.mcp_param;
694
695         *o_txn_size = *o_mcp_param;
696         memcpy(o_buf, raw_data, *o_txn_size);
697
698         return 0;
699 }
700
701 static bool
702 qed_mcp_can_force_load(u8 drv_role,
703                        u8 exist_drv_role,
704                        enum qed_override_force_load override_force_load)
705 {
706         bool can_force_load = false;
707
708         switch (override_force_load) {
709         case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
710                 can_force_load = true;
711                 break;
712         case QED_OVERRIDE_FORCE_LOAD_NEVER:
713                 can_force_load = false;
714                 break;
715         default:
716                 can_force_load = (drv_role == DRV_ROLE_OS &&
717                                   exist_drv_role == DRV_ROLE_PREBOOT) ||
718                                  (drv_role == DRV_ROLE_KDUMP &&
719                                   exist_drv_role == DRV_ROLE_OS);
720                 break;
721         }
722
723         return can_force_load;
724 }
725
726 static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
727                                    struct qed_ptt *p_ptt)
728 {
729         u32 resp = 0, param = 0;
730         int rc;
731
732         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
733                          &resp, &param);
734         if (rc)
735                 DP_NOTICE(p_hwfn,
736                           "Failed to send cancel load request, rc = %d\n", rc);
737
738         return rc;
739 }
740
741 #define CONFIG_QEDE_BITMAP_IDX          BIT(0)
742 #define CONFIG_QED_SRIOV_BITMAP_IDX     BIT(1)
743 #define CONFIG_QEDR_BITMAP_IDX          BIT(2)
744 #define CONFIG_QEDF_BITMAP_IDX          BIT(4)
745 #define CONFIG_QEDI_BITMAP_IDX          BIT(5)
746 #define CONFIG_QED_LL2_BITMAP_IDX       BIT(6)
747
748 static u32 qed_get_config_bitmap(void)
749 {
750         u32 config_bitmap = 0x0;
751
752         if (IS_ENABLED(CONFIG_QEDE))
753                 config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
754
755         if (IS_ENABLED(CONFIG_QED_SRIOV))
756                 config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
757
758         if (IS_ENABLED(CONFIG_QED_RDMA))
759                 config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
760
761         if (IS_ENABLED(CONFIG_QED_FCOE))
762                 config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
763
764         if (IS_ENABLED(CONFIG_QED_ISCSI))
765                 config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
766
767         if (IS_ENABLED(CONFIG_QED_LL2))
768                 config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
769
770         return config_bitmap;
771 }
772
773 struct qed_load_req_in_params {
774         u8 hsi_ver;
775 #define QED_LOAD_REQ_HSI_VER_DEFAULT    0
776 #define QED_LOAD_REQ_HSI_VER_1          1
777         u32 drv_ver_0;
778         u32 drv_ver_1;
779         u32 fw_ver;
780         u8 drv_role;
781         u8 timeout_val;
782         u8 force_cmd;
783         bool avoid_eng_reset;
784 };
785
786 struct qed_load_req_out_params {
787         u32 load_code;
788         u32 exist_drv_ver_0;
789         u32 exist_drv_ver_1;
790         u32 exist_fw_ver;
791         u8 exist_drv_role;
792         u8 mfw_hsi_ver;
793         bool drv_exists;
794 };
795
796 static int
797 __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
798                    struct qed_ptt *p_ptt,
799                    struct qed_load_req_in_params *p_in_params,
800                    struct qed_load_req_out_params *p_out_params)
801 {
802         struct qed_mcp_mb_params mb_params;
803         struct load_req_stc load_req;
804         struct load_rsp_stc load_rsp;
805         u32 hsi_ver;
806         int rc;
807
808         memset(&load_req, 0, sizeof(load_req));
809         load_req.drv_ver_0 = p_in_params->drv_ver_0;
810         load_req.drv_ver_1 = p_in_params->drv_ver_1;
811         load_req.fw_ver = p_in_params->fw_ver;
812         QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
813         QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
814                           p_in_params->timeout_val);
815         QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
816                           p_in_params->force_cmd);
817         QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
818                           p_in_params->avoid_eng_reset);
819
820         hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
821                   DRV_ID_MCP_HSI_VER_CURRENT :
822                   (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
823
824         memset(&mb_params, 0, sizeof(mb_params));
825         mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
826         mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
827         mb_params.p_data_src = &load_req;
828         mb_params.data_src_size = sizeof(load_req);
829         mb_params.p_data_dst = &load_rsp;
830         mb_params.data_dst_size = sizeof(load_rsp);
831         mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
832
833         DP_VERBOSE(p_hwfn, QED_MSG_SP,
834                    "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
835                    mb_params.param,
836                    QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
837                    QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
838                    QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
839                    QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
840
841         if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
842                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
843                            "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
844                            load_req.drv_ver_0,
845                            load_req.drv_ver_1,
846                            load_req.fw_ver,
847                            load_req.misc0,
848                            QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
849                            QED_MFW_GET_FIELD(load_req.misc0,
850                                              LOAD_REQ_LOCK_TO),
851                            QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
852                            QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
853         }
854
855         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
856         if (rc) {
857                 DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
858                 return rc;
859         }
860
861         DP_VERBOSE(p_hwfn, QED_MSG_SP,
862                    "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
863         p_out_params->load_code = mb_params.mcp_resp;
864
865         if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
866             p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
867                 DP_VERBOSE(p_hwfn,
868                            QED_MSG_SP,
869                            "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
870                            load_rsp.drv_ver_0,
871                            load_rsp.drv_ver_1,
872                            load_rsp.fw_ver,
873                            load_rsp.misc0,
874                            QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
875                            QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
876                            QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
877
878                 p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
879                 p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
880                 p_out_params->exist_fw_ver = load_rsp.fw_ver;
881                 p_out_params->exist_drv_role =
882                     QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
883                 p_out_params->mfw_hsi_ver =
884                     QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
885                 p_out_params->drv_exists =
886                     QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
887                     LOAD_RSP_FLAGS0_DRV_EXISTS;
888         }
889
890         return 0;
891 }
892
893 static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
894                                   enum qed_drv_role drv_role,
895                                   u8 *p_mfw_drv_role)
896 {
897         switch (drv_role) {
898         case QED_DRV_ROLE_OS:
899                 *p_mfw_drv_role = DRV_ROLE_OS;
900                 break;
901         case QED_DRV_ROLE_KDUMP:
902                 *p_mfw_drv_role = DRV_ROLE_KDUMP;
903                 break;
904         default:
905                 DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
906                 return -EINVAL;
907         }
908
909         return 0;
910 }
911
912 enum qed_load_req_force {
913         QED_LOAD_REQ_FORCE_NONE,
914         QED_LOAD_REQ_FORCE_PF,
915         QED_LOAD_REQ_FORCE_ALL,
916 };
917
918 static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
919
920                                   enum qed_load_req_force force_cmd,
921                                   u8 *p_mfw_force_cmd)
922 {
923         switch (force_cmd) {
924         case QED_LOAD_REQ_FORCE_NONE:
925                 *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
926                 break;
927         case QED_LOAD_REQ_FORCE_PF:
928                 *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
929                 break;
930         case QED_LOAD_REQ_FORCE_ALL:
931                 *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
932                 break;
933         }
934 }
935
936 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
937                      struct qed_ptt *p_ptt,
938                      struct qed_load_req_params *p_params)
939 {
940         struct qed_load_req_out_params out_params;
941         struct qed_load_req_in_params in_params;
942         u8 mfw_drv_role, mfw_force_cmd;
943         int rc;
944
945         memset(&in_params, 0, sizeof(in_params));
946         in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
947         in_params.drv_ver_0 = QED_VERSION;
948         in_params.drv_ver_1 = qed_get_config_bitmap();
949         in_params.fw_ver = STORM_FW_VERSION;
950         rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
951         if (rc)
952                 return rc;
953
954         in_params.drv_role = mfw_drv_role;
955         in_params.timeout_val = p_params->timeout_val;
956         qed_get_mfw_force_cmd(p_hwfn,
957                               QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
958
959         in_params.force_cmd = mfw_force_cmd;
960         in_params.avoid_eng_reset = p_params->avoid_eng_reset;
961
962         memset(&out_params, 0, sizeof(out_params));
963         rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
964         if (rc)
965                 return rc;
966
967         /* First handle cases where another load request should/might be sent:
968          * - MFW expects the old interface [HSI version = 1]
969          * - MFW responds that a force load request is required
970          */
971         if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
972                 DP_INFO(p_hwfn,
973                         "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
974
975                 in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
976                 memset(&out_params, 0, sizeof(out_params));
977                 rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
978                 if (rc)
979                         return rc;
980         } else if (out_params.load_code ==
981                    FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
982                 if (qed_mcp_can_force_load(in_params.drv_role,
983                                            out_params.exist_drv_role,
984                                            p_params->override_force_load)) {
985                         DP_INFO(p_hwfn,
986                                 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
987                                 in_params.drv_role, in_params.fw_ver,
988                                 in_params.drv_ver_0, in_params.drv_ver_1,
989                                 out_params.exist_drv_role,
990                                 out_params.exist_fw_ver,
991                                 out_params.exist_drv_ver_0,
992                                 out_params.exist_drv_ver_1);
993
994                         qed_get_mfw_force_cmd(p_hwfn,
995                                               QED_LOAD_REQ_FORCE_ALL,
996                                               &mfw_force_cmd);
997
998                         in_params.force_cmd = mfw_force_cmd;
999                         memset(&out_params, 0, sizeof(out_params));
1000                         rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
1001                                                 &out_params);
1002                         if (rc)
1003                                 return rc;
1004                 } else {
1005                         DP_NOTICE(p_hwfn,
1006                                   "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1007                                   in_params.drv_role, in_params.fw_ver,
1008                                   in_params.drv_ver_0, in_params.drv_ver_1,
1009                                   out_params.exist_drv_role,
1010                                   out_params.exist_fw_ver,
1011                                   out_params.exist_drv_ver_0,
1012                                   out_params.exist_drv_ver_1);
1013                         DP_NOTICE(p_hwfn,
1014                                   "Avoid sending a force load request to prevent disruption of active PFs\n");
1015
1016                         qed_mcp_cancel_load_req(p_hwfn, p_ptt);
1017                         return -EBUSY;
1018                 }
1019         }
1020
1021         /* Now handle the other types of responses.
1022          * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1023          * expected here after the additional revised load requests were sent.
1024          */
1025         switch (out_params.load_code) {
1026         case FW_MSG_CODE_DRV_LOAD_ENGINE:
1027         case FW_MSG_CODE_DRV_LOAD_PORT:
1028         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1029                 if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
1030                     out_params.drv_exists) {
1031                         /* The role and fw/driver version match, but the PF is
1032                          * already loaded and has not been unloaded gracefully.
1033                          */
1034                         DP_NOTICE(p_hwfn,
1035                                   "PF is already loaded\n");
1036                         return -EINVAL;
1037                 }
1038                 break;
1039         default:
1040                 DP_NOTICE(p_hwfn,
1041                           "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1042                           out_params.load_code);
1043                 return -EBUSY;
1044         }
1045
1046         p_params->load_code = out_params.load_code;
1047
1048         return 0;
1049 }
1050
1051 int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1052 {
1053         u32 resp = 0, param = 0;
1054         int rc;
1055
1056         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1057                          &param);
1058         if (rc) {
1059                 DP_NOTICE(p_hwfn,
1060                           "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1061                 return rc;
1062         }
1063
1064         /* Check if there is a DID mismatch between nvm-cfg/efuse */
1065         if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1066                 DP_NOTICE(p_hwfn,
1067                           "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1068
1069         return 0;
1070 }
1071
1072 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1073 {
1074         struct qed_mcp_mb_params mb_params;
1075         u32 wol_param;
1076
1077         switch (p_hwfn->cdev->wol_config) {
1078         case QED_OV_WOL_DISABLED:
1079                 wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1080                 break;
1081         case QED_OV_WOL_ENABLED:
1082                 wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1083                 break;
1084         default:
1085                 DP_NOTICE(p_hwfn,
1086                           "Unknown WoL configuration %02x\n",
1087                           p_hwfn->cdev->wol_config);
1088                 /* Fallthrough */
1089         case QED_OV_WOL_DEFAULT:
1090                 wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1091         }
1092
1093         memset(&mb_params, 0, sizeof(mb_params));
1094         mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
1095         mb_params.param = wol_param;
1096         mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
1097
1098         return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1099 }
1100
1101 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1102 {
1103         struct qed_mcp_mb_params mb_params;
1104         struct mcp_mac wol_mac;
1105
1106         memset(&mb_params, 0, sizeof(mb_params));
1107         mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1108
1109         /* Set the primary MAC if WoL is enabled */
1110         if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
1111                 u8 *p_mac = p_hwfn->cdev->wol_mac;
1112
1113                 memset(&wol_mac, 0, sizeof(wol_mac));
1114                 wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1115                 wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1116                                     p_mac[4] << 8 | p_mac[5];
1117
1118                 DP_VERBOSE(p_hwfn,
1119                            (QED_MSG_SP | NETIF_MSG_IFDOWN),
1120                            "Setting WoL MAC: %pM --> [%08x,%08x]\n",
1121                            p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
1122
1123                 mb_params.p_data_src = &wol_mac;
1124                 mb_params.data_src_size = sizeof(wol_mac);
1125         }
1126
1127         return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1128 }
1129
1130 static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
1131                                   struct qed_ptt *p_ptt)
1132 {
1133         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1134                                         PUBLIC_PATH);
1135         u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1136         u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1137                                      QED_PATH_ID(p_hwfn));
1138         u32 disabled_vfs[VF_MAX_STATIC / 32];
1139         int i;
1140
1141         DP_VERBOSE(p_hwfn,
1142                    QED_MSG_SP,
1143                    "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1144                    mfw_path_offsize, path_addr);
1145
1146         for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1147                 disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
1148                                          path_addr +
1149                                          offsetof(struct public_path,
1150                                                   mcp_vf_disabled) +
1151                                          sizeof(u32) * i);
1152                 DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1153                            "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1154                            i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1155         }
1156
1157         if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1158                 qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
1159 }
1160
1161 int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
1162                        struct qed_ptt *p_ptt, u32 *vfs_to_ack)
1163 {
1164         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1165                                         PUBLIC_FUNC);
1166         u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
1167         u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1168                                      MCP_PF_ID(p_hwfn));
1169         struct qed_mcp_mb_params mb_params;
1170         int rc;
1171         int i;
1172
1173         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1174                 DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1175                            "Acking VFs [%08x,...,%08x] - %08x\n",
1176                            i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1177
1178         memset(&mb_params, 0, sizeof(mb_params));
1179         mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1180         mb_params.p_data_src = vfs_to_ack;
1181         mb_params.data_src_size = VF_MAX_STATIC / 8;
1182         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1183         if (rc) {
1184                 DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
1185                 return -EBUSY;
1186         }
1187
1188         /* Clear the ACK bits */
1189         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1190                 qed_wr(p_hwfn, p_ptt,
1191                        func_addr +
1192                        offsetof(struct public_func, drv_ack_vf_disabled) +
1193                        i * sizeof(u32), 0);
1194
1195         return rc;
1196 }
1197
1198 static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
1199                                               struct qed_ptt *p_ptt)
1200 {
1201         u32 transceiver_state;
1202
1203         transceiver_state = qed_rd(p_hwfn, p_ptt,
1204                                    p_hwfn->mcp_info->port_addr +
1205                                    offsetof(struct public_port,
1206                                             transceiver_data));
1207
1208         DP_VERBOSE(p_hwfn,
1209                    (NETIF_MSG_HW | QED_MSG_SP),
1210                    "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1211                    transceiver_state,
1212                    (u32)(p_hwfn->mcp_info->port_addr +
1213                           offsetof(struct public_port, transceiver_data)));
1214
1215         transceiver_state = GET_FIELD(transceiver_state,
1216                                       ETH_TRANSCEIVER_STATE);
1217
1218         if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1219                 DP_NOTICE(p_hwfn, "Transceiver is present.\n");
1220         else
1221                 DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
1222 }
1223
1224 static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
1225                                     struct qed_ptt *p_ptt,
1226                                     struct qed_mcp_link_state *p_link)
1227 {
1228         u32 eee_status, val;
1229
1230         p_link->eee_adv_caps = 0;
1231         p_link->eee_lp_adv_caps = 0;
1232         eee_status = qed_rd(p_hwfn,
1233                             p_ptt,
1234                             p_hwfn->mcp_info->port_addr +
1235                             offsetof(struct public_port, eee_status));
1236         p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1237         val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1238         if (val & EEE_1G_ADV)
1239                 p_link->eee_adv_caps |= QED_EEE_1G_ADV;
1240         if (val & EEE_10G_ADV)
1241                 p_link->eee_adv_caps |= QED_EEE_10G_ADV;
1242         val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1243         if (val & EEE_1G_ADV)
1244                 p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
1245         if (val & EEE_10G_ADV)
1246                 p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
1247 }
1248
1249 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
1250                                   struct qed_ptt *p_ptt,
1251                                   struct public_func *p_data, int pfid)
1252 {
1253         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1254                                         PUBLIC_FUNC);
1255         u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1256         u32 func_addr;
1257         u32 i, size;
1258
1259         func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1260         memset(p_data, 0, sizeof(*p_data));
1261
1262         size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
1263         for (i = 0; i < size / sizeof(u32); i++)
1264                 ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
1265                                             func_addr + (i << 2));
1266         return size;
1267 }
1268
1269 static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
1270                                   struct public_func *p_shmem_info)
1271 {
1272         struct qed_mcp_function_info *p_info;
1273
1274         p_info = &p_hwfn->mcp_info->func_info;
1275
1276         p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
1277                                                   FUNC_MF_CFG_MIN_BW);
1278         if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1279                 DP_INFO(p_hwfn,
1280                         "bandwidth minimum out of bounds [%02x]. Set to 1\n",
1281                         p_info->bandwidth_min);
1282                 p_info->bandwidth_min = 1;
1283         }
1284
1285         p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
1286                                                   FUNC_MF_CFG_MAX_BW);
1287         if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1288                 DP_INFO(p_hwfn,
1289                         "bandwidth maximum out of bounds [%02x]. Set to 100\n",
1290                         p_info->bandwidth_max);
1291                 p_info->bandwidth_max = 100;
1292         }
1293 }
1294
1295 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
1296                                        struct qed_ptt *p_ptt, bool b_reset)
1297 {
1298         struct qed_mcp_link_state *p_link;
1299         u8 max_bw, min_bw;
1300         u32 status = 0;
1301
1302         /* Prevent SW/attentions from doing this at the same time */
1303         spin_lock_bh(&p_hwfn->mcp_info->link_lock);
1304
1305         p_link = &p_hwfn->mcp_info->link_output;
1306         memset(p_link, 0, sizeof(*p_link));
1307         if (!b_reset) {
1308                 status = qed_rd(p_hwfn, p_ptt,
1309                                 p_hwfn->mcp_info->port_addr +
1310                                 offsetof(struct public_port, link_status));
1311                 DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
1312                            "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1313                            status,
1314                            (u32)(p_hwfn->mcp_info->port_addr +
1315                                  offsetof(struct public_port, link_status)));
1316         } else {
1317                 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1318                            "Resetting link indications\n");
1319                 goto out;
1320         }
1321
1322         if (p_hwfn->b_drv_link_init) {
1323                 /* Link indication with modern MFW arrives as per-PF
1324                  * indication.
1325                  */
1326                 if (p_hwfn->mcp_info->capabilities &
1327                     FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1328                         struct public_func shmem_info;
1329
1330                         qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1331                                                MCP_PF_ID(p_hwfn));
1332                         p_link->link_up = !!(shmem_info.status &
1333                                              FUNC_STATUS_VIRTUAL_LINK_UP);
1334                         qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1335                         DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1336                                    "Virtual link_up = %d\n", p_link->link_up);
1337                 } else {
1338                         p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1339                         DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1340                                    "Physical link_up = %d\n", p_link->link_up);
1341                 }
1342         } else {
1343                 p_link->link_up = false;
1344         }
1345
1346         p_link->full_duplex = true;
1347         switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1348         case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1349                 p_link->speed = 100000;
1350                 break;
1351         case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1352                 p_link->speed = 50000;
1353                 break;
1354         case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1355                 p_link->speed = 40000;
1356                 break;
1357         case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1358                 p_link->speed = 25000;
1359                 break;
1360         case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1361                 p_link->speed = 20000;
1362                 break;
1363         case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1364                 p_link->speed = 10000;
1365                 break;
1366         case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1367                 p_link->full_duplex = false;
1368         /* Fall-through */
1369         case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1370                 p_link->speed = 1000;
1371                 break;
1372         default:
1373                 p_link->speed = 0;
1374                 p_link->link_up = 0;
1375         }
1376
1377         if (p_link->link_up && p_link->speed)
1378                 p_link->line_speed = p_link->speed;
1379         else
1380                 p_link->line_speed = 0;
1381
1382         max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1383         min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1384
1385         /* Max bandwidth configuration */
1386         __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1387
1388         /* Min bandwidth configuration */
1389         __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1390         qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
1391                                             p_link->min_pf_rate);
1392
1393         p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1394         p_link->an_complete = !!(status &
1395                                  LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1396         p_link->parallel_detection = !!(status &
1397                                         LINK_STATUS_PARALLEL_DETECTION_USED);
1398         p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1399
1400         p_link->partner_adv_speed |=
1401                 (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1402                 QED_LINK_PARTNER_SPEED_1G_FD : 0;
1403         p_link->partner_adv_speed |=
1404                 (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1405                 QED_LINK_PARTNER_SPEED_1G_HD : 0;
1406         p_link->partner_adv_speed |=
1407                 (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1408                 QED_LINK_PARTNER_SPEED_10G : 0;
1409         p_link->partner_adv_speed |=
1410                 (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1411                 QED_LINK_PARTNER_SPEED_20G : 0;
1412         p_link->partner_adv_speed |=
1413                 (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1414                 QED_LINK_PARTNER_SPEED_25G : 0;
1415         p_link->partner_adv_speed |=
1416                 (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1417                 QED_LINK_PARTNER_SPEED_40G : 0;
1418         p_link->partner_adv_speed |=
1419                 (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1420                 QED_LINK_PARTNER_SPEED_50G : 0;
1421         p_link->partner_adv_speed |=
1422                 (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1423                 QED_LINK_PARTNER_SPEED_100G : 0;
1424
1425         p_link->partner_tx_flow_ctrl_en =
1426                 !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1427         p_link->partner_rx_flow_ctrl_en =
1428                 !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1429
1430         switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1431         case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1432                 p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
1433                 break;
1434         case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1435                 p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
1436                 break;
1437         case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1438                 p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
1439                 break;
1440         default:
1441                 p_link->partner_adv_pause = 0;
1442         }
1443
1444         p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1445
1446         if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1447                 qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1448
1449         qed_link_update(p_hwfn, p_ptt);
1450 out:
1451         spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
1452 }
1453
1454 int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
1455 {
1456         struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1457         struct qed_mcp_mb_params mb_params;
1458         struct eth_phy_cfg phy_cfg;
1459         int rc = 0;
1460         u32 cmd;
1461
1462         /* Set the shmem configuration according to params */
1463         memset(&phy_cfg, 0, sizeof(phy_cfg));
1464         cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1465         if (!params->speed.autoneg)
1466                 phy_cfg.speed = params->speed.forced_speed;
1467         phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1468         phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1469         phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1470         phy_cfg.adv_speed = params->speed.advertised_speeds;
1471         phy_cfg.loopback_mode = params->loopback_mode;
1472
1473         /* There are MFWs that share this capability regardless of whether
1474          * this is feasible or not. And given that at the very least adv_caps
1475          * would be set internally by qed, we want to make sure LFA would
1476          * still work.
1477          */
1478         if ((p_hwfn->mcp_info->capabilities &
1479              FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
1480                 phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1481                 if (params->eee.tx_lpi_enable)
1482                         phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1483                 if (params->eee.adv_caps & QED_EEE_1G_ADV)
1484                         phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1485                 if (params->eee.adv_caps & QED_EEE_10G_ADV)
1486                         phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1487                 phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1488                                     EEE_TX_TIMER_USEC_OFFSET) &
1489                                    EEE_TX_TIMER_USEC_MASK;
1490         }
1491
1492         p_hwfn->b_drv_link_init = b_up;
1493
1494         if (b_up) {
1495                 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1496                            "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
1497                            phy_cfg.speed,
1498                            phy_cfg.pause,
1499                            phy_cfg.adv_speed,
1500                            phy_cfg.loopback_mode,
1501                            phy_cfg.feature_config_flags);
1502         } else {
1503                 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1504                            "Resetting link\n");
1505         }
1506
1507         memset(&mb_params, 0, sizeof(mb_params));
1508         mb_params.cmd = cmd;
1509         mb_params.p_data_src = &phy_cfg;
1510         mb_params.data_src_size = sizeof(phy_cfg);
1511         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1512
1513         /* if mcp fails to respond we must abort */
1514         if (rc) {
1515                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1516                 return rc;
1517         }
1518
1519         /* Mimic link-change attention, done for several reasons:
1520          *  - On reset, there's no guarantee MFW would trigger
1521          *    an attention.
1522          *  - On initialization, older MFWs might not indicate link change
1523          *    during LFA, so we'll never get an UP indication.
1524          */
1525         qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1526
1527         return 0;
1528 }
1529
1530 u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
1531                                  struct qed_ptt *p_ptt)
1532 {
1533         u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1534
1535         if (IS_VF(p_hwfn->cdev))
1536                 return -EINVAL;
1537
1538         path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1539                                                  PUBLIC_PATH);
1540         path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
1541         path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
1542
1543         proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
1544                                path_addr +
1545                                offsetof(struct public_path, process_kill)) &
1546                         PROCESS_KILL_COUNTER_MASK;
1547
1548         return proc_kill_cnt;
1549 }
1550
1551 static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
1552                                         struct qed_ptt *p_ptt)
1553 {
1554         struct qed_dev *cdev = p_hwfn->cdev;
1555         u32 proc_kill_cnt;
1556
1557         /* Prevent possible attentions/interrupts during the recovery handling
1558          * and till its load phase, during which they will be re-enabled.
1559          */
1560         qed_int_igu_disable_int(p_hwfn, p_ptt);
1561
1562         DP_NOTICE(p_hwfn, "Received a process kill indication\n");
1563
1564         /* The following operations should be done once, and thus in CMT mode
1565          * are carried out by only the first HW function.
1566          */
1567         if (p_hwfn != QED_LEADING_HWFN(cdev))
1568                 return;
1569
1570         if (cdev->recov_in_prog) {
1571                 DP_NOTICE(p_hwfn,
1572                           "Ignoring the indication since a recovery process is already in progress\n");
1573                 return;
1574         }
1575
1576         cdev->recov_in_prog = true;
1577
1578         proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
1579         DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
1580
1581         qed_schedule_recovery_handler(p_hwfn);
1582 }
1583
1584 static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
1585                                         struct qed_ptt *p_ptt,
1586                                         enum MFW_DRV_MSG_TYPE type)
1587 {
1588         enum qed_mcp_protocol_type stats_type;
1589         union qed_mcp_protocol_stats stats;
1590         struct qed_mcp_mb_params mb_params;
1591         u32 hsi_param;
1592
1593         switch (type) {
1594         case MFW_DRV_MSG_GET_LAN_STATS:
1595                 stats_type = QED_MCP_LAN_STATS;
1596                 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1597                 break;
1598         case MFW_DRV_MSG_GET_FCOE_STATS:
1599                 stats_type = QED_MCP_FCOE_STATS;
1600                 hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1601                 break;
1602         case MFW_DRV_MSG_GET_ISCSI_STATS:
1603                 stats_type = QED_MCP_ISCSI_STATS;
1604                 hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1605                 break;
1606         case MFW_DRV_MSG_GET_RDMA_STATS:
1607                 stats_type = QED_MCP_RDMA_STATS;
1608                 hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1609                 break;
1610         default:
1611                 DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
1612                 return;
1613         }
1614
1615         qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
1616
1617         memset(&mb_params, 0, sizeof(mb_params));
1618         mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1619         mb_params.param = hsi_param;
1620         mb_params.p_data_src = &stats;
1621         mb_params.data_src_size = sizeof(stats);
1622         qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1623 }
1624
1625 static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1626 {
1627         struct qed_mcp_function_info *p_info;
1628         struct public_func shmem_info;
1629         u32 resp = 0, param = 0;
1630
1631         qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1632
1633         qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1634
1635         p_info = &p_hwfn->mcp_info->func_info;
1636
1637         qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
1638         qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
1639
1640         /* Acknowledge the MFW */
1641         qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1642                     &param);
1643 }
1644
1645 static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1646 {
1647         struct public_func shmem_info;
1648         u32 resp = 0, param = 0;
1649
1650         qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1651
1652         p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1653                                                  FUNC_MF_CFG_OV_STAG_MASK;
1654         p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1655         if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
1656                 if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
1657                         qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
1658                                p_hwfn->hw_info.ovlan);
1659                         qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
1660
1661                         /* Configure DB to add external vlan to EDPM packets */
1662                         qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1663                         qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1664                                p_hwfn->hw_info.ovlan);
1665                 } else {
1666                         qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
1667                         qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
1668                         qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
1669                         qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
1670                 }
1671
1672                 qed_sp_pf_update_stag(p_hwfn);
1673         }
1674
1675         DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1676                    p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1677
1678         /* Acknowledge the MFW */
1679         qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1680                     &resp, &param);
1681 }
1682
1683 static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
1684                                        struct qed_ptt *p_ptt)
1685 {
1686         /* A single notification should be sent to upper driver in CMT mode */
1687         if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
1688                 return;
1689
1690         qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
1691                           "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1692 }
1693
1694 struct qed_mdump_cmd_params {
1695         u32 cmd;
1696         void *p_data_src;
1697         u8 data_src_size;
1698         void *p_data_dst;
1699         u8 data_dst_size;
1700         u32 mcp_resp;
1701 };
1702
1703 static int
1704 qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
1705                   struct qed_ptt *p_ptt,
1706                   struct qed_mdump_cmd_params *p_mdump_cmd_params)
1707 {
1708         struct qed_mcp_mb_params mb_params;
1709         int rc;
1710
1711         memset(&mb_params, 0, sizeof(mb_params));
1712         mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1713         mb_params.param = p_mdump_cmd_params->cmd;
1714         mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1715         mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1716         mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1717         mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1718         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1719         if (rc)
1720                 return rc;
1721
1722         p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1723
1724         if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1725                 DP_INFO(p_hwfn,
1726                         "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1727                         p_mdump_cmd_params->cmd);
1728                 rc = -EOPNOTSUPP;
1729         } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1730                 DP_INFO(p_hwfn,
1731                         "The mdump command is not supported by the MFW\n");
1732                 rc = -EOPNOTSUPP;
1733         }
1734
1735         return rc;
1736 }
1737
1738 static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1739 {
1740         struct qed_mdump_cmd_params mdump_cmd_params;
1741
1742         memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
1743         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1744
1745         return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1746 }
1747
1748 int
1749 qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
1750                          struct qed_ptt *p_ptt,
1751                          struct mdump_retain_data_stc *p_mdump_retain)
1752 {
1753         struct qed_mdump_cmd_params mdump_cmd_params;
1754         int rc;
1755
1756         memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
1757         mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1758         mdump_cmd_params.p_data_dst = p_mdump_retain;
1759         mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
1760
1761         rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1762         if (rc)
1763                 return rc;
1764
1765         if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1766                 DP_INFO(p_hwfn,
1767                         "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1768                         mdump_cmd_params.mcp_resp);
1769                 return -EINVAL;
1770         }
1771
1772         return 0;
1773 }
1774
1775 static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
1776                                           struct qed_ptt *p_ptt)
1777 {
1778         struct mdump_retain_data_stc mdump_retain;
1779         int rc;
1780
1781         /* In CMT mode - no need for more than a single acknowledgment to the
1782          * MFW, and no more than a single notification to the upper driver.
1783          */
1784         if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
1785                 return;
1786
1787         rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1788         if (rc == 0 && mdump_retain.valid)
1789                 DP_NOTICE(p_hwfn,
1790                           "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1791                           mdump_retain.epoch,
1792                           mdump_retain.pf, mdump_retain.status);
1793         else
1794                 DP_NOTICE(p_hwfn,
1795                           "The MFW notified that a critical error occurred in the device\n");
1796
1797         DP_NOTICE(p_hwfn,
1798                   "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1799         qed_mcp_mdump_ack(p_hwfn, p_ptt);
1800
1801         qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
1802 }
1803
1804 void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1805 {
1806         struct public_func shmem_info;
1807         u32 port_cfg, val;
1808
1809         if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
1810                 return;
1811
1812         memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1813         port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1814                           offsetof(struct public_port, oem_cfg_port));
1815         val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
1816                 OEM_CFG_CHANNEL_TYPE_OFFSET;
1817         if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1818                 DP_NOTICE(p_hwfn,
1819                           "Incorrect UFP Channel type  %d port_id 0x%02x\n",
1820                           val, MFW_PORT(p_hwfn));
1821
1822         val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
1823         if (val == OEM_CFG_SCHED_TYPE_ETS) {
1824                 p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
1825         } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
1826                 p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
1827         } else {
1828                 p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
1829                 DP_NOTICE(p_hwfn,
1830                           "Unknown UFP scheduling mode %d port_id 0x%02x\n",
1831                           val, MFW_PORT(p_hwfn));
1832         }
1833
1834         qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1835         val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
1836                 OEM_CFG_FUNC_TC_OFFSET;
1837         p_hwfn->ufp_info.tc = (u8)val;
1838         val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
1839                 OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
1840         if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
1841                 p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
1842         } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
1843                 p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
1844         } else {
1845                 p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
1846                 DP_NOTICE(p_hwfn,
1847                           "Unknown Host priority control %d port_id 0x%02x\n",
1848                           val, MFW_PORT(p_hwfn));
1849         }
1850
1851         DP_NOTICE(p_hwfn,
1852                   "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
1853                   p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1854                   p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
1855 }
1856
1857 static int
1858 qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1859 {
1860         qed_mcp_read_ufp_config(p_hwfn, p_ptt);
1861
1862         if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
1863                 p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
1864                 qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
1865                                            p_hwfn->ufp_info.tc);
1866
1867                 qed_qm_reconf(p_hwfn, p_ptt);
1868         } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
1869                 /* Merge UFP TC with the dcbx TC data */
1870                 qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1871                                           QED_DCBX_OPERATIONAL_MIB);
1872         } else {
1873                 DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
1874                 return -EINVAL;
1875         }
1876
1877         /* update storm FW with negotiation results */
1878         qed_sp_pf_update_ufp(p_hwfn);
1879
1880         /* update stag pcp value */
1881         qed_sp_pf_update_stag(p_hwfn);
1882
1883         return 0;
1884 }
1885
1886 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
1887                           struct qed_ptt *p_ptt)
1888 {
1889         struct qed_mcp_info *info = p_hwfn->mcp_info;
1890         int rc = 0;
1891         bool found = false;
1892         u16 i;
1893
1894         DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
1895
1896         /* Read Messages from MFW */
1897         qed_mcp_read_mb(p_hwfn, p_ptt);
1898
1899         /* Compare current messages to old ones */
1900         for (i = 0; i < info->mfw_mb_length; i++) {
1901                 if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1902                         continue;
1903
1904                 found = true;
1905
1906                 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1907                            "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1908                            i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1909
1910                 switch (i) {
1911                 case MFW_DRV_MSG_LINK_CHANGE:
1912                         qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
1913                         break;
1914                 case MFW_DRV_MSG_VF_DISABLED:
1915                         qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
1916                         break;
1917                 case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1918                         qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1919                                                   QED_DCBX_REMOTE_LLDP_MIB);
1920                         break;
1921                 case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1922                         qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1923                                                   QED_DCBX_REMOTE_MIB);
1924                         break;
1925                 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1926                         qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1927                                                   QED_DCBX_OPERATIONAL_MIB);
1928                         break;
1929                 case MFW_DRV_MSG_OEM_CFG_UPDATE:
1930                         qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
1931                         break;
1932                 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1933                         qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1934                         break;
1935                 case MFW_DRV_MSG_ERROR_RECOVERY:
1936                         qed_mcp_handle_process_kill(p_hwfn, p_ptt);
1937                         break;
1938                 case MFW_DRV_MSG_GET_LAN_STATS:
1939                 case MFW_DRV_MSG_GET_FCOE_STATS:
1940                 case MFW_DRV_MSG_GET_ISCSI_STATS:
1941                 case MFW_DRV_MSG_GET_RDMA_STATS:
1942                         qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1943                         break;
1944                 case MFW_DRV_MSG_BW_UPDATE:
1945                         qed_mcp_update_bw(p_hwfn, p_ptt);
1946                         break;
1947                 case MFW_DRV_MSG_S_TAG_UPDATE:
1948                         qed_mcp_update_stag(p_hwfn, p_ptt);
1949                         break;
1950                 case MFW_DRV_MSG_FAILURE_DETECTED:
1951                         qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
1952                         break;
1953                 case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
1954                         qed_mcp_handle_critical_error(p_hwfn, p_ptt);
1955                         break;
1956                 case MFW_DRV_MSG_GET_TLV_REQ:
1957                         qed_mfw_tlv_req(p_hwfn);
1958                         break;
1959                 default:
1960                         DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1961                         rc = -EINVAL;
1962                 }
1963         }
1964
1965         /* ACK everything */
1966         for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1967                 __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
1968
1969                 /* MFW expect answer in BE, so we force write in that format */
1970                 qed_wr(p_hwfn, p_ptt,
1971                        info->mfw_mb_addr + sizeof(u32) +
1972                        MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1973                        sizeof(u32) + i * sizeof(u32),
1974                        (__force u32)val);
1975         }
1976
1977         if (!found) {
1978                 DP_NOTICE(p_hwfn,
1979                           "Received an MFW message indication but no new message!\n");
1980                 rc = -EINVAL;
1981         }
1982
1983         /* Copy the new mfw messages into the shadow */
1984         memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1985
1986         return rc;
1987 }
1988
1989 int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
1990                         struct qed_ptt *p_ptt,
1991                         u32 *p_mfw_ver, u32 *p_running_bundle_id)
1992 {
1993         u32 global_offsize;
1994
1995         if (IS_VF(p_hwfn->cdev)) {
1996                 if (p_hwfn->vf_iov_info) {
1997                         struct pfvf_acquire_resp_tlv *p_resp;
1998
1999                         p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2000                         *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2001                         return 0;
2002                 } else {
2003                         DP_VERBOSE(p_hwfn,
2004                                    QED_MSG_IOV,
2005                                    "VF requested MFW version prior to ACQUIRE\n");
2006                         return -EINVAL;
2007                 }
2008         }
2009
2010         global_offsize = qed_rd(p_hwfn, p_ptt,
2011                                 SECTION_OFFSIZE_ADDR(p_hwfn->
2012                                                      mcp_info->public_base,
2013                                                      PUBLIC_GLOBAL));
2014         *p_mfw_ver =
2015             qed_rd(p_hwfn, p_ptt,
2016                    SECTION_ADDR(global_offsize,
2017                                 0) + offsetof(struct public_global, mfw_ver));
2018
2019         if (p_running_bundle_id != NULL) {
2020                 *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
2021                                               SECTION_ADDR(global_offsize, 0) +
2022                                               offsetof(struct public_global,
2023                                                        running_bundle_id));
2024         }
2025
2026         return 0;
2027 }
2028
2029 int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
2030                         struct qed_ptt *p_ptt, u32 *p_mbi_ver)
2031 {
2032         u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2033
2034         if (IS_VF(p_hwfn->cdev))
2035                 return -EINVAL;
2036
2037         /* Read the address of the nvm_cfg */
2038         nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2039         if (!nvm_cfg_addr) {
2040                 DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
2041                 return -EINVAL;
2042         }
2043
2044         /* Read the offset of nvm_cfg1 */
2045         nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2046
2047         mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2048                        offsetof(struct nvm_cfg1, glob) +
2049                        offsetof(struct nvm_cfg1_glob, mbi_version);
2050         *p_mbi_ver = qed_rd(p_hwfn, p_ptt,
2051                             mbi_ver_addr) &
2052                      (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2053                       NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2054                       NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2055
2056         return 0;
2057 }
2058
2059 int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
2060                            struct qed_ptt *p_ptt, u32 *p_media_type)
2061 {
2062         *p_media_type = MEDIA_UNSPECIFIED;
2063
2064         if (IS_VF(p_hwfn->cdev))
2065                 return -EINVAL;
2066
2067         if (!qed_mcp_is_init(p_hwfn)) {
2068                 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2069                 return -EBUSY;
2070         }
2071
2072         if (!p_ptt) {
2073                 *p_media_type = MEDIA_UNSPECIFIED;
2074                 return -EINVAL;
2075         }
2076
2077         *p_media_type = qed_rd(p_hwfn, p_ptt,
2078                                p_hwfn->mcp_info->port_addr +
2079                                offsetof(struct public_port,
2080                                         media_type));
2081
2082         return 0;
2083 }
2084
2085 int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
2086                                  struct qed_ptt *p_ptt,
2087                                  u32 *p_transceiver_state,
2088                                  u32 *p_transceiver_type)
2089 {
2090         u32 transceiver_info;
2091
2092         *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
2093         *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
2094
2095         if (IS_VF(p_hwfn->cdev))
2096                 return -EINVAL;
2097
2098         if (!qed_mcp_is_init(p_hwfn)) {
2099                 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2100                 return -EBUSY;
2101         }
2102
2103         transceiver_info = qed_rd(p_hwfn, p_ptt,
2104                                   p_hwfn->mcp_info->port_addr +
2105                                   offsetof(struct public_port,
2106                                            transceiver_data));
2107
2108         *p_transceiver_state = (transceiver_info &
2109                                 ETH_TRANSCEIVER_STATE_MASK) >>
2110                                 ETH_TRANSCEIVER_STATE_OFFSET;
2111
2112         if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
2113                 *p_transceiver_type = (transceiver_info &
2114                                        ETH_TRANSCEIVER_TYPE_MASK) >>
2115                                        ETH_TRANSCEIVER_TYPE_OFFSET;
2116         else
2117                 *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
2118
2119         return 0;
2120 }
2121 static bool qed_is_transceiver_ready(u32 transceiver_state,
2122                                      u32 transceiver_type)
2123 {
2124         if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2125             ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2126             (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
2127                 return true;
2128
2129         return false;
2130 }
2131
2132 int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
2133                              struct qed_ptt *p_ptt, u32 *p_speed_mask)
2134 {
2135         u32 transceiver_type, transceiver_state;
2136         int ret;
2137
2138         ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2139                                            &transceiver_type);
2140         if (ret)
2141                 return ret;
2142
2143         if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
2144                                      false)
2145                 return -EINVAL;
2146
2147         switch (transceiver_type) {
2148         case ETH_TRANSCEIVER_TYPE_1G_LX:
2149         case ETH_TRANSCEIVER_TYPE_1G_SX:
2150         case ETH_TRANSCEIVER_TYPE_1G_PCC:
2151         case ETH_TRANSCEIVER_TYPE_1G_ACC:
2152         case ETH_TRANSCEIVER_TYPE_1000BASET:
2153                 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2154                 break;
2155         case ETH_TRANSCEIVER_TYPE_10G_SR:
2156         case ETH_TRANSCEIVER_TYPE_10G_LR:
2157         case ETH_TRANSCEIVER_TYPE_10G_LRM:
2158         case ETH_TRANSCEIVER_TYPE_10G_ER:
2159         case ETH_TRANSCEIVER_TYPE_10G_PCC:
2160         case ETH_TRANSCEIVER_TYPE_10G_ACC:
2161         case ETH_TRANSCEIVER_TYPE_4x10G:
2162                 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2163                 break;
2164         case ETH_TRANSCEIVER_TYPE_40G_LR4:
2165         case ETH_TRANSCEIVER_TYPE_40G_SR4:
2166         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2167         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2168                 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2169                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2170                 break;
2171         case ETH_TRANSCEIVER_TYPE_100G_AOC:
2172         case ETH_TRANSCEIVER_TYPE_100G_SR4:
2173         case ETH_TRANSCEIVER_TYPE_100G_LR4:
2174         case ETH_TRANSCEIVER_TYPE_100G_ER4:
2175         case ETH_TRANSCEIVER_TYPE_100G_ACC:
2176                 *p_speed_mask =
2177                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2178                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2179                 break;
2180         case ETH_TRANSCEIVER_TYPE_25G_SR:
2181         case ETH_TRANSCEIVER_TYPE_25G_LR:
2182         case ETH_TRANSCEIVER_TYPE_25G_AOC:
2183         case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2184         case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2185         case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2186                 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2187                 break;
2188         case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2189         case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2190         case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2191         case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2192                 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2193                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2194                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2195                 break;
2196         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
2197         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
2198                 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2199                                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2200                 break;
2201         case ETH_TRANSCEIVER_TYPE_40G_CR4:
2202         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2203                 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2204                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2205                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2206                 break;
2207         case ETH_TRANSCEIVER_TYPE_100G_CR4:
2208         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2209                 *p_speed_mask =
2210                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2211                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2212                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2213                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2214                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2215                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2216                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2217                 break;
2218         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2219         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2220         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2221                 *p_speed_mask =
2222                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2223                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2224                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2225                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2226                 break;
2227         case ETH_TRANSCEIVER_TYPE_XLPPI:
2228                 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2229                 break;
2230         case ETH_TRANSCEIVER_TYPE_10G_BASET:
2231         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
2232         case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
2233                 *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2234                                 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2235                 break;
2236         default:
2237                 DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2238                         transceiver_type);
2239                 *p_speed_mask = 0xff;
2240                 break;
2241         }
2242
2243         return 0;
2244 }
2245
2246 int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
2247                              struct qed_ptt *p_ptt, u32 *p_board_config)
2248 {
2249         u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2250
2251         if (IS_VF(p_hwfn->cdev))
2252                 return -EINVAL;
2253
2254         if (!qed_mcp_is_init(p_hwfn)) {
2255                 DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2256                 return -EBUSY;
2257         }
2258         if (!p_ptt) {
2259                 *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2260                 return -EINVAL;
2261         }
2262
2263         nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2264         nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2265         port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2266                         offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2267         *p_board_config = qed_rd(p_hwfn, p_ptt,
2268                                  port_cfg_addr +
2269                                  offsetof(struct nvm_cfg1_port,
2270                                           board_cfg));
2271
2272         return 0;
2273 }
2274
2275 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2276 static void
2277 qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
2278                                enum qed_pci_personality *p_proto)
2279 {
2280         /* There wasn't ever a legacy MFW that published iwarp.
2281          * So at this point, this is either plain l2 or RoCE.
2282          */
2283         if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
2284                 *p_proto = QED_PCI_ETH_ROCE;
2285         else
2286                 *p_proto = QED_PCI_ETH;
2287
2288         DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2289                    "According to Legacy capabilities, L2 personality is %08x\n",
2290                    (u32) *p_proto);
2291 }
2292
2293 static int
2294 qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
2295                             struct qed_ptt *p_ptt,
2296                             enum qed_pci_personality *p_proto)
2297 {
2298         u32 resp = 0, param = 0;
2299         int rc;
2300
2301         rc = qed_mcp_cmd(p_hwfn, p_ptt,
2302                          DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2303         if (rc)
2304                 return rc;
2305         if (resp != FW_MSG_CODE_OK) {
2306                 DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2307                            "MFW lacks support for command; Returns %08x\n",
2308                            resp);
2309                 return -EINVAL;
2310         }
2311
2312         switch (param) {
2313         case FW_MB_PARAM_GET_PF_RDMA_NONE:
2314                 *p_proto = QED_PCI_ETH;
2315                 break;
2316         case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2317                 *p_proto = QED_PCI_ETH_ROCE;
2318                 break;
2319         case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2320                 *p_proto = QED_PCI_ETH_IWARP;
2321                 break;
2322         case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2323                 *p_proto = QED_PCI_ETH_RDMA;
2324                 break;
2325         default:
2326                 DP_NOTICE(p_hwfn,
2327                           "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2328                           param);
2329                 return -EINVAL;
2330         }
2331
2332         DP_VERBOSE(p_hwfn,
2333                    NETIF_MSG_IFUP,
2334                    "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2335                    (u32) *p_proto, resp, param);
2336         return 0;
2337 }
2338
2339 static int
2340 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
2341                         struct public_func *p_info,
2342                         struct qed_ptt *p_ptt,
2343                         enum qed_pci_personality *p_proto)
2344 {
2345         int rc = 0;
2346
2347         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2348         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2349                 if (!IS_ENABLED(CONFIG_QED_RDMA))
2350                         *p_proto = QED_PCI_ETH;
2351                 else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
2352                         qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2353                 break;
2354         case FUNC_MF_CFG_PROTOCOL_ISCSI:
2355                 *p_proto = QED_PCI_ISCSI;
2356                 break;
2357         case FUNC_MF_CFG_PROTOCOL_FCOE:
2358                 *p_proto = QED_PCI_FCOE;
2359                 break;
2360         case FUNC_MF_CFG_PROTOCOL_ROCE:
2361                 DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
2362         /* Fallthrough */
2363         default:
2364                 rc = -EINVAL;
2365         }
2366
2367         return rc;
2368 }
2369
2370 int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
2371                                  struct qed_ptt *p_ptt)
2372 {
2373         struct qed_mcp_function_info *info;
2374         struct public_func shmem_info;
2375
2376         qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2377         info = &p_hwfn->mcp_info->func_info;
2378
2379         info->pause_on_host = (shmem_info.config &
2380                                FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2381
2382         if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2383                                     &info->protocol)) {
2384                 DP_ERR(p_hwfn, "Unknown personality %08x\n",
2385                        (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2386                 return -EINVAL;
2387         }
2388
2389         qed_read_pf_bandwidth(p_hwfn, &shmem_info);
2390
2391         if (shmem_info.mac_upper || shmem_info.mac_lower) {
2392                 info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2393                 info->mac[1] = (u8)(shmem_info.mac_upper);
2394                 info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2395                 info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2396                 info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2397                 info->mac[5] = (u8)(shmem_info.mac_lower);
2398
2399                 /* Store primary MAC for later possible WoL */
2400                 memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
2401         } else {
2402                 DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
2403         }
2404
2405         info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2406                          (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2407         info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2408                          (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2409
2410         info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2411
2412         info->mtu = (u16)shmem_info.mtu_size;
2413
2414         p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
2415         p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
2416         if (qed_mcp_is_init(p_hwfn)) {
2417                 u32 resp = 0, param = 0;
2418                 int rc;
2419
2420                 rc = qed_mcp_cmd(p_hwfn, p_ptt,
2421                                  DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2422                 if (rc)
2423                         return rc;
2424                 if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2425                         p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
2426         }
2427
2428         DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
2429                    "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2430                 info->pause_on_host, info->protocol,
2431                 info->bandwidth_min, info->bandwidth_max,
2432                 info->mac[0], info->mac[1], info->mac[2],
2433                 info->mac[3], info->mac[4], info->mac[5],
2434                 info->wwn_port, info->wwn_node,
2435                 info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
2436
2437         return 0;
2438 }
2439
2440 struct qed_mcp_link_params
2441 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
2442 {
2443         if (!p_hwfn || !p_hwfn->mcp_info)
2444                 return NULL;
2445         return &p_hwfn->mcp_info->link_input;
2446 }
2447
2448 struct qed_mcp_link_state
2449 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
2450 {
2451         if (!p_hwfn || !p_hwfn->mcp_info)
2452                 return NULL;
2453         return &p_hwfn->mcp_info->link_output;
2454 }
2455
2456 struct qed_mcp_link_capabilities
2457 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
2458 {
2459         if (!p_hwfn || !p_hwfn->mcp_info)
2460                 return NULL;
2461         return &p_hwfn->mcp_info->link_capabilities;
2462 }
2463
2464 int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2465 {
2466         u32 resp = 0, param = 0;
2467         int rc;
2468
2469         rc = qed_mcp_cmd(p_hwfn, p_ptt,
2470                          DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2471
2472         /* Wait for the drain to complete before returning */
2473         msleep(1020);
2474
2475         return rc;
2476 }
2477
2478 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
2479                            struct qed_ptt *p_ptt, u32 *p_flash_size)
2480 {
2481         u32 flash_size;
2482
2483         if (IS_VF(p_hwfn->cdev))
2484                 return -EINVAL;
2485
2486         flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2487         flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2488                       MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2489         flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2490
2491         *p_flash_size = flash_size;
2492
2493         return 0;
2494 }
2495
2496 int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2497 {
2498         struct qed_dev *cdev = p_hwfn->cdev;
2499
2500         if (cdev->recov_in_prog) {
2501                 DP_NOTICE(p_hwfn,
2502                           "Avoid triggering a recovery since such a process is already in progress\n");
2503                 return -EAGAIN;
2504         }
2505
2506         DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
2507         qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2508
2509         return 0;
2510 }
2511
2512 #define QED_RECOVERY_PROLOG_SLEEP_MS    100
2513
2514 int qed_recovery_prolog(struct qed_dev *cdev)
2515 {
2516         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2517         struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
2518         int rc;
2519
2520         /* Allow ongoing PCIe transactions to complete */
2521         msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
2522
2523         /* Clear the PF's internal FID_enable in the PXP */
2524         rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
2525         if (rc)
2526                 DP_NOTICE(p_hwfn,
2527                           "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
2528                           rc);
2529
2530         return rc;
2531 }
2532
2533 static int
2534 qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
2535                           struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2536 {
2537         u32 resp = 0, param = 0, rc_param = 0;
2538         int rc;
2539
2540         /* Only Leader can configure MSIX, and need to take CMT into account */
2541         if (!IS_LEAD_HWFN(p_hwfn))
2542                 return 0;
2543         num *= p_hwfn->cdev->num_hwfns;
2544
2545         param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2546                  DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2547         param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2548                  DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2549
2550         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2551                          &resp, &rc_param);
2552
2553         if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2554                 DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
2555                 rc = -EINVAL;
2556         } else {
2557                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2558                            "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2559                            num, vf_id);
2560         }
2561
2562         return rc;
2563 }
2564
2565 static int
2566 qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
2567                           struct qed_ptt *p_ptt, u8 num)
2568 {
2569         u32 resp = 0, param = num, rc_param = 0;
2570         int rc;
2571
2572         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2573                          param, &resp, &rc_param);
2574
2575         if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2576                 DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
2577                 rc = -EINVAL;
2578         } else {
2579                 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2580                            "Requested 0x%02x MSI-x interrupts for VFs\n", num);
2581         }
2582
2583         return rc;
2584 }
2585
2586 int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
2587                            struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2588 {
2589         if (QED_IS_BB(p_hwfn->cdev))
2590                 return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2591         else
2592                 return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2593 }
2594
2595 int
2596 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
2597                          struct qed_ptt *p_ptt,
2598                          struct qed_mcp_drv_version *p_ver)
2599 {
2600         struct qed_mcp_mb_params mb_params;
2601         struct drv_version_stc drv_version;
2602         __be32 val;
2603         u32 i;
2604         int rc;
2605
2606         memset(&drv_version, 0, sizeof(drv_version));
2607         drv_version.version = p_ver->version;
2608         for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
2609                 val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
2610                 *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
2611         }
2612
2613         memset(&mb_params, 0, sizeof(mb_params));
2614         mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2615         mb_params.p_data_src = &drv_version;
2616         mb_params.data_src_size = sizeof(drv_version);
2617         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2618         if (rc)
2619                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2620
2621         return rc;
2622 }
2623
2624 /* A maximal 100 msec waiting time for the MCP to halt */
2625 #define QED_MCP_HALT_SLEEP_MS           10
2626 #define QED_MCP_HALT_MAX_RETRIES        10
2627
2628 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2629 {
2630         u32 resp = 0, param = 0, cpu_state, cnt = 0;
2631         int rc;
2632
2633         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2634                          &param);
2635         if (rc) {
2636                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2637                 return rc;
2638         }
2639
2640         do {
2641                 msleep(QED_MCP_HALT_SLEEP_MS);
2642                 cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2643                 if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2644                         break;
2645         } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
2646
2647         if (cnt == QED_MCP_HALT_MAX_RETRIES) {
2648                 DP_NOTICE(p_hwfn,
2649                           "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2650                           qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2651                 return -EBUSY;
2652         }
2653
2654         qed_mcp_cmd_set_blocking(p_hwfn, true);
2655
2656         return 0;
2657 }
2658
2659 #define QED_MCP_RESUME_SLEEP_MS 10
2660
2661 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2662 {
2663         u32 cpu_mode, cpu_state;
2664
2665         qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2666
2667         cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2668         cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2669         qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2670         msleep(QED_MCP_RESUME_SLEEP_MS);
2671         cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2672
2673         if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2674                 DP_NOTICE(p_hwfn,
2675                           "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2676                           cpu_mode, cpu_state);
2677                 return -EBUSY;
2678         }
2679
2680         qed_mcp_cmd_set_blocking(p_hwfn, false);
2681
2682         return 0;
2683 }
2684
2685 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
2686                                      struct qed_ptt *p_ptt,
2687                                      enum qed_ov_client client)
2688 {
2689         u32 resp = 0, param = 0;
2690         u32 drv_mb_param;
2691         int rc;
2692
2693         switch (client) {
2694         case QED_OV_CLIENT_DRV:
2695                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2696                 break;
2697         case QED_OV_CLIENT_USER:
2698                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2699                 break;
2700         case QED_OV_CLIENT_VENDOR_SPEC:
2701                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2702                 break;
2703         default:
2704                 DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
2705                 return -EINVAL;
2706         }
2707
2708         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2709                          drv_mb_param, &resp, &param);
2710         if (rc)
2711                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2712
2713         return rc;
2714 }
2715
2716 int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
2717                                    struct qed_ptt *p_ptt,
2718                                    enum qed_ov_driver_state drv_state)
2719 {
2720         u32 resp = 0, param = 0;
2721         u32 drv_mb_param;
2722         int rc;
2723
2724         switch (drv_state) {
2725         case QED_OV_DRIVER_STATE_NOT_LOADED:
2726                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2727                 break;
2728         case QED_OV_DRIVER_STATE_DISABLED:
2729                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2730                 break;
2731         case QED_OV_DRIVER_STATE_ACTIVE:
2732                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2733                 break;
2734         default:
2735                 DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
2736                 return -EINVAL;
2737         }
2738
2739         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2740                          drv_mb_param, &resp, &param);
2741         if (rc)
2742                 DP_ERR(p_hwfn, "Failed to send driver state\n");
2743
2744         return rc;
2745 }
2746
2747 int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
2748                           struct qed_ptt *p_ptt, u16 mtu)
2749 {
2750         u32 resp = 0, param = 0;
2751         u32 drv_mb_param;
2752         int rc;
2753
2754         drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2755         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2756                          drv_mb_param, &resp, &param);
2757         if (rc)
2758                 DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2759
2760         return rc;
2761 }
2762
2763 int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
2764                           struct qed_ptt *p_ptt, u8 *mac)
2765 {
2766         struct qed_mcp_mb_params mb_params;
2767         u32 mfw_mac[2];
2768         int rc;
2769
2770         memset(&mb_params, 0, sizeof(mb_params));
2771         mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2772         mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2773                           DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2774         mb_params.param |= MCP_PF_ID(p_hwfn);
2775
2776         /* MCP is BE, and on LE platforms PCI would swap access to SHMEM
2777          * in 32-bit granularity.
2778          * So the MAC has to be set in native order [and not byte order],
2779          * otherwise it would be read incorrectly by MFW after swap.
2780          */
2781         mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2782         mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2783
2784         mb_params.p_data_src = (u8 *)mfw_mac;
2785         mb_params.data_src_size = 8;
2786         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2787         if (rc)
2788                 DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2789
2790         /* Store primary MAC for later possible WoL */
2791         memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
2792
2793         return rc;
2794 }
2795
2796 int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
2797                           struct qed_ptt *p_ptt, enum qed_ov_wol wol)
2798 {
2799         u32 resp = 0, param = 0;
2800         u32 drv_mb_param;
2801         int rc;
2802
2803         if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
2804                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
2805                            "Can't change WoL configuration when WoL isn't supported\n");
2806                 return -EINVAL;
2807         }
2808
2809         switch (wol) {
2810         case QED_OV_WOL_DEFAULT:
2811                 drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2812                 break;
2813         case QED_OV_WOL_DISABLED:
2814                 drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2815                 break;
2816         case QED_OV_WOL_ENABLED:
2817                 drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2818                 break;
2819         default:
2820                 DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2821                 return -EINVAL;
2822         }
2823
2824         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2825                          drv_mb_param, &resp, &param);
2826         if (rc)
2827                 DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2828
2829         /* Store the WoL update for a future unload */
2830         p_hwfn->cdev->wol_config = (u8)wol;
2831
2832         return rc;
2833 }
2834
2835 int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
2836                               struct qed_ptt *p_ptt,
2837                               enum qed_ov_eswitch eswitch)
2838 {
2839         u32 resp = 0, param = 0;
2840         u32 drv_mb_param;
2841         int rc;
2842
2843         switch (eswitch) {
2844         case QED_OV_ESWITCH_NONE:
2845                 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2846                 break;
2847         case QED_OV_ESWITCH_VEB:
2848                 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2849                 break;
2850         case QED_OV_ESWITCH_VEPA:
2851                 drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2852                 break;
2853         default:
2854                 DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2855                 return -EINVAL;
2856         }
2857
2858         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2859                          drv_mb_param, &resp, &param);
2860         if (rc)
2861                 DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2862
2863         return rc;
2864 }
2865
2866 int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
2867                     struct qed_ptt *p_ptt, enum qed_led_mode mode)
2868 {
2869         u32 resp = 0, param = 0, drv_mb_param;
2870         int rc;
2871
2872         switch (mode) {
2873         case QED_LED_MODE_ON:
2874                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2875                 break;
2876         case QED_LED_MODE_OFF:
2877                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2878                 break;
2879         case QED_LED_MODE_RESTORE:
2880                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2881                 break;
2882         default:
2883                 DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
2884                 return -EINVAL;
2885         }
2886
2887         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2888                          drv_mb_param, &resp, &param);
2889
2890         return rc;
2891 }
2892
2893 int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
2894                           struct qed_ptt *p_ptt, u32 mask_parities)
2895 {
2896         u32 resp = 0, param = 0;
2897         int rc;
2898
2899         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2900                          mask_parities, &resp, &param);
2901
2902         if (rc) {
2903                 DP_ERR(p_hwfn,
2904                        "MCP response failure for mask parities, aborting\n");
2905         } else if (resp != FW_MSG_CODE_OK) {
2906                 DP_ERR(p_hwfn,
2907                        "MCP did not acknowledge mask parity request. Old MFW?\n");
2908                 rc = -EINVAL;
2909         }
2910
2911         return rc;
2912 }
2913
2914 int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
2915 {
2916         u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
2917         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2918         u32 resp = 0, resp_param = 0;
2919         struct qed_ptt *p_ptt;
2920         int rc = 0;
2921
2922         p_ptt = qed_ptt_acquire(p_hwfn);
2923         if (!p_ptt)
2924                 return -EBUSY;
2925
2926         while (bytes_left > 0) {
2927                 bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
2928
2929                 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2930                                         DRV_MSG_CODE_NVM_READ_NVRAM,
2931                                         addr + offset +
2932                                         (bytes_to_copy <<
2933                                          DRV_MB_PARAM_NVM_LEN_OFFSET),
2934                                         &resp, &resp_param,
2935                                         &read_len,
2936                                         (u32 *)(p_buf + offset));
2937
2938                 if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
2939                         DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
2940                         break;
2941                 }
2942
2943                 /* This can be a lengthy process, and it's possible scheduler
2944                  * isn't preemptable. Sleep a bit to prevent CPU hogging.
2945                  */
2946                 if (bytes_left % 0x1000 <
2947                     (bytes_left - read_len) % 0x1000)
2948                         usleep_range(1000, 2000);
2949
2950                 offset += read_len;
2951                 bytes_left -= read_len;
2952         }
2953
2954         cdev->mcp_nvm_resp = resp;
2955         qed_ptt_release(p_hwfn, p_ptt);
2956
2957         return rc;
2958 }
2959
2960 int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
2961 {
2962         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2963         struct qed_ptt *p_ptt;
2964
2965         p_ptt = qed_ptt_acquire(p_hwfn);
2966         if (!p_ptt)
2967                 return -EBUSY;
2968
2969         memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
2970         qed_ptt_release(p_hwfn, p_ptt);
2971
2972         return 0;
2973 }
2974
2975 int qed_mcp_nvm_write(struct qed_dev *cdev,
2976                       u32 cmd, u32 addr, u8 *p_buf, u32 len)
2977 {
2978         u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
2979         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2980         struct qed_ptt *p_ptt;
2981         int rc = -EINVAL;
2982
2983         p_ptt = qed_ptt_acquire(p_hwfn);
2984         if (!p_ptt)
2985                 return -EBUSY;
2986
2987         switch (cmd) {
2988         case QED_PUT_FILE_BEGIN:
2989                 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2990                 break;
2991         case QED_PUT_FILE_DATA:
2992                 nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2993                 break;
2994         case QED_NVM_WRITE_NVRAM:
2995                 nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2996                 break;
2997         default:
2998                 DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
2999                 rc = -EINVAL;
3000                 goto out;
3001         }
3002
3003         buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
3004         while (buf_idx < len) {
3005                 if (cmd == QED_PUT_FILE_BEGIN)
3006                         nvm_offset = addr;
3007                 else
3008                         nvm_offset = ((buf_size <<
3009                                        DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
3010                                        buf_idx;
3011                 rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3012                                         &resp, &param, buf_size,
3013                                         (u32 *)&p_buf[buf_idx]);
3014                 if (rc) {
3015                         DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
3016                         resp = FW_MSG_CODE_ERROR;
3017                         break;
3018                 }
3019
3020                 if (resp != FW_MSG_CODE_OK &&
3021                     resp != FW_MSG_CODE_NVM_OK &&
3022                     resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3023                         DP_NOTICE(cdev,
3024                                   "nvm write failed, resp = 0x%08x\n", resp);
3025                         rc = -EINVAL;
3026                         break;
3027                 }
3028
3029                 /* This can be a lengthy process, and it's possible scheduler
3030                  * isn't pre-emptable. Sleep a bit to prevent CPU hogging.
3031                  */
3032                 if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
3033                         usleep_range(1000, 2000);
3034
3035                 /* For MBI upgrade, MFW response includes the next buffer offset
3036                  * to be delivered to MFW.
3037                  */
3038                 if (param && cmd == QED_PUT_FILE_DATA) {
3039                         buf_idx = QED_MFW_GET_FIELD(param,
3040                                         FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
3041                         buf_size = QED_MFW_GET_FIELD(param,
3042                                          FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
3043                 } else {
3044                         buf_idx += buf_size;
3045                         buf_size = min_t(u32, (len - buf_idx),
3046                                          MCP_DRV_NVM_BUF_LEN);
3047                 }
3048         }
3049
3050         cdev->mcp_nvm_resp = resp;
3051 out:
3052         qed_ptt_release(p_hwfn, p_ptt);
3053
3054         return rc;
3055 }
3056
3057 int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3058                          u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
3059 {
3060         u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
3061         u32 resp, param;
3062         int rc;
3063
3064         nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
3065                        DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
3066         nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
3067                        DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
3068
3069         addr = offset;
3070         offset = 0;
3071         bytes_left = len;
3072         while (bytes_left > 0) {
3073                 bytes_to_copy = min_t(u32, bytes_left,
3074                                       MAX_I2C_TRANSACTION_SIZE);
3075                 nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3076                                DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3077                 nvm_offset |= ((addr + offset) <<
3078                                DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
3079                                DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
3080                 nvm_offset |= (bytes_to_copy <<
3081                                DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
3082                                DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
3083                 rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3084                                         DRV_MSG_CODE_TRANSCEIVER_READ,
3085                                         nvm_offset, &resp, &param, &buf_size,
3086                                         (u32 *)(p_buf + offset));
3087                 if (rc) {
3088                         DP_NOTICE(p_hwfn,
3089                                   "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3090                                   rc);
3091                         return rc;
3092                 }
3093
3094                 if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3095                         return -ENODEV;
3096                 else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3097                         return -EINVAL;
3098
3099                 offset += buf_size;
3100                 bytes_left -= buf_size;
3101         }
3102
3103         return 0;
3104 }
3105
3106 int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3107 {
3108         u32 drv_mb_param = 0, rsp, param;
3109         int rc = 0;
3110
3111         drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3112                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3113
3114         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3115                          drv_mb_param, &rsp, &param);
3116
3117         if (rc)
3118                 return rc;
3119
3120         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3121             (param != DRV_MB_PARAM_BIST_RC_PASSED))
3122                 rc = -EAGAIN;
3123
3124         return rc;
3125 }
3126
3127 int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3128 {
3129         u32 drv_mb_param, rsp, param;
3130         int rc = 0;
3131
3132         drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3133                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3134
3135         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3136                          drv_mb_param, &rsp, &param);
3137
3138         if (rc)
3139                 return rc;
3140
3141         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3142             (param != DRV_MB_PARAM_BIST_RC_PASSED))
3143                 rc = -EAGAIN;
3144
3145         return rc;
3146 }
3147
3148 int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
3149                                     struct qed_ptt *p_ptt,
3150                                     u32 *num_images)
3151 {
3152         u32 drv_mb_param = 0, rsp;
3153         int rc = 0;
3154
3155         drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3156                         DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3157
3158         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3159                          drv_mb_param, &rsp, num_images);
3160         if (rc)
3161                 return rc;
3162
3163         if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3164                 rc = -EINVAL;
3165
3166         return rc;
3167 }
3168
3169 int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
3170                                    struct qed_ptt *p_ptt,
3171                                    struct bist_nvm_image_att *p_image_att,
3172                                    u32 image_index)
3173 {
3174         u32 buf_size = 0, param, resp = 0, resp_param = 0;
3175         int rc;
3176
3177         param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3178                 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
3179         param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
3180
3181         rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3182                                 DRV_MSG_CODE_BIST_TEST, param,
3183                                 &resp, &resp_param,
3184                                 &buf_size,
3185                                 (u32 *)p_image_att);
3186         if (rc)
3187                 return rc;
3188
3189         if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3190             (p_image_att->return_code != 1))
3191                 rc = -EINVAL;
3192
3193         return rc;
3194 }
3195
3196 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
3197 {
3198         struct qed_nvm_image_info nvm_info;
3199         struct qed_ptt *p_ptt;
3200         int rc;
3201         u32 i;
3202
3203         if (p_hwfn->nvm_info.valid)
3204                 return 0;
3205
3206         p_ptt = qed_ptt_acquire(p_hwfn);
3207         if (!p_ptt) {
3208                 DP_ERR(p_hwfn, "failed to acquire ptt\n");
3209                 return -EBUSY;
3210         }
3211
3212         /* Acquire from MFW the amount of available images */
3213         nvm_info.num_images = 0;
3214         rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
3215                                              p_ptt, &nvm_info.num_images);
3216         if (rc == -EOPNOTSUPP) {
3217                 DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
3218                 goto out;
3219         } else if (rc || !nvm_info.num_images) {
3220                 DP_ERR(p_hwfn, "Failed getting number of images\n");
3221                 goto err0;
3222         }
3223
3224         nvm_info.image_att = kmalloc_array(nvm_info.num_images,
3225                                            sizeof(struct bist_nvm_image_att),
3226                                            GFP_KERNEL);
3227         if (!nvm_info.image_att) {
3228                 rc = -ENOMEM;
3229                 goto err0;
3230         }
3231
3232         /* Iterate over images and get their attributes */
3233         for (i = 0; i < nvm_info.num_images; i++) {
3234                 rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
3235                                                     &nvm_info.image_att[i], i);
3236                 if (rc) {
3237                         DP_ERR(p_hwfn,
3238                                "Failed getting image index %d attributes\n", i);
3239                         goto err1;
3240                 }
3241
3242                 DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
3243                            nvm_info.image_att[i].len);
3244         }
3245 out:
3246         /* Update hwfn's nvm_info */
3247         if (nvm_info.num_images) {
3248                 p_hwfn->nvm_info.num_images = nvm_info.num_images;
3249                 kfree(p_hwfn->nvm_info.image_att);
3250                 p_hwfn->nvm_info.image_att = nvm_info.image_att;
3251                 p_hwfn->nvm_info.valid = true;
3252         }
3253
3254         qed_ptt_release(p_hwfn, p_ptt);
3255         return 0;
3256
3257 err1:
3258         kfree(nvm_info.image_att);
3259 err0:
3260         qed_ptt_release(p_hwfn, p_ptt);
3261         return rc;
3262 }
3263
3264 void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
3265 {
3266         kfree(p_hwfn->nvm_info.image_att);
3267         p_hwfn->nvm_info.image_att = NULL;
3268         p_hwfn->nvm_info.valid = false;
3269 }
3270
3271 int
3272 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3273                           enum qed_nvm_images image_id,
3274                           struct qed_nvm_image_att *p_image_att)
3275 {
3276         enum nvm_image_type type;
3277         u32 i;
3278
3279         /* Translate image_id into MFW definitions */
3280         switch (image_id) {
3281         case QED_NVM_IMAGE_ISCSI_CFG:
3282                 type = NVM_TYPE_ISCSI_CFG;
3283                 break;
3284         case QED_NVM_IMAGE_FCOE_CFG:
3285                 type = NVM_TYPE_FCOE_CFG;
3286                 break;
3287         case QED_NVM_IMAGE_MDUMP:
3288                 type = NVM_TYPE_MDUMP;
3289                 break;
3290         case QED_NVM_IMAGE_NVM_CFG1:
3291                 type = NVM_TYPE_NVM_CFG1;
3292                 break;
3293         case QED_NVM_IMAGE_DEFAULT_CFG:
3294                 type = NVM_TYPE_DEFAULT_CFG;
3295                 break;
3296         case QED_NVM_IMAGE_NVM_META:
3297                 type = NVM_TYPE_META;
3298                 break;
3299         default:
3300                 DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
3301                           image_id);
3302                 return -EINVAL;
3303         }
3304
3305         qed_mcp_nvm_info_populate(p_hwfn);
3306         for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3307                 if (type == p_hwfn->nvm_info.image_att[i].image_type)
3308                         break;
3309         if (i == p_hwfn->nvm_info.num_images) {
3310                 DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3311                            "Failed to find nvram image of type %08x\n",
3312                            image_id);
3313                 return -ENOENT;
3314         }
3315
3316         p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
3317         p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
3318
3319         return 0;
3320 }
3321
3322 int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
3323                           enum qed_nvm_images image_id,
3324                           u8 *p_buffer, u32 buffer_len)
3325 {
3326         struct qed_nvm_image_att image_att;
3327         int rc;
3328
3329         memset(p_buffer, 0, buffer_len);
3330
3331         rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
3332         if (rc)
3333                 return rc;
3334
3335         /* Validate sizes - both the image's and the supplied buffer's */
3336         if (image_att.length <= 4) {
3337                 DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3338                            "Image [%d] is too small - only %d bytes\n",
3339                            image_id, image_att.length);
3340                 return -EINVAL;
3341         }
3342
3343         if (image_att.length > buffer_len) {
3344                 DP_VERBOSE(p_hwfn,
3345                            QED_MSG_STORAGE,
3346                            "Image [%d] is too big - %08x bytes where only %08x are available\n",
3347                            image_id, image_att.length, buffer_len);
3348                 return -ENOMEM;
3349         }
3350
3351         return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
3352                                 p_buffer, image_att.length);
3353 }
3354
3355 static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
3356 {
3357         enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3358
3359         switch (res_id) {
3360         case QED_SB:
3361                 mfw_res_id = RESOURCE_NUM_SB_E;
3362                 break;
3363         case QED_L2_QUEUE:
3364                 mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3365                 break;
3366         case QED_VPORT:
3367                 mfw_res_id = RESOURCE_NUM_VPORT_E;
3368                 break;
3369         case QED_RSS_ENG:
3370                 mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3371                 break;
3372         case QED_PQ:
3373                 mfw_res_id = RESOURCE_NUM_PQ_E;
3374                 break;
3375         case QED_RL:
3376                 mfw_res_id = RESOURCE_NUM_RL_E;
3377                 break;
3378         case QED_MAC:
3379         case QED_VLAN:
3380                 /* Each VFC resource can accommodate both a MAC and a VLAN */
3381                 mfw_res_id = RESOURCE_VFC_FILTER_E;
3382                 break;
3383         case QED_ILT:
3384                 mfw_res_id = RESOURCE_ILT_E;
3385                 break;
3386         case QED_LL2_RAM_QUEUE:
3387                 mfw_res_id = RESOURCE_LL2_QUEUE_E;
3388                 break;
3389         case QED_LL2_CTX_QUEUE:
3390                 mfw_res_id = RESOURCE_LL2_CQS_E;
3391                 break;
3392         case QED_RDMA_CNQ_RAM:
3393         case QED_CMDQS_CQS:
3394                 /* CNQ/CMDQS are the same resource */
3395                 mfw_res_id = RESOURCE_CQS_E;
3396                 break;
3397         case QED_RDMA_STATS_QUEUE:
3398                 mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3399                 break;
3400         case QED_BDQ:
3401                 mfw_res_id = RESOURCE_BDQ_E;
3402                 break;
3403         default:
3404                 break;
3405         }
3406
3407         return mfw_res_id;
3408 }
3409
3410 #define QED_RESC_ALLOC_VERSION_MAJOR    2
3411 #define QED_RESC_ALLOC_VERSION_MINOR    0
3412 #define QED_RESC_ALLOC_VERSION                               \
3413         ((QED_RESC_ALLOC_VERSION_MAJOR <<                    \
3414           DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3415          (QED_RESC_ALLOC_VERSION_MINOR <<                    \
3416           DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3417
3418 struct qed_resc_alloc_in_params {
3419         u32 cmd;
3420         enum qed_resources res_id;
3421         u32 resc_max_val;
3422 };
3423
3424 struct qed_resc_alloc_out_params {
3425         u32 mcp_resp;
3426         u32 mcp_param;
3427         u32 resc_num;
3428         u32 resc_start;
3429         u32 vf_resc_num;
3430         u32 vf_resc_start;
3431         u32 flags;
3432 };
3433
3434 static int
3435 qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
3436                             struct qed_ptt *p_ptt,
3437                             struct qed_resc_alloc_in_params *p_in_params,
3438                             struct qed_resc_alloc_out_params *p_out_params)
3439 {
3440         struct qed_mcp_mb_params mb_params;
3441         struct resource_info mfw_resc_info;
3442         int rc;
3443
3444         memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
3445
3446         mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
3447         if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3448                 DP_ERR(p_hwfn,
3449                        "Failed to match resource %d [%s] with the MFW resources\n",
3450                        p_in_params->res_id,
3451                        qed_hw_get_resc_name(p_in_params->res_id));
3452                 return -EINVAL;
3453         }
3454
3455         switch (p_in_params->cmd) {
3456         case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3457                 mfw_resc_info.size = p_in_params->resc_max_val;
3458                 /* Fallthrough */
3459         case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3460                 break;
3461         default:
3462                 DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3463                        p_in_params->cmd);
3464                 return -EINVAL;
3465         }
3466
3467         memset(&mb_params, 0, sizeof(mb_params));
3468         mb_params.cmd = p_in_params->cmd;
3469         mb_params.param = QED_RESC_ALLOC_VERSION;
3470         mb_params.p_data_src = &mfw_resc_info;
3471         mb_params.data_src_size = sizeof(mfw_resc_info);
3472         mb_params.p_data_dst = mb_params.p_data_src;
3473         mb_params.data_dst_size = mb_params.data_src_size;
3474
3475         DP_VERBOSE(p_hwfn,
3476                    QED_MSG_SP,
3477                    "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3478                    p_in_params->cmd,
3479                    p_in_params->res_id,
3480                    qed_hw_get_resc_name(p_in_params->res_id),
3481                    QED_MFW_GET_FIELD(mb_params.param,
3482                                      DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3483                    QED_MFW_GET_FIELD(mb_params.param,
3484                                      DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3485                    p_in_params->resc_max_val);
3486
3487         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3488         if (rc)
3489                 return rc;
3490
3491         p_out_params->mcp_resp = mb_params.mcp_resp;
3492         p_out_params->mcp_param = mb_params.mcp_param;
3493         p_out_params->resc_num = mfw_resc_info.size;
3494         p_out_params->resc_start = mfw_resc_info.offset;
3495         p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3496         p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3497         p_out_params->flags = mfw_resc_info.flags;
3498
3499         DP_VERBOSE(p_hwfn,
3500                    QED_MSG_SP,
3501                    "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3502                    QED_MFW_GET_FIELD(p_out_params->mcp_param,
3503                                      FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3504                    QED_MFW_GET_FIELD(p_out_params->mcp_param,
3505                                      FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3506                    p_out_params->resc_num,
3507                    p_out_params->resc_start,
3508                    p_out_params->vf_resc_num,
3509                    p_out_params->vf_resc_start, p_out_params->flags);
3510
3511         return 0;
3512 }
3513
3514 int
3515 qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
3516                          struct qed_ptt *p_ptt,
3517                          enum qed_resources res_id,
3518                          u32 resc_max_val, u32 *p_mcp_resp)
3519 {
3520         struct qed_resc_alloc_out_params out_params;
3521         struct qed_resc_alloc_in_params in_params;
3522         int rc;
3523
3524         memset(&in_params, 0, sizeof(in_params));
3525         in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3526         in_params.res_id = res_id;
3527         in_params.resc_max_val = resc_max_val;
3528         memset(&out_params, 0, sizeof(out_params));
3529         rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3530                                          &out_params);
3531         if (rc)
3532                 return rc;
3533
3534         *p_mcp_resp = out_params.mcp_resp;
3535
3536         return 0;
3537 }
3538
3539 int
3540 qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
3541                       struct qed_ptt *p_ptt,
3542                       enum qed_resources res_id,
3543                       u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
3544 {
3545         struct qed_resc_alloc_out_params out_params;
3546         struct qed_resc_alloc_in_params in_params;
3547         int rc;
3548
3549         memset(&in_params, 0, sizeof(in_params));
3550         in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3551         in_params.res_id = res_id;
3552         memset(&out_params, 0, sizeof(out_params));
3553         rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3554                                          &out_params);
3555         if (rc)
3556                 return rc;
3557
3558         *p_mcp_resp = out_params.mcp_resp;
3559
3560         if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3561                 *p_resc_num = out_params.resc_num;
3562                 *p_resc_start = out_params.resc_start;
3563         }
3564
3565         return 0;
3566 }
3567
3568 int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3569 {
3570         u32 mcp_resp, mcp_param;
3571
3572         return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3573                            &mcp_resp, &mcp_param);
3574 }
3575
3576 static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
3577                                 struct qed_ptt *p_ptt,
3578                                 u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
3579 {
3580         int rc;
3581
3582         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3583                          p_mcp_resp, p_mcp_param);
3584         if (rc)
3585                 return rc;
3586
3587         if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3588                 DP_INFO(p_hwfn,
3589                         "The resource command is unsupported by the MFW\n");
3590                 return -EINVAL;
3591         }
3592
3593         if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3594                 u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3595
3596                 DP_NOTICE(p_hwfn,
3597                           "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3598                           param, opcode);
3599                 return -EINVAL;
3600         }
3601
3602         return rc;
3603 }
3604
3605 static int
3606 __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3607                     struct qed_ptt *p_ptt,
3608                     struct qed_resc_lock_params *p_params)
3609 {
3610         u32 param = 0, mcp_resp, mcp_param;
3611         u8 opcode;
3612         int rc;
3613
3614         switch (p_params->timeout) {
3615         case QED_MCP_RESC_LOCK_TO_DEFAULT:
3616                 opcode = RESOURCE_OPCODE_REQ;
3617                 p_params->timeout = 0;
3618                 break;
3619         case QED_MCP_RESC_LOCK_TO_NONE:
3620                 opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3621                 p_params->timeout = 0;
3622                 break;
3623         default:
3624                 opcode = RESOURCE_OPCODE_REQ_W_AGING;
3625                 break;
3626         }
3627
3628         QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3629         QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3630         QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3631
3632         DP_VERBOSE(p_hwfn,
3633                    QED_MSG_SP,
3634                    "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3635                    param, p_params->timeout, opcode, p_params->resource);
3636
3637         /* Attempt to acquire the resource */
3638         rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3639         if (rc)
3640                 return rc;
3641
3642         /* Analyze the response */
3643         p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3644         opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3645
3646         DP_VERBOSE(p_hwfn,
3647                    QED_MSG_SP,
3648                    "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3649                    mcp_param, opcode, p_params->owner);
3650
3651         switch (opcode) {
3652         case RESOURCE_OPCODE_GNT:
3653                 p_params->b_granted = true;
3654                 break;
3655         case RESOURCE_OPCODE_BUSY:
3656                 p_params->b_granted = false;
3657                 break;
3658         default:
3659                 DP_NOTICE(p_hwfn,
3660                           "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3661                           mcp_param, opcode);
3662                 return -EINVAL;
3663         }
3664
3665         return 0;
3666 }
3667
3668 int
3669 qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3670                   struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
3671 {
3672         u32 retry_cnt = 0;
3673         int rc;
3674
3675         do {
3676                 /* No need for an interval before the first iteration */
3677                 if (retry_cnt) {
3678                         if (p_params->sleep_b4_retry) {
3679                                 u16 retry_interval_in_ms =
3680                                     DIV_ROUND_UP(p_params->retry_interval,
3681                                                  1000);
3682
3683                                 msleep(retry_interval_in_ms);
3684                         } else {
3685                                 udelay(p_params->retry_interval);
3686                         }
3687                 }
3688
3689                 rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3690                 if (rc)
3691                         return rc;
3692
3693                 if (p_params->b_granted)
3694                         break;
3695         } while (retry_cnt++ < p_params->retry_num);
3696
3697         return 0;
3698 }
3699
3700 int
3701 qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
3702                     struct qed_ptt *p_ptt,
3703                     struct qed_resc_unlock_params *p_params)
3704 {
3705         u32 param = 0, mcp_resp, mcp_param;
3706         u8 opcode;
3707         int rc;
3708
3709         opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3710                                    : RESOURCE_OPCODE_RELEASE;
3711         QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3712         QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3713
3714         DP_VERBOSE(p_hwfn, QED_MSG_SP,
3715                    "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3716                    param, opcode, p_params->resource);
3717
3718         /* Attempt to release the resource */
3719         rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3720         if (rc)
3721                 return rc;
3722
3723         /* Analyze the response */
3724         opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3725
3726         DP_VERBOSE(p_hwfn, QED_MSG_SP,
3727                    "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3728                    mcp_param, opcode);
3729
3730         switch (opcode) {
3731         case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3732                 DP_INFO(p_hwfn,
3733                         "Resource unlock request for an already released resource [%d]\n",
3734                         p_params->resource);
3735                 /* Fallthrough */
3736         case RESOURCE_OPCODE_RELEASED:
3737                 p_params->b_released = true;
3738                 break;
3739         case RESOURCE_OPCODE_WRONG_OWNER:
3740                 p_params->b_released = false;
3741                 break;
3742         default:
3743                 DP_NOTICE(p_hwfn,
3744                           "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3745                           mcp_param, opcode);
3746                 return -EINVAL;
3747         }
3748
3749         return 0;
3750 }
3751
3752 void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
3753                                     struct qed_resc_unlock_params *p_unlock,
3754                                     enum qed_resc_lock
3755                                     resource, bool b_is_permanent)
3756 {
3757         if (p_lock) {
3758                 memset(p_lock, 0, sizeof(*p_lock));
3759
3760                 /* Permanent resources don't require aging, and there's no
3761                  * point in trying to acquire them more than once since it's
3762                  * unexpected another entity would release them.
3763                  */
3764                 if (b_is_permanent) {
3765                         p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
3766                 } else {
3767                         p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3768                         p_lock->retry_interval =
3769                             QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3770                         p_lock->sleep_b4_retry = true;
3771                 }
3772
3773                 p_lock->resource = resource;
3774         }
3775
3776         if (p_unlock) {
3777                 memset(p_unlock, 0, sizeof(*p_unlock));
3778                 p_unlock->resource = resource;
3779         }
3780 }
3781
3782 bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
3783 {
3784         return !!(p_hwfn->mcp_info->capabilities &
3785                   FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3786 }
3787
3788 int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3789 {
3790         u32 mcp_resp;
3791         int rc;
3792
3793         rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3794                          0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3795         if (!rc)
3796                 DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
3797                            "MFW supported features: %08x\n",
3798                            p_hwfn->mcp_info->capabilities);
3799
3800         return rc;
3801 }
3802
3803 int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3804 {
3805         u32 mcp_resp, mcp_param, features;
3806
3807         features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3808                    DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
3809
3810         return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3811                            features, &mcp_resp, &mcp_param);
3812 }
3813
3814 int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3815 {
3816         struct qed_mcp_mb_params mb_params = {0};
3817         struct qed_dev *cdev = p_hwfn->cdev;
3818         u8 fir_valid, l2_valid;
3819         int rc;
3820
3821         mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
3822         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3823         if (rc)
3824                 return rc;
3825
3826         if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3827                 DP_INFO(p_hwfn,
3828                         "The get_engine_config command is unsupported by the MFW\n");
3829                 return -EOPNOTSUPP;
3830         }
3831
3832         fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3833                                       FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
3834         if (fir_valid)
3835                 cdev->fir_affin =
3836                     QED_MFW_GET_FIELD(mb_params.mcp_param,
3837                                       FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
3838
3839         l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3840                                      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
3841         if (l2_valid)
3842                 cdev->l2_affin_hint =
3843                     QED_MFW_GET_FIELD(mb_params.mcp_param,
3844                                       FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
3845
3846         DP_INFO(p_hwfn,
3847                 "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
3848                 fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
3849
3850         return 0;
3851 }
3852
3853 int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3854 {
3855         struct qed_mcp_mb_params mb_params = {0};
3856         struct qed_dev *cdev = p_hwfn->cdev;
3857         int rc;
3858
3859         mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
3860         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3861         if (rc)
3862                 return rc;
3863
3864         if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3865                 DP_INFO(p_hwfn,
3866                         "The get_ppfid_bitmap command is unsupported by the MFW\n");
3867                 return -EOPNOTSUPP;
3868         }
3869
3870         cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
3871                                                FW_MB_PARAM_PPFID_BITMAP);
3872
3873         DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
3874                    cdev->ppfid_bitmap);
3875
3876         return 0;
3877 }
3878
3879 int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3880                         u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3881                         u32 *p_len)
3882 {
3883         u32 mb_param = 0, resp, param;
3884         int rc;
3885
3886         QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3887         if (flags & QED_NVM_CFG_OPTION_INIT)
3888                 QED_MFW_SET_FIELD(mb_param,
3889                                   DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3890         if (flags & QED_NVM_CFG_OPTION_FREE)
3891                 QED_MFW_SET_FIELD(mb_param,
3892                                   DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3893         if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3894                 QED_MFW_SET_FIELD(mb_param,
3895                                   DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3896                 QED_MFW_SET_FIELD(mb_param,
3897                                   DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
3898                                   entity_id);
3899         }
3900
3901         rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3902                                 DRV_MSG_CODE_GET_NVM_CFG_OPTION,
3903                                 mb_param, &resp, &param, p_len, (u32 *)p_buf);
3904
3905         return rc;
3906 }
3907
3908 int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3909                         u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3910                         u32 len)
3911 {
3912         u32 mb_param = 0, resp, param;
3913
3914         QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3915         if (flags & QED_NVM_CFG_OPTION_ALL)
3916                 QED_MFW_SET_FIELD(mb_param,
3917                                   DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
3918         if (flags & QED_NVM_CFG_OPTION_INIT)
3919                 QED_MFW_SET_FIELD(mb_param,
3920                                   DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3921         if (flags & QED_NVM_CFG_OPTION_COMMIT)
3922                 QED_MFW_SET_FIELD(mb_param,
3923                                   DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
3924         if (flags & QED_NVM_CFG_OPTION_FREE)
3925                 QED_MFW_SET_FIELD(mb_param,
3926                                   DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3927         if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3928                 QED_MFW_SET_FIELD(mb_param,
3929                                   DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3930                 QED_MFW_SET_FIELD(mb_param,
3931                                   DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
3932                                   entity_id);
3933         }
3934
3935         return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3936                                   DRV_MSG_CODE_SET_NVM_CFG_OPTION,
3937                                   mb_param, &resp, &param, len, (u32 *)p_buf);
3938 }
3939
3940 #define QED_MCP_DBG_DATA_MAX_SIZE               MCP_DRV_NVM_BUF_LEN
3941 #define QED_MCP_DBG_DATA_MAX_HEADER_SIZE        sizeof(u32)
3942 #define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
3943         (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
3944
3945 static int
3946 __qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
3947                           struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
3948 {
3949         struct qed_mcp_mb_params mb_params;
3950         int rc;
3951
3952         if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
3953                 DP_ERR(p_hwfn,
3954                        "Debug data size is %d while it should not exceed %d\n",
3955                        size, QED_MCP_DBG_DATA_MAX_SIZE);
3956                 return -EINVAL;
3957         }
3958
3959         memset(&mb_params, 0, sizeof(mb_params));
3960         mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
3961         SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
3962         mb_params.p_data_src = p_buf;
3963         mb_params.data_src_size = size;
3964         rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3965         if (rc)
3966                 return rc;
3967
3968         if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3969                 DP_INFO(p_hwfn,
3970                         "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
3971                 return -EOPNOTSUPP;
3972         } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
3973                 DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
3974                 return -EBUSY;
3975         } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
3976                 DP_NOTICE(p_hwfn,
3977                           "Failed to send debug data to the MFW [resp 0x%08x]\n",
3978                           mb_params.mcp_resp);
3979                 return -EINVAL;
3980         }
3981
3982         return 0;
3983 }
3984
3985 enum qed_mcp_dbg_data_type {
3986         QED_MCP_DBG_DATA_TYPE_RAW,
3987 };
3988
3989 /* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
3990 #define QED_MCP_DBG_DATA_HDR_SN_OFFSET  0
3991 #define QED_MCP_DBG_DATA_HDR_SN_MASK            0x00000fff
3992 #define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET        12
3993 #define QED_MCP_DBG_DATA_HDR_TYPE_MASK  0x000ff000
3994 #define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET       20
3995 #define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
3996 #define QED_MCP_DBG_DATA_HDR_PF_OFFSET  28
3997 #define QED_MCP_DBG_DATA_HDR_PF_MASK            0xf0000000
3998
3999 #define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST        0x1
4000 #define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
4001
4002 static int
4003 qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
4004                         struct qed_ptt *p_ptt,
4005                         enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
4006 {
4007         u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
4008         u32 tmp_size = size, *p_header, *p_payload;
4009         u8 flags = 0;
4010         u16 seq;
4011         int rc;
4012
4013         p_header = (u32 *)raw_data;
4014         p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
4015
4016         seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
4017
4018         /* First chunk is marked as 'first' */
4019         flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4020
4021         *p_header = 0;
4022         SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
4023         SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
4024         SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4025         SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
4026
4027         while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
4028                 memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
4029                 rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4030                                                QED_MCP_DBG_DATA_MAX_SIZE);
4031                 if (rc)
4032                         return rc;
4033
4034                 /* Clear the 'first' marking after sending the first chunk */
4035                 if (p_tmp_buf == p_buf) {
4036                         flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4037                         SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
4038                                       flags);
4039                 }
4040
4041                 p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4042                 tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4043         }
4044
4045         /* Last chunk is marked as 'last' */
4046         flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
4047         SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4048         memcpy(p_payload, p_tmp_buf, tmp_size);
4049
4050         /* Casting the left size to u8 is ok since at this point it is <= 32 */
4051         return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4052                                          (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
4053                                          tmp_size));
4054 }
4055
4056 int
4057 qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
4058                             struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
4059 {
4060         return qed_mcp_send_debug_data(p_hwfn, p_ptt,
4061                                        QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
4062 }