qed: Add CONFIG_QED_SRIOV
[linux-2.6-microblaze.git] / drivers/net/ethernet/qlogic/qed/qed_dev.c
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include <linux/io.h>
12 #include <linux/delay.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/errno.h>
15 #include <linux/kernel.h>
16 #include <linux/mutex.h>
17 #include <linux/pci.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/etherdevice.h>
21 #include <linux/qed/qed_chain.h>
22 #include <linux/qed/qed_if.h>
23 #include "qed.h"
24 #include "qed_cxt.h"
25 #include "qed_dev_api.h"
26 #include "qed_hsi.h"
27 #include "qed_hw.h"
28 #include "qed_init_ops.h"
29 #include "qed_int.h"
30 #include "qed_mcp.h"
31 #include "qed_reg_addr.h"
32 #include "qed_sp.h"
33 #include "qed_sriov.h"
34
35 /* API common to all protocols */
36 enum BAR_ID {
37         BAR_ID_0,       /* used for GRC */
38         BAR_ID_1        /* Used for doorbells */
39 };
40
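/* qed_hw_bar_size() returns the per-PF BAR size in bytes. A non-zero value
 * read from the PGLUE_B BAR-size register encodes the size as
 * 1 << (value + 15) bytes; zero means the MFW did not configure the register,
 * in which case a default is assumed based on the number of hw-functions.
 */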
41 static u32 qed_hw_bar_size(struct qed_hwfn      *p_hwfn,
42                            enum BAR_ID          bar_id)
43 {
44         u32     bar_reg = (bar_id == BAR_ID_0 ?
45                            PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
46         u32     val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
47
48         if (val)
49                 return 1 << (val + 15);
50
51         /* An old MFW initializes the above register only conditionally */
52         if (p_hwfn->cdev->num_hwfns > 1) {
53                 DP_INFO(p_hwfn,
54                         "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
55                 return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
56         } else {
57                 DP_INFO(p_hwfn,
58                         "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
59                 return 512 * 1024;
60         }
61 }
62
63 void qed_init_dp(struct qed_dev *cdev,
64                  u32 dp_module, u8 dp_level)
65 {
66         u32 i;
67
68         cdev->dp_level = dp_level;
69         cdev->dp_module = dp_module;
70         for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
71                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
72
73                 p_hwfn->dp_level = dp_level;
74                 p_hwfn->dp_module = dp_module;
75         }
76 }
77
78 void qed_init_struct(struct qed_dev *cdev)
79 {
80         u8 i;
81
82         for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
83                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
84
85                 p_hwfn->cdev = cdev;
86                 p_hwfn->my_id = i;
87                 p_hwfn->b_active = false;
88
89                 mutex_init(&p_hwfn->dmae_info.mutex);
90         }
91
92         /* hwfn 0 is always active */
93         cdev->hwfns[0].b_active = true;
94
95         /* set the default cache alignment to 128 */
96         cdev->cache_shift = 7;
97 }
98
99 static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
100 {
101         struct qed_qm_info *qm_info = &p_hwfn->qm_info;
102
103         kfree(qm_info->qm_pq_params);
104         qm_info->qm_pq_params = NULL;
105         kfree(qm_info->qm_vport_params);
106         qm_info->qm_vport_params = NULL;
107         kfree(qm_info->qm_port_params);
108         qm_info->qm_port_params = NULL;
109         kfree(qm_info->wfq_data);
110         qm_info->wfq_data = NULL;
111 }
112
113 void qed_resc_free(struct qed_dev *cdev)
114 {
115         int i;
116
117         kfree(cdev->fw_data);
118         cdev->fw_data = NULL;
119
120         kfree(cdev->reset_stats);
121
122         for_each_hwfn(cdev, i) {
123                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
124
125                 kfree(p_hwfn->p_tx_cids);
126                 p_hwfn->p_tx_cids = NULL;
127                 kfree(p_hwfn->p_rx_cids);
128                 p_hwfn->p_rx_cids = NULL;
129         }
130
131         for_each_hwfn(cdev, i) {
132                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
133
134                 qed_cxt_mngr_free(p_hwfn);
135                 qed_qm_info_free(p_hwfn);
136                 qed_spq_free(p_hwfn);
137                 qed_eq_free(p_hwfn, p_hwfn->p_eq);
138                 qed_consq_free(p_hwfn, p_hwfn->p_consq);
139                 qed_int_free(p_hwfn);
140                 qed_iov_free(p_hwfn);
141                 qed_dmae_info_free(p_hwfn);
142         }
143 }
144
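/* qed_init_qm_info() derives the queue-manager configuration for this
 * hw-function: it allocates the per-PQ, per-vport and per-port parameter
 * arrays plus the WFQ data, lays out the per-TC PQ(s) followed by a pure-LB
 * PQ, and records the PQ/vport ranges this PF owns out of the global
 * resources.
 */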
145 static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
146 {
147         struct qed_qm_info *qm_info = &p_hwfn->qm_info;
148         struct init_qm_port_params *p_qm_port;
149         u8 num_vports, i, vport_id, num_ports;
150         u16 num_pqs, multi_cos_tcs = 1;
151
152         memset(qm_info, 0, sizeof(*qm_info));
153
154         num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
155         num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
156
157         /* Sanity check that the setup requires a legal number of resources */
158         if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
159                 DP_ERR(p_hwfn,
160                        "Need too many Physical queues - 0x%04x when only 0x%04x are available\n",
161                        num_pqs, RESC_NUM(p_hwfn, QED_PQ));
162                 return -EINVAL;
163         }
164
165         /* PQs will be arranged as follows: First per-TC PQ, then the pure-LB queue.
166          */
167         qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
168                                         num_pqs, GFP_KERNEL);
169         if (!qm_info->qm_pq_params)
170                 goto alloc_err;
171
172         qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
173                                            num_vports, GFP_KERNEL);
174         if (!qm_info->qm_vport_params)
175                 goto alloc_err;
176
177         qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
178                                           MAX_NUM_PORTS, GFP_KERNEL);
179         if (!qm_info->qm_port_params)
180                 goto alloc_err;
181
182         qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
183                                     GFP_KERNEL);
184         if (!qm_info->wfq_data)
185                 goto alloc_err;
186
187         vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
188
189         /* First init per-TC PQs */
190         for (i = 0; i < multi_cos_tcs; i++) {
191                 struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
192
193                 params->vport_id = vport_id;
194                 params->tc_id = p_hwfn->hw_info.non_offload_tc;
195                 params->wrr_group = 1;
196         }
197
198         /* Then init pure-LB PQ */
199         qm_info->pure_lb_pq = i;
200         qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
201         qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
202         qm_info->qm_pq_params[i].wrr_group = 1;
203         i++;
204
205         qm_info->offload_pq = 0;
206         qm_info->num_pqs = num_pqs;
207         qm_info->num_vports = num_vports;
208
209         /* Initialize qm port parameters */
210         num_ports = p_hwfn->cdev->num_ports_in_engines;
211         for (i = 0; i < num_ports; i++) {
212                 p_qm_port = &qm_info->qm_port_params[i];
213                 p_qm_port->active = 1;
214                 p_qm_port->num_active_phys_tcs = 4;
215                 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
216                 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
217         }
218
219         qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
220
221         qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
222
223         qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
224
225         for (i = 0; i < qm_info->num_vports; i++)
226                 qm_info->qm_vport_params[i].vport_wfq = 1;
227
228         qm_info->pf_wfq = 0;
229         qm_info->pf_rl = 0;
230         qm_info->vport_rl_en = 1;
231         qm_info->vport_wfq_en = 1;
232
233         return 0;
234
235 alloc_err:
236         DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
237         qed_qm_info_free(p_hwfn);
238         return -ENOMEM;
239 }
240
241 int qed_resc_alloc(struct qed_dev *cdev)
242 {
243         struct qed_consq *p_consq;
244         struct qed_eq *p_eq;
245         int i, rc = 0;
246
247         cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
248         if (!cdev->fw_data)
249                 return -ENOMEM;
250
251         /* Allocate Memory for the Queue->CID mapping */
252         for_each_hwfn(cdev, i) {
253                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
254                 int tx_size = sizeof(struct qed_hw_cid_data) *
255                                      RESC_NUM(p_hwfn, QED_L2_QUEUE);
256                 int rx_size = sizeof(struct qed_hw_cid_data) *
257                                      RESC_NUM(p_hwfn, QED_L2_QUEUE);
258
259                 p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
260                 if (!p_hwfn->p_tx_cids) {
261                         DP_NOTICE(p_hwfn,
262                                   "Failed to allocate memory for Tx Cids\n");
263                         rc = -ENOMEM;
264                         goto alloc_err;
265                 }
266
267                 p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
268                 if (!p_hwfn->p_rx_cids) {
269                         DP_NOTICE(p_hwfn,
270                                   "Failed to allocate memory for Rx Cids\n");
271                         rc = -ENOMEM;
272                         goto alloc_err;
273                 }
274         }
275
276         for_each_hwfn(cdev, i) {
277                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
278
279                 /* First allocate the context manager structure */
280                 rc = qed_cxt_mngr_alloc(p_hwfn);
281                 if (rc)
282                         goto alloc_err;
283
284                 /* Set the HW cid/tid numbers (in the context manager)
285                  * Must be done prior to any further computations.
286                  */
287                 rc = qed_cxt_set_pf_params(p_hwfn);
288                 if (rc)
289                         goto alloc_err;
290
291                 /* Prepare and process QM requirements */
292                 rc = qed_init_qm_info(p_hwfn);
293                 if (rc)
294                         goto alloc_err;
295
296                 /* Compute the ILT client partition */
297                 rc = qed_cxt_cfg_ilt_compute(p_hwfn);
298                 if (rc)
299                         goto alloc_err;
300
301                 /* CID map / ILT shadow table / T2
302                  * The table sizes are determined by the computations above
303                  */
304                 rc = qed_cxt_tables_alloc(p_hwfn);
305                 if (rc)
306                         goto alloc_err;
307
308                 /* SPQ, must follow ILT because it initializes the SPQ context */
309                 rc = qed_spq_alloc(p_hwfn);
310                 if (rc)
311                         goto alloc_err;
312
313                 /* SP status block allocation */
314                 p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
315                                                          RESERVED_PTT_DPC);
316
317                 rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
318                 if (rc)
319                         goto alloc_err;
320
321                 rc = qed_iov_alloc(p_hwfn);
322                 if (rc)
323                         goto alloc_err;
324
325                 /* EQ */
326                 p_eq = qed_eq_alloc(p_hwfn, 256);
327                 if (!p_eq) {
328                         rc = -ENOMEM;
329                         goto alloc_err;
330                 }
331                 p_hwfn->p_eq = p_eq;
332
333                 p_consq = qed_consq_alloc(p_hwfn);
334                 if (!p_consq) {
335                         rc = -ENOMEM;
336                         goto alloc_err;
337                 }
338                 p_hwfn->p_consq = p_consq;
339
340                 /* DMA info initialization */
341                 rc = qed_dmae_info_alloc(p_hwfn);
342                 if (rc) {
343                         DP_NOTICE(p_hwfn,
344                                   "Failed to allocate memory for dmae_info structure\n");
345                         goto alloc_err;
346                 }
347         }
348
349         cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
350         if (!cdev->reset_stats) {
351                 DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
352                 rc = -ENOMEM;
353                 goto alloc_err;
354         }
355
356         return 0;
357
358 alloc_err:
359         qed_resc_free(cdev);
360         return rc;
361 }
362
363 void qed_resc_setup(struct qed_dev *cdev)
364 {
365         int i;
366
367         for_each_hwfn(cdev, i) {
368                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
369
370                 qed_cxt_mngr_setup(p_hwfn);
371                 qed_spq_setup(p_hwfn);
372                 qed_eq_setup(p_hwfn, p_hwfn->p_eq);
373                 qed_consq_setup(p_hwfn, p_hwfn->p_consq);
374
375                 /* Read shadow of current MFW mailbox */
376                 qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
377                 memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
378                        p_hwfn->mcp_info->mfw_mb_cur,
379                        p_hwfn->mcp_info->mfw_mb_length);
380
381                 qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
382
383                 qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
384         }
385 }
386
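/* Final-cleanup handshake with the firmware: an SDM "op gen" command is
 * written to XSDM_REG_OPERATION_GEN, and completion is detected by polling
 * the PF's final-ack word in USTORM RAM for up to FINAL_CLEANUP_POLL_CNT
 * iterations of FINAL_CLEANUP_POLL_TIME milliseconds each.
 */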
387 #define FINAL_CLEANUP_POLL_CNT          (100)
388 #define FINAL_CLEANUP_POLL_TIME         (10)
389 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
390                       struct qed_ptt *p_ptt,
391                       u16 id)
392 {
393         u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
394         int rc = -EBUSY;
395
396         addr = GTT_BAR0_MAP_REG_USDM_RAM +
397                 USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
398
399         command |= X_FINAL_CLEANUP_AGG_INT <<
400                 SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
401         command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
402         command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
403         command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
404
405         /* Make sure notification is not set before initiating final cleanup */
406         if (REG_RD(p_hwfn, addr)) {
407                 DP_NOTICE(
408                         p_hwfn,
409                         "Unexpected; Found final cleanup notification before initiating final cleanup\n");
410                 REG_WR(p_hwfn, addr, 0);
411         }
412
413         DP_VERBOSE(p_hwfn, QED_MSG_IOV,
414                    "Sending final cleanup for PFVF[%d] [Command %08x]\n",
415                    id, command);
416
417         qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
418
419         /* Poll until completion */
420         while (!REG_RD(p_hwfn, addr) && count--)
421                 msleep(FINAL_CLEANUP_POLL_TIME);
422
423         if (REG_RD(p_hwfn, addr))
424                 rc = 0;
425         else
426                 DP_NOTICE(p_hwfn,
427                           "Failed to receive FW final cleanup notification\n");
428
429         /* Cleanup afterwards */
430         REG_WR(p_hwfn, addr, 0);
431
432         return rc;
433 }
434
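/* Build the hw_mode bitmask consumed by the init-tool phases: chip revision
 * (BB B0), number of ports per engine, multi-function mode (SI/SD) and the
 * ASIC bit. The result is cached in p_hwfn->hw_info.hw_mode and passed to
 * qed_init_run() for every init phase.
 */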
435 static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
436 {
437         int hw_mode = 0;
438
439         hw_mode = (1 << MODE_BB_B0);
440
441         switch (p_hwfn->cdev->num_ports_in_engines) {
442         case 1:
443                 hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
444                 break;
445         case 2:
446                 hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
447                 break;
448         case 4:
449                 hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
450                 break;
451         default:
452                 DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
453                           p_hwfn->cdev->num_ports_in_engines);
454                 return;
455         }
456
457         switch (p_hwfn->cdev->mf_mode) {
458         case QED_MF_DEFAULT:
459         case QED_MF_NPAR:
460                 hw_mode |= 1 << MODE_MF_SI;
461                 break;
462         case QED_MF_OVLAN:
463                 hw_mode |= 1 << MODE_MF_SD;
464                 break;
465         default:
466                 DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
467                 hw_mode |= 1 << MODE_MF_SI;
468         }
469
470         hw_mode |= 1 << MODE_ASIC;
471
472         p_hwfn->hw_info.hw_mode = hw_mode;
473 }
474
475 /* Init run time data for all PFs on an engine. */
476 static void qed_init_cau_rt_data(struct qed_dev *cdev)
477 {
478         u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
479         int i, sb_id;
480
481         for_each_hwfn(cdev, i) {
482                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
483                 struct qed_igu_info *p_igu_info;
484                 struct qed_igu_block *p_block;
485                 struct cau_sb_entry sb_entry;
486
487                 p_igu_info = p_hwfn->hw_info.p_igu_info;
488
489                 for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
490                      sb_id++) {
491                         p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
492                         if (!p_block->is_pf)
493                                 continue;
494
495                         qed_init_cau_sb_entry(p_hwfn, &sb_entry,
496                                               p_block->function_id,
497                                               0, 0);
498                         STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
499                                          sb_entry);
500                 }
501         }
502 }
503
504 static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
505                               struct qed_ptt *p_ptt,
506                               int hw_mode)
507 {
508         struct qed_qm_info *qm_info = &p_hwfn->qm_info;
509         struct qed_qm_common_rt_init_params params;
510         struct qed_dev *cdev = p_hwfn->cdev;
511         int rc = 0;
512
513         qed_init_cau_rt_data(cdev);
514
515         /* Program GTT windows */
516         qed_gtt_init(p_hwfn);
517
518         if (p_hwfn->mcp_info) {
519                 if (p_hwfn->mcp_info->func_info.bandwidth_max)
520                         qm_info->pf_rl_en = 1;
521                 if (p_hwfn->mcp_info->func_info.bandwidth_min)
522                         qm_info->pf_wfq_en = 1;
523         }
524
525         memset(&params, 0, sizeof(params));
526         params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
527         params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
528         params.pf_rl_en = qm_info->pf_rl_en;
529         params.pf_wfq_en = qm_info->pf_wfq_en;
530         params.vport_rl_en = qm_info->vport_rl_en;
531         params.vport_wfq_en = qm_info->vport_wfq_en;
532         params.port_params = qm_info->qm_port_params;
533
534         qed_qm_common_rt_init(p_hwfn, &params);
535
536         qed_cxt_hw_init_common(p_hwfn);
537
538         /* Close gate from NIG to BRB/Storm; By default they are open, but
539          * we close them to prevent NIG from passing data to reset blocks.
540          * Should have been done in the ENGINE phase, but init-tool lacks
541          * proper port-pretend capabilities.
542          */
543         qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
544         qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
545         qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
546         qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
547         qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
548         qed_port_unpretend(p_hwfn, p_ptt);
549
550         rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
551         if (rc != 0)
552                 return rc;
553
554         qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
555         qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
556
557         /* Disable relaxed ordering in the PCI config space */
558         qed_wr(p_hwfn, p_ptt, 0x20b4,
559                qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
560
561         return rc;
562 }
563
564 static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
565                             struct qed_ptt *p_ptt,
566                             int hw_mode)
567 {
568         int rc = 0;
569
570         rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
571                           hw_mode);
572         return rc;
573 }
574
575 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
576                           struct qed_ptt *p_ptt,
577                           struct qed_tunn_start_params *p_tunn,
578                           int hw_mode,
579                           bool b_hw_start,
580                           enum qed_int_mode int_mode,
581                           bool allow_npar_tx_switch)
582 {
583         u8 rel_pf_id = p_hwfn->rel_pf_id;
584         int rc = 0;
585
586         if (p_hwfn->mcp_info) {
587                 struct qed_mcp_function_info *p_info;
588
589                 p_info = &p_hwfn->mcp_info->func_info;
590                 if (p_info->bandwidth_min)
591                         p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
592
593                 /* Update the rate limit once we actually have a link */
594                 p_hwfn->qm_info.pf_rl = 100000;
595         }
596
597         qed_cxt_hw_init_pf(p_hwfn);
598
599         qed_int_igu_init_rt(p_hwfn);
600
601         /* Set VLAN in NIG if needed */
602         if (hw_mode & (1 << MODE_MF_SD)) {
603                 DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
604                 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
605                 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
606                              p_hwfn->hw_info.ovlan);
607         }
608
609         /* Enable classification by MAC if needed */
610         if (hw_mode & (1 << MODE_MF_SI)) {
611                 DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
612                            "Configuring TAGMAC_CLS_TYPE\n");
613                 STORE_RT_REG(p_hwfn,
614                              NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
615         }
616
617         /* Protocol Configuration */
618         STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
619         STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
620         STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
621
622         /* Clean the chip of any remains left by a previous driver */
623         rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
624         if (rc != 0)
625                 return rc;
626
627         /* PF Init sequence */
628         rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
629         if (rc)
630                 return rc;
631
632         /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
633         rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
634         if (rc)
635                 return rc;
636
637         /* Pure runtime initializations - directly to the HW  */
638         qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
639
640         if (b_hw_start) {
641                 /* enable interrupts */
642                 qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
643
644                 /* send function start command */
645                 rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
646                 if (rc)
647                         DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
648         }
649         return rc;
650 }
651
652 static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
653                                struct qed_ptt *p_ptt,
654                                u8 enable)
655 {
656         u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
657
658         /* Change PF in PXP */
659         qed_wr(p_hwfn, p_ptt,
660                PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
661
662         /* wait until value is set - try for 1 second every 50us */
663         for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
664                 val = qed_rd(p_hwfn, p_ptt,
665                              PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
666                 if (val == set_val)
667                         break;
668
669                 usleep_range(50, 60);
670         }
671
672         if (val != set_val) {
673                 DP_NOTICE(p_hwfn,
674                           "PFID_ENABLE_MASTER wasn't changed after a second\n");
675                 return -EAGAIN;
676         }
677
678         return 0;
679 }
680
681 static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
682                                 struct qed_ptt *p_main_ptt)
683 {
684         /* Read shadow of current MFW mailbox */
685         qed_mcp_read_mb(p_hwfn, p_main_ptt);
686         memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
687                p_hwfn->mcp_info->mfw_mb_cur,
688                p_hwfn->mcp_info->mfw_mb_length);
689 }
690
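/* Top-level HW init. For each hw-function: enable DMAE in PXP, send LOAD_REQ
 * to the MFW, and run the init phases that the returned load code entitles
 * us to (ENGINE, PORT and/or FUNCTION; the switch below intentionally falls
 * through so a higher load code also runs the lower phases). Regardless of
 * the outcome, the MFW is then ACKed with LOAD_DONE.
 */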
691 int qed_hw_init(struct qed_dev *cdev,
692                 struct qed_tunn_start_params *p_tunn,
693                 bool b_hw_start,
694                 enum qed_int_mode int_mode,
695                 bool allow_npar_tx_switch,
696                 const u8 *bin_fw_data)
697 {
698         u32 load_code, param;
699         int rc, mfw_rc, i;
700
701         rc = qed_init_fw_data(cdev, bin_fw_data);
702         if (rc != 0)
703                 return rc;
704
705         for_each_hwfn(cdev, i) {
706                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
707
708                 /* Enable DMAE in PXP */
709                 rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
710
711                 qed_calc_hw_mode(p_hwfn);
712
713                 rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
714                                       &load_code);
715                 if (rc) {
716                         DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
717                         return rc;
718                 }
719
720                 qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
721
722                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
723                            "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
724                            rc, load_code);
725
726                 p_hwfn->first_on_engine = (load_code ==
727                                            FW_MSG_CODE_DRV_LOAD_ENGINE);
728
729                 switch (load_code) {
730                 case FW_MSG_CODE_DRV_LOAD_ENGINE:
731                         rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
732                                                 p_hwfn->hw_info.hw_mode);
733                         if (rc)
734                                 break;
735                 /* Fall through */
736                 case FW_MSG_CODE_DRV_LOAD_PORT:
737                         rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
738                                               p_hwfn->hw_info.hw_mode);
739                         if (rc)
740                                 break;
741
742                 /* Fall through */
743                 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
744                         rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
745                                             p_tunn, p_hwfn->hw_info.hw_mode,
746                                             b_hw_start, int_mode,
747                                             allow_npar_tx_switch);
748                         break;
749                 default:
750                         rc = -EINVAL;
751                         break;
752                 }
753
754                 if (rc)
755                         DP_NOTICE(p_hwfn,
756                                   "init phase failed for loadcode 0x%x (rc %d)\n",
757                                    load_code, rc);
758
759                 /* ACK mfw regardless of success or failure of initialization */
760                 mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
761                                      DRV_MSG_CODE_LOAD_DONE,
762                                      0, &load_code, &param);
763                 if (rc)
764                         return rc;
765                 if (mfw_rc) {
766                         DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
767                         return mfw_rc;
768                 }
769
770                 p_hwfn->hw_init_done = true;
771         }
772
773         return 0;
774 }
775
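/* Close the per-PF connection and task timers and wait for any linear scan
 * already in flight to complete, polling the TM scan-active registers up to
 * QED_HW_STOP_RETRY_LIMIT times with a ~1ms sleep between polls.
 */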
776 #define QED_HW_STOP_RETRY_LIMIT (10)
777 static inline void qed_hw_timers_stop(struct qed_dev *cdev,
778                                       struct qed_hwfn *p_hwfn,
779                                       struct qed_ptt *p_ptt)
780 {
781         int i;
782
783         /* close timers */
784         qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
785         qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
786
787         for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
788                 if ((!qed_rd(p_hwfn, p_ptt,
789                              TM_REG_PF_SCAN_ACTIVE_CONN)) &&
790                     (!qed_rd(p_hwfn, p_ptt,
791                              TM_REG_PF_SCAN_ACTIVE_TASK)))
792                         break;
793
794                 /* Depending on the number of connections/tasks, a 1ms
795                  * sleep may be required between polls
796                  */
797                 usleep_range(1000, 2000);
798         }
799
800         if (i < QED_HW_STOP_RETRY_LIMIT)
801                 return;
802
803         DP_NOTICE(p_hwfn,
804                   "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
805                   (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
806                   (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
807 }
808
809 void qed_hw_timers_stop_all(struct qed_dev *cdev)
810 {
811         int j;
812
813         for_each_hwfn(cdev, j) {
814                 struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
815                 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
816
817                 qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
818         }
819 }
820
821 int qed_hw_stop(struct qed_dev *cdev)
822 {
823         int rc = 0, t_rc;
824         int j;
825
826         for_each_hwfn(cdev, j) {
827                 struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
828                 struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
829
830                 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
831
832                 /* mark the hw as uninitialized... */
833                 p_hwfn->hw_init_done = false;
834
835                 rc = qed_sp_pf_stop(p_hwfn);
836                 if (rc)
837                         DP_NOTICE(p_hwfn,
838                                   "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
839
840                 qed_wr(p_hwfn, p_ptt,
841                        NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
842
843                 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
844                 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
845                 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
846                 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
847                 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
848
849                 qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
850
851                 /* Disable Attention Generation */
852                 qed_int_igu_disable_int(p_hwfn, p_ptt);
853
854                 qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
855                 qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
856
857                 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
858
859                 /* Need to wait 1ms to guarantee SBs are cleared */
860                 usleep_range(1000, 2000);
861         }
862
863         /* Disable DMAE in PXP - in CMT, this should only be done for
864          * first hw-function, and only after all transactions have
865          * stopped for all active hw-functions.
866          */
867         t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
868                                    cdev->hwfns[0].p_main_ptt,
869                                    false);
870         if (t_rc != 0)
871                 rc = t_rc;
872
873         return rc;
874 }
875
876 void qed_hw_stop_fastpath(struct qed_dev *cdev)
877 {
878         int j;
879
880         for_each_hwfn(cdev, j) {
881                 struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
882                 struct qed_ptt *p_ptt   = p_hwfn->p_main_ptt;
883
884                 DP_VERBOSE(p_hwfn,
885                            NETIF_MSG_IFDOWN,
886                            "Shutting down the fastpath\n");
887
888                 qed_wr(p_hwfn, p_ptt,
889                        NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
890
891                 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
892                 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
893                 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
894                 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
895                 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
896
897                 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
898
899                 /* Need to wait 1ms to guarantee SBs are cleared */
900                 usleep_range(1000, 2000);
901         }
902 }
903
904 void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
905 {
906         /* Re-open incoming traffic */
907         qed_wr(p_hwfn, p_hwfn->p_main_ptt,
908                NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
909 }
910
911 static int qed_reg_assert(struct qed_hwfn *hwfn,
912                           struct qed_ptt *ptt, u32 reg,
913                           bool expected)
914 {
915         u32 assert_val = qed_rd(hwfn, ptt, reg);
916
917         if (assert_val != expected) {
918                 DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
919                           reg, expected);
920                 return -EINVAL;
921         }
922
923         return 0;
924 }
925
926 int qed_hw_reset(struct qed_dev *cdev)
927 {
928         int rc = 0;
929         u32 unload_resp, unload_param;
930         int i;
931
932         for_each_hwfn(cdev, i) {
933                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
934
935                 DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
936
937                 /* Check for incorrect states */
938                 qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
939                                QM_REG_USG_CNT_PF_TX, 0);
940                 qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
941                                QM_REG_USG_CNT_PF_OTHER, 0);
942
943                 /* Disable PF in HW blocks */
944                 qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
945                 qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
946                 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
947                        TCFC_REG_STRONG_ENABLE_PF, 0);
948                 qed_wr(p_hwfn, p_hwfn->p_main_ptt,
949                        CCFC_REG_STRONG_ENABLE_PF, 0);
950
951                 /* Send unload command to MCP */
952                 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
953                                  DRV_MSG_CODE_UNLOAD_REQ,
954                                  DRV_MB_PARAM_UNLOAD_WOL_MCP,
955                                  &unload_resp, &unload_param);
956                 if (rc) {
957                         DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
958                         unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
959                 }
960
961                 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
962                                  DRV_MSG_CODE_UNLOAD_DONE,
963                                  0, &unload_resp, &unload_param);
964                 if (rc) {
965                         DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
966                         return rc;
967                 }
968         }
969
970         return rc;
971 }
972
973 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
974 static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
975 {
976         qed_ptt_pool_free(p_hwfn);
977         kfree(p_hwfn->hw_info.p_igu_info);
978 }
979
980 /* Setup bar access */
981 static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
982 {
983         /* clear indirect access */
984         qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
985         qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
986         qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
987         qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
988
989         /* Clean Previous errors if such exist */
990         qed_wr(p_hwfn, p_hwfn->p_main_ptt,
991                PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
992                1 << p_hwfn->abs_pf_id);
993
994         /* enable internal target-read */
995         qed_wr(p_hwfn, p_hwfn->p_main_ptt,
996                PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
997 }
998
999 static void get_function_id(struct qed_hwfn *p_hwfn)
1000 {
1001         /* ME Register */
1002         p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
1003
1004         p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
1005
1006         p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
1007         p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1008                                       PXP_CONCRETE_FID_PFID);
1009         p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1010                                     PXP_CONCRETE_FID_PORT);
1011 }
1012
1013 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
1014 {
1015         u32 *feat_num = p_hwfn->hw_info.feat_num;
1016         int num_features = 1;
1017
1018         feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
1019                                                 num_features,
1020                                         RESC_NUM(p_hwfn, QED_L2_QUEUE));
1021         DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1022                    "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
1023                    feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
1024                    num_features);
1025 }
1026
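/* Resource partitioning: most device-wide resources (SBs, L2 queues, vports,
 * PQs, MAC/VLAN filters) are split evenly across the maximal number of PFs,
 * while a few (e.g. RLs, ILT lines) use fixed per-PF values. Each PF's range
 * simply starts at resc_num * rel_pf_id.
 */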
1027 static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1028 {
1029         u32 *resc_start = p_hwfn->hw_info.resc_start;
1030         u32 *resc_num = p_hwfn->hw_info.resc_num;
1031         struct qed_sb_cnt_info sb_cnt_info;
1032         int num_funcs, i;
1033
1034         num_funcs = MAX_NUM_PFS_BB;
1035
1036         memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
1037         qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
1038
1039         resc_num[QED_SB] = min_t(u32,
1040                                  (MAX_SB_PER_PATH_BB / num_funcs),
1041                                  sb_cnt_info.sb_cnt);
1042         resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
1043         resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
1044         resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
1045         resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
1046         resc_num[QED_RL] = 8;
1047         resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
1048         resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
1049                              num_funcs;
1050         resc_num[QED_ILT] = 950;
1051
1052         for (i = 0; i < QED_MAX_RESC; i++)
1053                 resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
1054
1055         qed_hw_set_feat(p_hwfn);
1056
1057         DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1058                    "The numbers for each resource are:\n"
1059                    "SB = %d start = %d\n"
1060                    "L2_QUEUE = %d start = %d\n"
1061                    "VPORT = %d start = %d\n"
1062                    "PQ = %d start = %d\n"
1063                    "RL = %d start = %d\n"
1064                    "MAC = %d start = %d\n"
1065                    "VLAN = %d start = %d\n"
1066                    "ILT = %d start = %d\n",
1067                    p_hwfn->hw_info.resc_num[QED_SB],
1068                    p_hwfn->hw_info.resc_start[QED_SB],
1069                    p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
1070                    p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
1071                    p_hwfn->hw_info.resc_num[QED_VPORT],
1072                    p_hwfn->hw_info.resc_start[QED_VPORT],
1073                    p_hwfn->hw_info.resc_num[QED_PQ],
1074                    p_hwfn->hw_info.resc_start[QED_PQ],
1075                    p_hwfn->hw_info.resc_num[QED_RL],
1076                    p_hwfn->hw_info.resc_start[QED_RL],
1077                    p_hwfn->hw_info.resc_num[QED_MAC],
1078                    p_hwfn->hw_info.resc_start[QED_MAC],
1079                    p_hwfn->hw_info.resc_num[QED_VLAN],
1080                    p_hwfn->hw_info.resc_start[QED_VLAN],
1081                    p_hwfn->hw_info.resc_num[QED_ILT],
1082                    p_hwfn->hw_info.resc_start[QED_ILT]);
1083 }
1084
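/* Read the default configuration from the nvm_cfg image in the MCP
 * scratchpad: port mode, default link settings (speed, autoneg, pause),
 * multi-function mode and device capabilities. The nvm_cfg base address is
 * published by the MFW in MISC_REG_GEN_PURP_CR0.
 */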
1085 static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
1086                                struct qed_ptt *p_ptt)
1087 {
1088         u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
1089         u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
1090         struct qed_mcp_link_params *link;
1091
1092         /* Read global nvm_cfg address */
1093         nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1094
1095         /* Verify MCP has initialized it */
1096         if (!nvm_cfg_addr) {
1097                 DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1098                 return -EINVAL;
1099         }
1100
1101         /* Read nvm_cfg1 (Note: this is just the offset, not the offsize (TBD)) */
1102         nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1103
1104         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1105                offsetof(struct nvm_cfg1, glob) +
1106                offsetof(struct nvm_cfg1_glob, core_cfg);
1107
1108         core_cfg = qed_rd(p_hwfn, p_ptt, addr);
1109
1110         switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1111                 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1112         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
1113                 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
1114                 break;
1115         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
1116                 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
1117                 break;
1118         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
1119                 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
1120                 break;
1121         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
1122                 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
1123                 break;
1124         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
1125                 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
1126                 break;
1127         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
1128                 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
1129                 break;
1130         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
1131                 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
1132                 break;
1133         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
1134                 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
1135                 break;
1136         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
1137                 p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
1138                 break;
1139         default:
1140                 DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
1141                           core_cfg);
1142                 break;
1143         }
1144
1145         /* Read default link configuration */
1146         link = &p_hwfn->mcp_info->link_input;
1147         port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1148                         offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
1149         link_temp = qed_rd(p_hwfn, p_ptt,
1150                            port_cfg_addr +
1151                            offsetof(struct nvm_cfg1_port, speed_cap_mask));
1152         link->speed.advertised_speeds =
1153                 link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
1154
1155         p_hwfn->mcp_info->link_capabilities.speed_capabilities =
1156                                                 link->speed.advertised_speeds;
1157
1158         link_temp = qed_rd(p_hwfn, p_ptt,
1159                            port_cfg_addr +
1160                            offsetof(struct nvm_cfg1_port, link_settings));
1161         switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
1162                 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
1163         case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
1164                 link->speed.autoneg = true;
1165                 break;
1166         case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
1167                 link->speed.forced_speed = 1000;
1168                 break;
1169         case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
1170                 link->speed.forced_speed = 10000;
1171                 break;
1172         case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
1173                 link->speed.forced_speed = 25000;
1174                 break;
1175         case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
1176                 link->speed.forced_speed = 40000;
1177                 break;
1178         case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1179                 link->speed.forced_speed = 50000;
1180                 break;
1181         case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
1182                 link->speed.forced_speed = 100000;
1183                 break;
1184         default:
1185                 DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
1186                           link_temp);
1187         }
1188
1189         link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
1190         link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
1191         link->pause.autoneg = !!(link_temp &
1192                                  NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
1193         link->pause.forced_rx = !!(link_temp &
1194                                    NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
1195         link->pause.forced_tx = !!(link_temp &
1196                                    NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
1197         link->loopback_mode = 0;
1198
1199         DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1200                    "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
1201                    link->speed.forced_speed, link->speed.advertised_speeds,
1202                    link->speed.autoneg, link->pause.autoneg);
1203
1204         /* Read Multi-function information from shmem */
1205         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1206                offsetof(struct nvm_cfg1, glob) +
1207                offsetof(struct nvm_cfg1_glob, generic_cont0);
1208
1209         generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
1210
1211         mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
1212                   NVM_CFG1_GLOB_MF_MODE_OFFSET;
1213
1214         switch (mf_mode) {
1215         case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
1216                 p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
1217                 break;
1218         case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
1219                 p_hwfn->cdev->mf_mode = QED_MF_NPAR;
1220                 break;
1221         case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
1222                 p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
1223                 break;
1224         }
1225         DP_INFO(p_hwfn, "Multi function mode is %08x\n",
1226                 p_hwfn->cdev->mf_mode);
1227
1228         /* Read device capabilities information from shmem */
1229         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1230                 offsetof(struct nvm_cfg1, glob) +
1231                 offsetof(struct nvm_cfg1_glob, device_capabilities);
1232
1233         device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
1234         if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
1235                 __set_bit(QED_DEV_CAP_ETH,
1236                           &p_hwfn->hw_info.device_capabilities);
1237
1238         return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
1239 }
1240
1241 static int
1242 qed_get_hw_info(struct qed_hwfn *p_hwfn,
1243                 struct qed_ptt *p_ptt,
1244                 enum qed_pci_personality personality)
1245 {
1246         u32 port_mode;
1247         int rc;
1248
1249         /* Since all information is common, only the first hwfn should do this */
1250         if (IS_LEAD_HWFN(p_hwfn)) {
1251                 rc = qed_iov_hw_info(p_hwfn);
1252                 if (rc)
1253                         return rc;
1254         }
1255
1256         /* Read the port mode */
1257         port_mode = qed_rd(p_hwfn, p_ptt,
1258                            CNIG_REG_NW_PORT_MODE_BB_B0);
1259
1260         if (port_mode < 3) {
1261                 p_hwfn->cdev->num_ports_in_engines = 1;
1262         } else if (port_mode <= 5) {
1263                 p_hwfn->cdev->num_ports_in_engines = 2;
1264         } else {
1265                 DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
1266                           port_mode);
1267
1268                 /* Default num_ports_in_engines to something */
1269                 p_hwfn->cdev->num_ports_in_engines = 1;
1270         }
1271
1272         qed_hw_get_nvm_info(p_hwfn, p_ptt);
1273
1274         rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1275         if (rc)
1276                 return rc;
1277
1278         if (qed_mcp_is_init(p_hwfn))
1279                 ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1280                                 p_hwfn->mcp_info->func_info.mac);
1281         else
1282                 eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1283
1284         if (qed_mcp_is_init(p_hwfn)) {
1285                 if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1286                         p_hwfn->hw_info.ovlan =
1287                                 p_hwfn->mcp_info->func_info.ovlan;
1288
1289                 qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1290         }
1291
1292         if (qed_mcp_is_init(p_hwfn)) {
1293                 enum qed_pci_personality protocol;
1294
1295                 protocol = p_hwfn->mcp_info->func_info.protocol;
1296                 p_hwfn->hw_info.personality = protocol;
1297         }
1298
1299         qed_hw_get_resc(p_hwfn);
1300
1301         return rc;
1302 }
1303
1304 static int qed_get_dev_info(struct qed_dev *cdev)
1305 {
1306         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1307         u32 tmp;
1308
1309         /* Read Vendor Id / Device Id */
1310         pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
1311                              &cdev->vendor_id);
1312         pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
1313                              &cdev->device_id);
1314         cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1315                                      MISCS_REG_CHIP_NUM);
1316         cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1317                                      MISCS_REG_CHIP_REV);
1318         MASK_FIELD(CHIP_REV, cdev->chip_rev);
1319
1320         cdev->type = QED_DEV_TYPE_BB;
1321         /* Learn number of HW-functions */
1322         tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1323                      MISCS_REG_CMT_ENABLED_FOR_PAIR);
1324
1325         if (tmp & (1 << p_hwfn->rel_pf_id)) {
1326                 DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1327                 cdev->num_hwfns = 2;
1328         } else {
1329                 cdev->num_hwfns = 1;
1330         }
1331
1332         cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1333                                     MISCS_REG_CHIP_TEST_REG) >> 4;
1334         MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
1335         cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1336                                        MISCS_REG_CHIP_METAL);
1337         MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1338
1339         DP_INFO(cdev->hwfns,
1340                 "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1341                 cdev->chip_num, cdev->chip_rev,
1342                 cdev->chip_bond_id, cdev->chip_metal);
1343
1344         if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
1345                 DP_NOTICE(cdev->hwfns,
1346                           "The chip type/rev (BB A0) is not supported!\n");
1347                 return -EINVAL;
1348         }
1349
1350         return 0;
1351 }
1352
1353 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
1354                                  void __iomem *p_regview,
1355                                  void __iomem *p_doorbells,
1356                                  enum qed_pci_personality personality)
1357 {
1358         int rc = 0;
1359
1360         /* Split PCI bars evenly between hwfns */
1361         p_hwfn->regview = p_regview;
1362         p_hwfn->doorbells = p_doorbells;
1363
1364         /* Validate that chip access is feasible */
1365         if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
1366                 DP_ERR(p_hwfn,
1367                        "Reading the ME register returns all Fs; Preventing further chip access\n");
1368                 return -EINVAL;
1369         }
1370
1371         get_function_id(p_hwfn);
1372
1373         /* Allocate PTT pool */
1374         rc = qed_ptt_pool_alloc(p_hwfn);
1375         if (rc) {
1376                 DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
1377                 goto err0;
1378         }
1379
1380         /* Allocate the main PTT */
1381         p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
1382
1383         /* First hwfn learns basic information, e.g., number of hwfns */
1384         if (!p_hwfn->my_id) {
1385                 rc = qed_get_dev_info(p_hwfn->cdev);
1386                 if (rc != 0)
1387                         goto err1;
1388         }
1389
1390         qed_hw_hwfn_prepare(p_hwfn);
1391
1392         /* Initialize MCP structure */
1393         rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
1394         if (rc) {
1395                 DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
1396                 goto err1;
1397         }
1398
1399         /* Read the device configuration information from the HW and SHMEM */
1400         rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
1401         if (rc) {
1402                 DP_NOTICE(p_hwfn, "Failed to get HW information\n");
1403                 goto err2;
1404         }
1405
1406         /* Allocate the init RT array and initialize the init-ops engine */
1407         rc = qed_init_alloc(p_hwfn);
1408         if (rc) {
1409                 DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
1410                 goto err2;
1411         }
1412
1413         return rc;
1414 err2:
1415         if (IS_LEAD_HWFN(p_hwfn))
1416                 qed_iov_free_hw_info(p_hwfn->cdev);
1417         qed_mcp_free(p_hwfn);
1418 err1:
1419         qed_hw_hwfn_free(p_hwfn);
1420 err0:
1421         return rc;
1422 }
1423
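/* Prepare all hw-functions of the device. The leading hwfn is prepared first
 * and learns how many hw-functions exist; on CMT devices the register and
 * doorbell BARs are split evenly between the two engines, and the second
 * hwfn is prepared against the upper half of each BAR.
 */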
1424 int qed_hw_prepare(struct qed_dev *cdev,
1425                    int personality)
1426 {
1427         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1428         int rc;
1429
1430         /* Store the precompiled init data ptrs */
1431         qed_init_iro_array(cdev);
1432
1433         /* Initialize the first hwfn - will learn number of hwfns */
1434         rc = qed_hw_prepare_single(p_hwfn,
1435                                    cdev->regview,
1436                                    cdev->doorbells, personality);
1437         if (rc)
1438                 return rc;
1439
1440         personality = p_hwfn->hw_info.personality;
1441
1442         /* Initialize the rest of the hwfns */
1443         if (cdev->num_hwfns > 1) {
1444                 void __iomem *p_regview, *p_doorbell;
1445                 u8 __iomem *addr;
1446
1447                 /* adjust bar offset for second engine */
1448                 addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
1449                 p_regview = addr;
1450
1451                 /* adjust doorbell bar offset for second engine */
1452                 addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
1453                 p_doorbell = addr;
1454
1455                 /* prepare second hw function */
1456                 rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
1457                                            p_doorbell, personality);
1458
1459                 /* in case of error, need to free the previously
1460                  * initialized hwfn 0.
1461                  */
1462                 if (rc) {
1463                         qed_init_free(p_hwfn);
1464                         qed_mcp_free(p_hwfn);
1465                         qed_hw_hwfn_free(p_hwfn);
1466                 }
1467         }
1468
1469         return rc;
1470 }
1471
1472 void qed_hw_remove(struct qed_dev *cdev)
1473 {
1474         int i;
1475
1476         for_each_hwfn(cdev, i) {
1477                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1478
1479                 qed_init_free(p_hwfn);
1480                 qed_hw_hwfn_free(p_hwfn);
1481                 qed_mcp_free(p_hwfn);
1482         }
1483
1484         qed_iov_free_hw_info(cdev);
1485 }
1486
1487 int qed_chain_alloc(struct qed_dev *cdev,
1488                     enum qed_chain_use_mode intended_use,
1489                     enum qed_chain_mode mode,
1490                     u16 num_elems,
1491                     size_t elem_size,
1492                     struct qed_chain *p_chain)
1493 {
1494         dma_addr_t p_pbl_phys = 0;
1495         void *p_pbl_virt = NULL;
1496         dma_addr_t p_phys = 0;
1497         void *p_virt = NULL;
1498         u16 page_cnt = 0;
1499         size_t size;
1500
1501         if (mode == QED_CHAIN_MODE_SINGLE)
1502                 page_cnt = 1;
1503         else
1504                 page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1505
1506         size = page_cnt * QED_CHAIN_PAGE_SIZE;
1507         p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1508                                     size, &p_phys, GFP_KERNEL);
1509         if (!p_virt) {
1510                 DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1511                 goto nomem;
1512         }
1513
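             /* PBL mode additionally needs a table holding one entry per
              * chain page; allocate and register it alongside the chain pages.
              */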
1514         if (mode == QED_CHAIN_MODE_PBL) {
1515                 size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1516                 p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1517                                                 size, &p_pbl_phys,
1518                                                 GFP_KERNEL);
1519                 if (!p_pbl_virt) {
1520                         DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1521                         goto nomem;
1522                 }
1523
1524                 qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1525                                    (u8)elem_size, intended_use,
1526                                    p_pbl_phys, p_pbl_virt);
1527         } else {
1528                 qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1529                                (u8)elem_size, intended_use, mode);
1530         }
1531
1532         return 0;
1533
1534 nomem:
1535         dma_free_coherent(&cdev->pdev->dev,
1536                           page_cnt * QED_CHAIN_PAGE_SIZE,
1537                           p_virt, p_phys);
1538         dma_free_coherent(&cdev->pdev->dev,
1539                           page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1540                           p_pbl_virt, p_pbl_phys);
1541
1542         return -ENOMEM;
1543 }
1544
1545 void qed_chain_free(struct qed_dev *cdev,
1546                     struct qed_chain *p_chain)
1547 {
1548         size_t size;
1549
1550         if (!p_chain->p_virt_addr)
1551                 return;
1552
1553         if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1554                 size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1555                 dma_free_coherent(&cdev->pdev->dev, size,
1556                                   p_chain->pbl.p_virt_table,
1557                                   p_chain->pbl.p_phys_table);
1558         }
1559
1560         size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1561         dma_free_coherent(&cdev->pdev->dev, size,
1562                           p_chain->p_virt_addr,
1563                           p_chain->p_phys_addr);
1564 }
1565
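     /* The qed_fw_* helpers below translate a function-relative resource
      * index into its absolute index within the range allotted to this PF,
      * failing if the index exceeds the PF's allocation.
      */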
1566 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1567                     u16 src_id, u16 *dst_id)
1568 {
1569         if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1570                 u16 min, max;
1571
1572                 min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1573                 max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1574                 DP_NOTICE(p_hwfn,
1575                           "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1576                           src_id, min, max);
1577
1578                 return -EINVAL;
1579         }
1580
1581         *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1582
1583         return 0;
1584 }
1585
1586 int qed_fw_vport(struct qed_hwfn *p_hwfn,
1587                  u8 src_id, u8 *dst_id)
1588 {
1589         if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1590                 u8 min, max;
1591
1592                 min = (u8)RESC_START(p_hwfn, QED_VPORT);
1593                 max = min + RESC_NUM(p_hwfn, QED_VPORT);
1594                 DP_NOTICE(p_hwfn,
1595                           "vport id [%d] is not valid, available indices [%d - %d]\n",
1596                           src_id, min, max);
1597
1598                 return -EINVAL;
1599         }
1600
1601         *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1602
1603         return 0;
1604 }
1605
1606 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1607                    u8 src_id, u8 *dst_id)
1608 {
1609         if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1610                 u8 min, max;
1611
1612                 min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1613                 max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
1614                 DP_NOTICE(p_hwfn,
1615                           "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1616                           src_id, min, max);
1617
1618                 return -EINVAL;
1619         }
1620
1621         *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1622
1623         return 0;
1624 }
1625
1626 /* Calculate final WFQ values for all vports and configure them.
1627  * After this configuration each vport will have
1628  * approx min rate =  min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
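      * e.g., min_speed = 500 Mbps with min_pf_rate = 10000 Mbps yields
      * vport_wfq = QED_WFQ_UNIT / 20, i.e. a guarantee of roughly 500 Mbps.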
1629  */
1630 static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1631                                              struct qed_ptt *p_ptt,
1632                                              u32 min_pf_rate)
1633 {
1634         struct init_qm_vport_params *vport_params;
1635         int i;
1636
1637         vport_params = p_hwfn->qm_info.qm_vport_params;
1638
1639         for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1640                 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1641
1642                 vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
1643                                                 min_pf_rate;
1644                 qed_init_vport_wfq(p_hwfn, p_ptt,
1645                                    vport_params[i].first_tx_pq_id,
1646                                    vport_params[i].vport_wfq);
1647         }
1648 }
1649
1650 static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
1651                                        u32 min_pf_rate)
1652
1653 {
1654         int i;
1655
1656         for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
1657                 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
1658 }
1659
1660 static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1661                                            struct qed_ptt *p_ptt,
1662                                            u32 min_pf_rate)
1663 {
1664         struct init_qm_vport_params *vport_params;
1665         int i;
1666
1667         vport_params = p_hwfn->qm_info.qm_vport_params;
1668
1669         for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1670                 qed_init_wfq_default_param(p_hwfn, min_pf_rate);
1671                 qed_init_vport_wfq(p_hwfn, p_ptt,
1672                                    vport_params[i].first_tx_pq_id,
1673                                    vport_params[i].vport_wfq);
1674         }
1675 }
1676
1677 /* This function performs several validations for WFQ
1678  * configuration and required min rate for a given vport
1679  * 1. req_rate must be at least one percent of min_pf_rate.
1680  * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
1681  *    rates to get less than one percent of min_pf_rate.
1682  * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
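      * e.g., with min_pf_rate = 10000 Mbps and 4 vports, a request of 9950 Mbps
      * would leave each of the 3 unconfigured vports ~16 Mbps (< 1%), so it is
      * rejected.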
1683  */
1684 static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
1685                               u16 vport_id, u32 req_rate,
1686                               u32 min_pf_rate)
1687 {
1688         u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
1689         int non_requested_count = 0, req_count = 0, i, num_vports;
1690
1691         num_vports = p_hwfn->qm_info.num_vports;
1692
1693         /* Accounting for the vports which are configured for WFQ explicitly */
1694         for (i = 0; i < num_vports; i++) {
1695                 u32 tmp_speed;
1696
1697                 if ((i != vport_id) &&
1698                     p_hwfn->qm_info.wfq_data[i].configured) {
1699                         req_count++;
1700                         tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1701                         total_req_min_rate += tmp_speed;
1702                 }
1703         }
1704
1705         /* Include current vport data as well */
1706         req_count++;
1707         total_req_min_rate += req_rate;
1708         non_requested_count = num_vports - req_count;
1709
1710         if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
1711                 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1712                            "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1713                            vport_id, req_rate, min_pf_rate);
1714                 return -EINVAL;
1715         }
1716
1717         if (num_vports > QED_WFQ_UNIT) {
1718                 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1719                            "Number of vports is greater than %d\n",
1720                            QED_WFQ_UNIT);
1721                 return -EINVAL;
1722         }
1723
1724         if (total_req_min_rate > min_pf_rate) {
1725                 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1726                            "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
1727                            total_req_min_rate, min_pf_rate);
1728                 return -EINVAL;
1729         }
1730
1731         total_left_rate = min_pf_rate - total_req_min_rate;
1732
1733         left_rate_per_vp = total_left_rate / non_requested_count;
1734         if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
1735                 DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1736                            "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1737                            left_rate_per_vp, min_pf_rate);
1738                 return -EINVAL;
1739         }
1740
1741         p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
1742         p_hwfn->qm_info.wfq_data[vport_id].configured = true;
1743
1744         for (i = 0; i < num_vports; i++) {
1745                 if (p_hwfn->qm_info.wfq_data[i].configured)
1746                         continue;
1747
1748                 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
1749         }
1750
1751         return 0;
1752 }
1753
1754 static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
1755                                                  struct qed_ptt *p_ptt,
1756                                                  u32 min_pf_rate)
1757 {
1758         bool use_wfq = false;
1759         int rc = 0;
1760         u16 i;
1761
1762         /* Validate all pre-configured vports for wfq */
1763         for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1764                 u32 rate;
1765
1766                 if (!p_hwfn->qm_info.wfq_data[i].configured)
1767                         continue;
1768
1769                 rate = p_hwfn->qm_info.wfq_data[i].min_speed;
1770                 use_wfq = true;
1771
1772                 rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
1773                 if (rc) {
1774                         DP_NOTICE(p_hwfn,
1775                                   "WFQ validation failed while configuring min rate\n");
1776                         break;
1777                 }
1778         }
1779
1780         if (!rc && use_wfq)
1781                 qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
1782         else
1783                 qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
1784
1785         return rc;
1786 }
1787
1788 /* API to configure WFQ from mcp link change */
1789 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
1790 {
1791         int i;
1792
1793         for_each_hwfn(cdev, i) {
1794                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1795
1796                 __qed_configure_vp_wfq_on_link_change(p_hwfn,
1797                                                       p_hwfn->p_dpc_ptt,
1798                                                       min_pf_rate);
1799         }
1800 }
1801
1802 int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
1803                                      struct qed_ptt *p_ptt,
1804                                      struct qed_mcp_link_state *p_link,
1805                                      u8 max_bw)
1806 {
1807         int rc = 0;
1808
1809         p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
1810
1811         if (!p_link->line_speed && (max_bw != 100))
1812                 return rc;
1813
1814         p_link->speed = (p_link->line_speed * max_bw) / 100;
1815         p_hwfn->qm_info.pf_rl = p_link->speed;
1816
1817         /* Since the limiter also affects Tx-switched traffic, we don't want it
1818          * to limit such traffic in case there's no actual limit.
1819          * In that case, set limit to imaginary high boundary.
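              * (100,000 in the Mb/s units used here, i.e. 100 Gb/s).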
1820          */
1821         if (max_bw == 100)
1822                 p_hwfn->qm_info.pf_rl = 100000;
1823
1824         rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1825                             p_hwfn->qm_info.pf_rl);
1826
1827         DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1828                    "Configured MAX bandwidth to be %08x Mb/sec\n",
1829                    p_link->speed);
1830
1831         return rc;
1832 }
1833
1834 /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
1835 int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
1836 {
1837         int i, rc = -EINVAL;
1838
1839         if (max_bw < 1 || max_bw > 100) {
1840                 DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
1841                 return rc;
1842         }
1843
1844         for_each_hwfn(cdev, i) {
1845                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1846                 struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
1847                 struct qed_mcp_link_state *p_link;
1848                 struct qed_ptt *p_ptt;
1849
1850                 p_link = &p_lead->mcp_info->link_output;
1851
1852                 p_ptt = qed_ptt_acquire(p_hwfn);
1853                 if (!p_ptt)
1854                         return -EBUSY;
1855
1856                 rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
1857                                                       p_link, max_bw);
1858
1859                 qed_ptt_release(p_hwfn, p_ptt);
1860
1861                 if (rc)
1862                         break;
1863         }
1864
1865         return rc;
1866 }
1867
1868 int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
1869                                      struct qed_ptt *p_ptt,
1870                                      struct qed_mcp_link_state *p_link,
1871                                      u8 min_bw)
1872 {
1873         int rc = 0;
1874
1875         p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
1876         p_hwfn->qm_info.pf_wfq = min_bw;
1877
1878         if (!p_link->line_speed)
1879                 return rc;
1880
1881         p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
1882
1883         rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
1884
1885         DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1886                    "Configured MIN bandwidth to be %d Mb/sec\n",
1887                    p_link->min_pf_rate);
1888
1889         return rc;
1890 }
1891
1892 /* Main API to configure PF min bandwidth where bw range is [1-100] */
1893 int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
1894 {
1895         int i, rc = -EINVAL;
1896
1897         if (min_bw < 1 || min_bw > 100) {
1898                 DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
1899                 return rc;
1900         }
1901
1902         for_each_hwfn(cdev, i) {
1903                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1904                 struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
1905                 struct qed_mcp_link_state *p_link;
1906                 struct qed_ptt *p_ptt;
1907
1908                 p_link = &p_lead->mcp_info->link_output;
1909
1910                 p_ptt = qed_ptt_acquire(p_hwfn);
1911                 if (!p_ptt)
1912                         return -EBUSY;
1913
1914                 rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
1915                                                       p_link, min_bw);
1916                 if (rc) {
1917                         qed_ptt_release(p_hwfn, p_ptt);
1918                         return rc;
1919                 }
1920
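                     /* A new PF minimum rate also rescales the per-vport WFQ
                      * guarantees, so re-run the vport WFQ validation and
                      * configuration against it.
                      */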
1921                 if (p_link->min_pf_rate) {
1922                         u32 min_rate = p_link->min_pf_rate;
1923
1924                         rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
1925                                                                    p_ptt,
1926                                                                    min_rate);
1927                 }
1928
1929                 qed_ptt_release(p_hwfn, p_ptt);
1930         }
1931
1932         return rc;
1933 }