qed: Introduce VFs
[linux-2.6-microblaze.git] / drivers / net / ethernet / qlogic / qed / qed_dev.c
index b7d100f..362e8db 100644 (file)
@@ -30,6 +30,7 @@
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
 
 /* API common to all protocols */
 enum BAR_ID {
@@ -40,10 +41,14 @@ enum BAR_ID {
 static u32 qed_hw_bar_size(struct qed_hwfn     *p_hwfn,
                           enum BAR_ID          bar_id)
 {
-       u32     bar_reg = (bar_id == BAR_ID_0 ?
-                          PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
-       u32     val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+       u32 bar_reg = (bar_id == BAR_ID_0 ?
+                      PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+       u32 val;
 
+       if (IS_VF(p_hwfn->cdev))
+               return 1 << 17;
+
+       val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
        if (val)
                return 1 << (val + 15);
 
@@ -105,12 +110,17 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
        qm_info->qm_vport_params = NULL;
        kfree(qm_info->qm_port_params);
        qm_info->qm_port_params = NULL;
+       kfree(qm_info->wfq_data);
+       qm_info->wfq_data = NULL;
 }
 
 void qed_resc_free(struct qed_dev *cdev)
 {
        int i;
 
+       if (IS_VF(cdev))
+               return;
+
        kfree(cdev->fw_data);
        cdev->fw_data = NULL;
 
@@ -134,20 +144,26 @@ void qed_resc_free(struct qed_dev *cdev)
                qed_eq_free(p_hwfn, p_hwfn->p_eq);
                qed_consq_free(p_hwfn, p_hwfn->p_consq);
                qed_int_free(p_hwfn);
+               qed_iov_free(p_hwfn);
                qed_dmae_info_free(p_hwfn);
        }
 }
 
 static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 {
+       u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct init_qm_port_params *p_qm_port;
-       u8 num_vports, i, vport_id, num_ports;
        u16 num_pqs, multi_cos_tcs = 1;
+       u16 num_vfs = 0;
 
+#ifdef CONFIG_QED_SRIOV
+       if (p_hwfn->cdev->p_iov_info)
+               num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+#endif
        memset(qm_info, 0, sizeof(*qm_info));
 
-       num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+       num_pqs = multi_cos_tcs + num_vfs + 1;  /* The '1' is for pure-LB */
        num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
 
        /* Sanity checking that setup requires legal number of resources */
@@ -175,11 +191,17 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
        if (!qm_info->qm_port_params)
                goto alloc_err;
 
+       qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
+                                   GFP_KERNEL);
+       if (!qm_info->wfq_data)
+               goto alloc_err;
+
        vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
 
        /* First init per-TC PQs */
-       for (i = 0; i < multi_cos_tcs; i++) {
-               struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+       for (i = 0; i < multi_cos_tcs; i++, curr_queue++) {
+               struct init_qm_pq_params *params =
+                   &qm_info->qm_pq_params[curr_queue];
 
                params->vport_id = vport_id;
                params->tc_id = p_hwfn->hw_info.non_offload_tc;
@@ -187,13 +209,26 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
        }
 
        /* Then init pure-LB PQ */
-       qm_info->pure_lb_pq = i;
-       qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
-       qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
-       qm_info->qm_pq_params[i].wrr_group = 1;
-       i++;
+       qm_info->pure_lb_pq = curr_queue;
+       qm_info->qm_pq_params[curr_queue].vport_id =
+           (u8) RESC_START(p_hwfn, QED_VPORT);
+       qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
+       qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+       curr_queue++;
 
        qm_info->offload_pq = 0;
+       /* Then init per-VF PQs */
+       vf_offset = curr_queue;
+       for (i = 0; i < num_vfs; i++) {
+               /* First vport is used by the PF */
+               qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
+               qm_info->qm_pq_params[curr_queue].tc_id =
+                   p_hwfn->hw_info.non_offload_tc;
+               qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+               curr_queue++;
+       }
+
+       qm_info->vf_queues_offset = vf_offset;
        qm_info->num_pqs = num_pqs;
        qm_info->num_vports = num_vports;
 
@@ -211,20 +246,22 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 
        qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
 
-       qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+       qm_info->num_vf_pqs = num_vfs;
+       qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+       for (i = 0; i < qm_info->num_vports; i++)
+               qm_info->qm_vport_params[i].vport_wfq = 1;
 
        qm_info->pf_wfq = 0;
        qm_info->pf_rl = 0;
        qm_info->vport_rl_en = 1;
+       qm_info->vport_wfq_en = 1;
 
        return 0;
 
 alloc_err:
        DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
-       kfree(qm_info->qm_pq_params);
-       kfree(qm_info->qm_vport_params);
-       kfree(qm_info->qm_port_params);
-
+       qed_qm_info_free(p_hwfn);
        return -ENOMEM;
 }
 
@@ -234,6 +271,9 @@ int qed_resc_alloc(struct qed_dev *cdev)
        struct qed_eq *p_eq;
        int i, rc = 0;
 
+       if (IS_VF(cdev))
+               return rc;
+
        cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
        if (!cdev->fw_data)
                return -ENOMEM;
@@ -308,6 +348,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
                if (rc)
                        goto alloc_err;
 
+               rc = qed_iov_alloc(p_hwfn);
+               if (rc)
+                       goto alloc_err;
+
                /* EQ */
                p_eq = qed_eq_alloc(p_hwfn, 256);
                if (!p_eq) {
@@ -350,6 +394,9 @@ void qed_resc_setup(struct qed_dev *cdev)
 {
        int i;
 
+       if (IS_VF(cdev))
+               return;
+
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
@@ -365,6 +412,8 @@ void qed_resc_setup(struct qed_dev *cdev)
                       p_hwfn->mcp_info->mfw_mb_length);
 
                qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+               qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
        }
 }
 
@@ -492,7 +541,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
        struct qed_qm_common_rt_init_params params;
        struct qed_dev *cdev = p_hwfn->cdev;
+       u32 concrete_fid;
        int rc = 0;
+       u8 vf_id;
 
        qed_init_cau_rt_data(cdev);
 
@@ -542,6 +593,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
        qed_wr(p_hwfn, p_ptt, 0x20b4,
               qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
 
+       for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+               concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
+               qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+               qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+       }
+       /* restore the pretend context back to the original PF */
+       qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
        return rc;
 }
 
@@ -558,6 +617,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
 
 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
+                         struct qed_tunn_start_params *p_tunn,
                          int hw_mode,
                          bool b_hw_start,
                          enum qed_int_mode int_mode,
@@ -574,7 +634,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                        p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
 
                /* Update rate limit once we'll actually have a link */
-               p_hwfn->qm_info.pf_rl = 100;
+               p_hwfn->qm_info.pf_rl = 100000;
        }
 
        qed_cxt_hw_init_pf(p_hwfn);
@@ -625,7 +685,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
 
                /* send function start command */
-               rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+               rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
                if (rc)
                        DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
        }
@@ -672,6 +732,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
 }
 
 int qed_hw_init(struct qed_dev *cdev,
+               struct qed_tunn_start_params *p_tunn,
                bool b_hw_start,
                enum qed_int_mode int_mode,
                bool allow_npar_tx_switch,
@@ -680,13 +741,20 @@ int qed_hw_init(struct qed_dev *cdev,
        u32 load_code, param;
        int rc, mfw_rc, i;
 
-       rc = qed_init_fw_data(cdev, bin_fw_data);
-       if (rc != 0)
-               return rc;
+       if (IS_PF(cdev)) {
+               rc = qed_init_fw_data(cdev, bin_fw_data);
+               if (rc != 0)
+                       return rc;
+       }
 
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
+               if (IS_VF(cdev)) {
+                       p_hwfn->b_int_enabled = 1;
+                       continue;
+               }
+
                /* Enable DMAE in PXP */
                rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
 
@@ -724,7 +792,7 @@ int qed_hw_init(struct qed_dev *cdev,
                /* Fall into */
                case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                        rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
-                                           p_hwfn->hw_info.hw_mode,
+                                           p_tunn, p_hwfn->hw_info.hw_mode,
                                            b_hw_start, int_mode,
                                            allow_npar_tx_switch);
                        break;
@@ -811,6 +879,11 @@ int qed_hw_stop(struct qed_dev *cdev)
 
                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
 
+               if (IS_VF(cdev)) {
+                       /* To be implemented in a later patch */
+                       continue;
+               }
+
                /* mark the hw as uninitialized... */
                p_hwfn->hw_init_done = false;
 
@@ -842,15 +915,16 @@ int qed_hw_stop(struct qed_dev *cdev)
                usleep_range(1000, 2000);
        }
 
-       /* Disable DMAE in PXP - in CMT, this should only be done for
-        * first hw-function, and only after all transactions have
-        * stopped for all active hw-functions.
-        */
-       t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
-                                  cdev->hwfns[0].p_main_ptt,
-                                  false);
-       if (t_rc != 0)
-               rc = t_rc;
+       if (IS_PF(cdev)) {
+               /* Disable DMAE in PXP - in CMT, this should only be done for
+                * first hw-function, and only after all transactions have
+                * stopped for all active hw-functions.
+                */
+               t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+                                          cdev->hwfns[0].p_main_ptt, false);
+               if (t_rc != 0)
+                       rc = t_rc;
+       }
 
        return rc;
 }
@@ -914,6 +988,11 @@ int qed_hw_reset(struct qed_dev *cdev)
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
+               if (IS_VF(cdev)) {
+                       /* Will be implemented in a later patch */
+                       continue;
+               }
+
                DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
 
                /* Check for incorrect states */
@@ -1009,11 +1088,10 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 {
        u32 *resc_start = p_hwfn->hw_info.resc_start;
+       u8 num_funcs = p_hwfn->num_funcs_on_engine;
        u32 *resc_num = p_hwfn->hw_info.resc_num;
        struct qed_sb_cnt_info sb_cnt_info;
-       int num_funcs, i;
-
-       num_funcs = MAX_NUM_PFS_BB;
+       int i;
 
        memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
        qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
@@ -1220,6 +1298,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
        return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
 }
 
+static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+       u32 reg_function_hide, tmp, eng_mask;
+       u8 num_funcs;
+
+       num_funcs = MAX_NUM_PFS_BB;
+
+       /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+        * in the other bits are selected.
+        * Bits 1-15 are for functions 1-15, respectively, and their value is
+        * '0' only for enabled functions (function 0 always exists and is
+        * enabled).
+        * In case of CMT, only the "even" functions are enabled, and thus the
+        * number of functions for both hwfns is learnt from the same bits.
+        */
+       reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+
+       if (reg_function_hide & 0x1) {
+               if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
+                       num_funcs = 0;
+                       eng_mask = 0xaaaa;
+               } else {
+                       num_funcs = 1;
+                       eng_mask = 0x5554;
+               }
+
+               /* Get the number of the enabled functions on the engine */
+               tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+               while (tmp) {
+                       if (tmp & 0x1)
+                               num_funcs++;
+                       tmp >>= 0x1;
+               }
+       }
+
+       p_hwfn->num_funcs_on_engine = num_funcs;
+
+       DP_VERBOSE(p_hwfn,
+                  NETIF_MSG_PROBE,
+                  "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+                  p_hwfn->rel_pf_id,
+                  p_hwfn->abs_pf_id,
+                  p_hwfn->num_funcs_on_engine);
+}
+
 static int
 qed_get_hw_info(struct qed_hwfn *p_hwfn,
                struct qed_ptt *p_ptt,
@@ -1228,6 +1351,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
        u32 port_mode;
        int rc;
 
+       /* Since all information is common, only the first hwfn should do this */
+       if (IS_LEAD_HWFN(p_hwfn)) {
+               rc = qed_iov_hw_info(p_hwfn);
+               if (rc)
+                       return rc;
+       }
+
        /* Read the port mode */
        port_mode = qed_rd(p_hwfn, p_ptt,
                           CNIG_REG_NW_PORT_MODE_BB_B0);
@@ -1271,6 +1401,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
                p_hwfn->hw_info.personality = protocol;
        }
 
+       qed_get_num_funcs(p_hwfn, p_ptt);
+
        qed_hw_get_resc(p_hwfn);
 
        return rc;
@@ -1336,6 +1468,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
        p_hwfn->regview = p_regview;
        p_hwfn->doorbells = p_doorbells;
 
+       if (IS_VF(p_hwfn->cdev))
+               return qed_vf_hw_prepare(p_hwfn);
+
        /* Validate that chip access is feasible */
        if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
                DP_ERR(p_hwfn,
@@ -1387,6 +1522,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
 
        return rc;
 err2:
+       if (IS_LEAD_HWFN(p_hwfn))
+               qed_iov_free_hw_info(p_hwfn->cdev);
        qed_mcp_free(p_hwfn);
 err1:
        qed_hw_hwfn_free(p_hwfn);
@@ -1401,7 +1538,8 @@ int qed_hw_prepare(struct qed_dev *cdev,
        int rc;
 
        /* Store the precompiled init data ptrs */
-       qed_init_iro_array(cdev);
+       if (IS_PF(cdev))
+               qed_init_iro_array(cdev);
 
        /* Initialize the first hwfn - will learn number of hwfns */
        rc = qed_hw_prepare_single(p_hwfn,
@@ -1433,9 +1571,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
                 * initiliazed hwfn 0.
                 */
                if (rc) {
-                       qed_init_free(p_hwfn);
-                       qed_mcp_free(p_hwfn);
-                       qed_hw_hwfn_free(p_hwfn);
+                       if (IS_PF(cdev)) {
+                               qed_init_free(p_hwfn);
+                               qed_mcp_free(p_hwfn);
+                               qed_hw_hwfn_free(p_hwfn);
+                       }
                }
        }
 
@@ -1449,10 +1589,17 @@ void qed_hw_remove(struct qed_dev *cdev)
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
+               if (IS_VF(cdev)) {
+                       /* Will be implemented in a later patch */
+                       continue;
+               }
+
                qed_init_free(p_hwfn);
                qed_hw_hwfn_free(p_hwfn);
                qed_mcp_free(p_hwfn);
        }
+
+       qed_iov_free_hw_info(cdev);
 }
 
 int qed_chain_alloc(struct qed_dev *cdev,
@@ -1593,3 +1740,312 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
 
        return 0;
 }
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have
+ * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+                                            struct qed_ptt *p_ptt,
+                                            u32 min_pf_rate)
+{
+       struct init_qm_vport_params *vport_params;
+       int i;
+
+       vport_params = p_hwfn->qm_info.qm_vport_params;
+
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+               u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+               vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+                                               min_pf_rate;
+               qed_init_vport_wfq(p_hwfn, p_ptt,
+                                  vport_params[i].first_tx_pq_id,
+                                  vport_params[i].vport_wfq);
+       }
+}
+
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+                                      u32 min_pf_rate)
+
+{
+       int i;
+
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+               p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+                                          struct qed_ptt *p_ptt,
+                                          u32 min_pf_rate)
+{
+       struct init_qm_vport_params *vport_params;
+       int i;
+
+       vport_params = p_hwfn->qm_info.qm_vport_params;
+
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+               qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+               qed_init_vport_wfq(p_hwfn, p_ptt,
+                                  vport_params[i].first_tx_pq_id,
+                                  vport_params[i].vport_wfq);
+       }
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
+ *    rates to get less than one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */
+static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+                             u16 vport_id, u32 req_rate,
+                             u32 min_pf_rate)
+{
+       u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+       int non_requested_count = 0, req_count = 0, i, num_vports;
+
+       num_vports = p_hwfn->qm_info.num_vports;
+
+       /* Accounting for the vports which are configured for WFQ explicitly */
+       for (i = 0; i < num_vports; i++) {
+               u32 tmp_speed;
+
+               if ((i != vport_id) &&
+                   p_hwfn->qm_info.wfq_data[i].configured) {
+                       req_count++;
+                       tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+                       total_req_min_rate += tmp_speed;
+               }
+       }
+
+       /* Include current vport data as well */
+       req_count++;
+       total_req_min_rate += req_rate;
+       non_requested_count = num_vports - req_count;
+
+       if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+                          vport_id, req_rate, min_pf_rate);
+               return -EINVAL;
+       }
+
+       if (num_vports > QED_WFQ_UNIT) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Number of vports is greater than %d\n",
+                          QED_WFQ_UNIT);
+               return -EINVAL;
+       }
+
+       if (total_req_min_rate > min_pf_rate) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+                          total_req_min_rate, min_pf_rate);
+               return -EINVAL;
+       }
+
+       total_left_rate = min_pf_rate - total_req_min_rate;
+
+       left_rate_per_vp = total_left_rate / non_requested_count;
+       if (left_rate_per_vp <  min_pf_rate / QED_WFQ_UNIT) {
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+                          left_rate_per_vp, min_pf_rate);
+               return -EINVAL;
+       }
+
+       p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+       p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+       for (i = 0; i < num_vports; i++) {
+               if (p_hwfn->qm_info.wfq_data[i].configured)
+                       continue;
+
+               p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+       }
+
+       return 0;
+}
+
+static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
+                                                struct qed_ptt *p_ptt,
+                                                u32 min_pf_rate)
+{
+       bool use_wfq = false;
+       int rc = 0;
+       u16 i;
+
+       /* Validate all pre configured vports for wfq */
+       for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+               u32 rate;
+
+               if (!p_hwfn->qm_info.wfq_data[i].configured)
+                       continue;
+
+               rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+               use_wfq = true;
+
+               rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+               if (rc) {
+                       DP_NOTICE(p_hwfn,
+                                 "WFQ validation failed while configuring min rate\n");
+                       break;
+               }
+       }
+
+       if (!rc && use_wfq)
+               qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+       else
+               qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+       return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+               __qed_configure_vp_wfq_on_link_change(p_hwfn,
+                                                     p_hwfn->p_dpc_ptt,
+                                                     min_pf_rate);
+       }
+}
+
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_mcp_link_state *p_link,
+                                    u8 max_bw)
+{
+       int rc = 0;
+
+       p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+       if (!p_link->line_speed && (max_bw != 100))
+               return rc;
+
+       p_link->speed = (p_link->line_speed * max_bw) / 100;
+       p_hwfn->qm_info.pf_rl = p_link->speed;
+
+       /* Since the limiter also affects Tx-switched traffic, we don't want it
+        * to limit such traffic in case there's no actual limit.
+        * In that case, set limit to imaginary high boundary.
+        */
+       if (max_bw == 100)
+               p_hwfn->qm_info.pf_rl = 100000;
+
+       rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+                           p_hwfn->qm_info.pf_rl);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                  "Configured MAX bandwidth to be %08x Mb/sec\n",
+                  p_link->speed);
+
+       return rc;
+}
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
+{
+       int i, rc = -EINVAL;
+
+       if (max_bw < 1 || max_bw > 100) {
+               DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
+               return rc;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+               struct qed_mcp_link_state *p_link;
+               struct qed_ptt *p_ptt;
+
+               p_link = &p_lead->mcp_info->link_output;
+
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return -EBUSY;
+
+               rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+                                                     p_link, max_bw);
+
+               qed_ptt_release(p_hwfn, p_ptt);
+
+               if (rc)
+                       break;
+       }
+
+       return rc;
+}
+
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+                                    struct qed_ptt *p_ptt,
+                                    struct qed_mcp_link_state *p_link,
+                                    u8 min_bw)
+{
+       int rc = 0;
+
+       p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+       p_hwfn->qm_info.pf_wfq = min_bw;
+
+       if (!p_link->line_speed)
+               return rc;
+
+       p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+       rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                  "Configured MIN bandwidth to be %d Mb/sec\n",
+                  p_link->min_pf_rate);
+
+       return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
+{
+       int i, rc = -EINVAL;
+
+       if (min_bw < 1 || min_bw > 100) {
+               DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
+               return rc;
+       }
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+               struct qed_mcp_link_state *p_link;
+               struct qed_ptt *p_ptt;
+
+               p_link = &p_lead->mcp_info->link_output;
+
+               p_ptt = qed_ptt_acquire(p_hwfn);
+               if (!p_ptt)
+                       return -EBUSY;
+
+               rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+                                                     p_link, min_bw);
+               if (rc) {
+                       qed_ptt_release(p_hwfn, p_ptt);
+                       return rc;
+               }
+
+               if (p_link->min_pf_rate) {
+                       u32 min_rate = p_link->min_pf_rate;
+
+                       rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
+                                                                  p_ptt,
+                                                                  min_rate);
+               }
+
+               qed_ptt_release(p_hwfn, p_ptt);
+       }
+
+       return rc;
+}