qed: Update qed_hsi.h for fw 8.59.1.0
[linux-2.6-microblaze.git] / drivers / net / ethernet / qlogic / qed / qed_sriov.c
index 2a67b13..9556a2c 100644 (file)
 #include "qed_sp.h"
 #include "qed_sriov.h"
 #include "qed_vf.h"
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
-                              u8 opcode,
-                              __le16 echo,
-                              union event_ring_data *data, u8 fw_return_code);
 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
 
+static u16 qed_vf_from_entity_id(__le16 entity_id)
+{
+       return le16_to_cpu(entity_id) - MAX_NUM_PFS;
+}
+
 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
 {
        u8 legacy = 0;
@@ -170,8 +171,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
                                  b_enabled_only, false))
                vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
        else
-               DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
-                      relative_vf_id);
+               DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n",
+                      __func__, relative_vf_id);
 
        return vf;
 }
@@ -309,7 +310,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
        struct qed_dmae_params params;
        struct qed_vf_info *p_vf;
 
-       p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!p_vf)
                return -EINVAL;
 
@@ -421,7 +422,7 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
        bulletin_p = p_iov_info->bulletins_phys;
        if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
                DP_ERR(p_hwfn,
-                      "qed_iov_setup_vfdb called without allocating mem first\n");
+                      "%s called without allocating mem first\n", __func__);
                return;
        }
 
@@ -465,7 +466,7 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
        num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
 
        DP_VERBOSE(p_hwfn, QED_MSG_IOV,
-                  "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+                  "%s for %d VFs\n", __func__, num_vfs);
 
        /* Allocate PF Mailbox buffer (per-VF) */
        p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
@@ -501,10 +502,10 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
                   QED_MSG_IOV,
                   "PF's Requests mailbox [%p virt 0x%llx phys],  Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
                   p_iov_info->mbx_msg_virt_addr,
-                  (u64) p_iov_info->mbx_msg_phys_addr,
+                  (u64)p_iov_info->mbx_msg_phys_addr,
                   p_iov_info->mbx_reply_virt_addr,
-                  (u64) p_iov_info->mbx_reply_phys_addr,
-                  p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+                  (u64)p_iov_info->mbx_reply_phys_addr,
+                  p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);
 
        return 0;
 }
@@ -609,7 +610,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
        if (rc)
                return rc;
 
-       /* We want PF IOV to be synonemous with the existance of p_iov_info;
+       /* We want PF IOV to be synonymous with the existence of p_iov_info;
         * In case the capability is published but there are no VFs, simply
         * de-allocate the struct.
         */
@@ -715,12 +716,12 @@ static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
        int i;
 
        /* Set VF masks and configuration - pretend */
-       qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+       qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
 
        qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
 
        /* unpretend */
-       qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+       qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
 
        /* iterate over all queues, clear sb consumer */
        for (i = 0; i < vf->num_sbs; i++)
@@ -735,7 +736,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
 {
        u32 igu_vf_conf;
 
-       qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+       qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
 
        igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
 
@@ -747,7 +748,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
        qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
 
        /* unpretend */
-       qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+       qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
 }
 
 static int
@@ -808,7 +809,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
        if (rc)
                return rc;
 
-       qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+       qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
 
        SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
        STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
@@ -817,7 +818,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
                     p_hwfn->hw_info.hw_mode);
 
        /* unpretend */
-       qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+       qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
 
        vf->state = VF_FREE;
 
@@ -905,7 +906,7 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
                                  p_block->igu_sb_id * sizeof(u64), 2, NULL);
        }
 
-       vf->num_sbs = (u8) num_rx_queues;
+       vf->num_sbs = (u8)num_rx_queues;
 
        return vf->num_sbs;
 }
@@ -989,7 +990,7 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
 
        vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
        if (!vf) {
-               DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+               DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
                return -EINVAL;
        }
 
@@ -1093,7 +1094,7 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
 
        vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
        if (!vf) {
-               DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+               DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
                return -EINVAL;
        }
 
@@ -1546,7 +1547,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
        memset(resp, 0, sizeof(*resp));
 
        /* Write the PF version so that VF would know which version
-        * is supported - might be later overriden. This guarantees that
+        * is supported - might be later overridden. This guarantees that
         * VF could recognize legacy PF based on lack of versions in reply.
         */
        pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
@@ -1898,7 +1899,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
        int sb_id;
        int rc;
 
-       vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
        if (!vf_info) {
                DP_NOTICE(p_hwfn->cdev,
                          "Failed to get VF info, invalid vfid [%d]\n",
@@ -1958,7 +1959,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
        rc = qed_sp_eth_vport_start(p_hwfn, &params);
        if (rc) {
                DP_ERR(p_hwfn,
-                      "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+                      "%s returned error %d\n", __func__, rc);
                status = PFVF_STATUS_FAILURE;
        } else {
                vf->vport_instance++;
@@ -1994,8 +1995,8 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
 
        rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
        if (rc) {
-               DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
-                      rc);
+               DP_ERR(p_hwfn, "%s returned error %d\n",
+                      __func__, rc);
                status = PFVF_STATUS_FAILURE;
        }
 
@@ -3031,7 +3032,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
                goto out;
        }
        p_rss_params = vzalloc(sizeof(*p_rss_params));
-       if (p_rss_params == NULL) {
+       if (!p_rss_params) {
                status = PFVF_STATUS_FAILURE;
                goto out;
        }
@@ -3551,6 +3552,7 @@ out:
        qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
                             sizeof(struct pfvf_def_resp_tlv), status);
 }
+
 static int
 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
                         struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
@@ -3558,7 +3560,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
        int cnt;
        u32 val;
 
-       qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+       qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
 
        for (cnt = 0; cnt < 50; cnt++) {
                val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
@@ -3566,7 +3568,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
                        break;
                msleep(20);
        }
-       qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+       qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
 
        if (cnt == 50) {
                DP_ERR(p_hwfn,
@@ -3843,7 +3845,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
        struct qed_iov_vf_mbx *mbx;
        struct qed_vf_info *p_vf;
 
-       p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!p_vf)
                return;
 
@@ -3980,7 +3982,7 @@ static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
                                                       u16 abs_vfid)
 {
-       u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+       u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
 
        if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
                DP_VERBOSE(p_hwfn,
@@ -3990,7 +3992,7 @@ static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
                return NULL;
        }
 
-       return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
+       return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
 }
 
 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
@@ -4014,13 +4016,13 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
-static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
-                                    struct malicious_vf_eqe_data *p_data)
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+                             struct fw_err_data *p_data)
 {
        struct qed_vf_info *p_vf;
 
-       p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
-
+       p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
+                                          qed_vf_from_entity_id(p_data->entity_id));
        if (!p_vf)
                return;
 
@@ -4037,16 +4039,13 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
        }
 }
 
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
-                              union event_ring_data *data, u8 fw_return_code)
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+                       union event_ring_data *data, u8 fw_return_code)
 {
        switch (opcode) {
        case COMMON_EVENT_VF_PF_CHANNEL:
                return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
                                          &data->vf_pf_channel.msg_addr);
-       case COMMON_EVENT_MALICIOUS_VF:
-               qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
-               return 0;
        default:
                DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
                        opcode);
@@ -4076,7 +4075,7 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
        struct qed_dmae_params params;
        struct qed_vf_info *vf_info;
 
-       vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!vf_info)
                return -EINVAL;
 
@@ -4177,7 +4176,7 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
        struct qed_vf_info *vf_info;
        u64 feature;
 
-       vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!vf_info) {
                DP_NOTICE(p_hwfn->cdev,
                          "Can not set forced MAC, invalid vfid [%d]\n", vfid);
@@ -4227,7 +4226,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
 {
        struct qed_vf_info *p_vf_info;
 
-       p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!p_vf_info)
                return false;
 
@@ -4238,7 +4237,7 @@ static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
 {
        struct qed_vf_info *p_vf_info;
 
-       p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!p_vf_info)
                return true;
 
@@ -4249,7 +4248,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
 {
        struct qed_vf_info *vf_info;
 
-       vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!vf_info)
                return false;
 
@@ -4267,7 +4266,7 @@ static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
                goto out;
        }
 
-       vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!vf)
                goto out;
 
@@ -4346,7 +4345,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
                return rc;
 
        rl_id = abs_vp_id;      /* The "rl_id" is set as the "vport_id" */
-       return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
+       return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val,
+                                 QM_RL_TYPE_NORMAL);
 }
 
 static int
@@ -4377,7 +4377,7 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
        struct qed_wfq_data *vf_vp_wfq;
        struct qed_vf_info *vf_info;
 
-       vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+       vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!vf_info)
                return 0;
 
@@ -4396,8 +4396,10 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
  */
 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
 {
+       /* Memory barrier for setting atomic bit */
        smp_mb__before_atomic();
        set_bit(flag, &hwfn->iov_task_flags);
+       /* Memory barrier after setting atomic bit */
        smp_mb__after_atomic();
        DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
        queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
@@ -4408,8 +4410,8 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev)
        int i;
 
        for_each_hwfn(cdev, i)
-           queue_delayed_work(cdev->hwfns[i].iov_wq,
-                              &cdev->hwfns[i].iov_task, 0);
+               queue_delayed_work(cdev->hwfns[i].iov_wq,
+                                  &cdev->hwfns[i].iov_task, 0);
 }
 
 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
@@ -4417,8 +4419,8 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
        int i, j;
 
        for_each_hwfn(cdev, i)
-           if (cdev->hwfns[i].iov_wq)
-               flush_workqueue(cdev->hwfns[i].iov_wq);
+               if (cdev->hwfns[i].iov_wq)
+                       flush_workqueue(cdev->hwfns[i].iov_wq);
 
        /* Mark VFs for disablement */
        qed_iov_set_vfs_to_disable(cdev, true);
@@ -5011,7 +5013,7 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
        }
 
        qed_for_each_vf(hwfn, i)
-           qed_iov_post_vf_bulletin(hwfn, i, ptt);
+               qed_iov_post_vf_bulletin(hwfn, i, ptt);
 
        qed_ptt_release(hwfn, ptt);
 }