#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
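+/* FW entity IDs enumerate PFs before VFs, so subtracting MAX_NUM_PFS
+ * converts an entity ID into an absolute VF ID (as consumed by
+ * qed_sriov_get_vf_from_absid() below).
+ */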
+static u16 qed_vf_from_entity_id(__le16 entity_id)
+{
+ return le16_to_cpu(entity_id) - MAX_NUM_PFS;
+}
+
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
u8 legacy = 0;
b_enabled_only, false))
vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
else
- DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
- relative_vf_id);
+ DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n",
+ __func__, relative_vf_id);
return vf;
}
struct qed_dmae_params params;
struct qed_vf_info *p_vf;
- p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf)
return -EINVAL;
bulletin_p = p_iov_info->bulletins_phys;
if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
DP_ERR(p_hwfn,
- "qed_iov_setup_vfdb called without allocating mem first\n");
+ "%s called without allocating mem first\n", __func__);
return;
}
num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+ "%s for %d VFs\n", __func__, num_vfs);
/* Allocate PF Mailbox buffer (per-VF) */
p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
QED_MSG_IOV,
"PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
p_iov_info->mbx_msg_virt_addr,
- (u64) p_iov_info->mbx_msg_phys_addr,
+ (u64)p_iov_info->mbx_msg_phys_addr,
p_iov_info->mbx_reply_virt_addr,
- (u64) p_iov_info->mbx_reply_phys_addr,
- p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+ (u64)p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);
return 0;
}
if (rc)
return rc;
- /* We want PF IOV to be synonemous with the existance of p_iov_info;
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
* In case the capability is published but there are no VFs, simply
* de-allocate the struct.
*/
int i;
/* Set VF masks and configuration - pretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
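+ /* While pretending, register accesses go through the VF's function ID;
+  * the "unpretend" below restores the PF's own FID.
+  */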
qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
/* iterate over all queues, clear sb consumer */
for (i = 0; i < vf->num_sbs; i++)
{
u32 igu_vf_conf;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
static int
if (rc)
return rc;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
p_hwfn->hw_info.hw_mode);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
vf->state = VF_FREE;
p_block->igu_sb_id * sizeof(u64), 2, NULL);
}
- vf->num_sbs = (u8) num_rx_queues;
+ vf->num_sbs = (u8)num_rx_queues;
return vf->num_sbs;
}
vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) {
- DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+ DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
return -EINVAL;
}
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!vf) {
- DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+ DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
return -EINVAL;
}
memset(resp, 0, sizeof(*resp));
/* Write the PF version so that VF would know which version
- * is supported - might be later overriden. This guarantees that
+ * is supported - might be later overridden. This guarantees that
* VF could recognize legacy PF based on lack of versions in reply.
*/
pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
int sb_id;
int rc;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Failed to get VF info, invalid vfid [%d]\n",
rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc) {
DP_ERR(p_hwfn,
- "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+ "%s returned error %d\n", __func__, rc);
status = PFVF_STATUS_FAILURE;
} else {
vf->vport_instance++;
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc) {
- DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
- rc);
+ DP_ERR(p_hwfn, "%s returned error %d\n",
+ __func__, rc);
status = PFVF_STATUS_FAILURE;
}
goto out;
}
p_rss_params = vzalloc(sizeof(*p_rss_params));
- if (p_rss_params == NULL) {
+ if (!p_rss_params) {
status = PFVF_STATUS_FAILURE;
goto out;
}
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
sizeof(struct pfvf_def_resp_tlv), status);
}
+
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
int cnt;
u32 val;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
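+ /* Poll (up to 50 x 20ms) for the VF's doorbell usage count to drain */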
for (cnt = 0; cnt < 50; cnt++) {
val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
if (!val)
break;
msleep(20);
}
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
if (cnt == 50) {
DP_ERR(p_hwfn,
struct qed_iov_vf_mbx *mbx;
struct qed_vf_info *p_vf;
- p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf)
return;
static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
u16 abs_vfid)
{
- u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
DP_VERBOSE(p_hwfn,
return NULL;
}
- return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
+ return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
- struct malicious_vf_eqe_data *p_data)
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data)
{
struct qed_vf_info *p_vf;
- p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
-
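+ /* The FW error record identifies the source by entity ID; convert it
+  * to an absolute VF ID before the lookup.
+  */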
+ p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
+ qed_vf_from_entity_id(p_data->entity_id));
if (!p_vf)
return;
}
}
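+/* Made non-static so the common EQE path can dispatch SRIOV events directly;
+ * this assumes a matching declaration now lives in a shared header, replacing
+ * the local forward declaration removed above.
+ */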
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
- union event_ring_data *data, u8 fw_return_code)
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+ union event_ring_data *data, u8 fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
&data->vf_pf_channel.msg_addr);
- case COMMON_EVENT_MALICIOUS_VF:
- qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
- return 0;
default:
DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
opcode);
struct qed_dmae_params params;
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return -EINVAL;
struct qed_vf_info *vf_info;
u64 feature;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Can not set forced MAC, invalid vfid [%d]\n", vfid);
{
struct qed_vf_info *p_vf_info;
- p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf_info)
return false;
{
struct qed_vf_info *p_vf_info;
- p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf_info)
return true;
{
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return false;
goto out;
}
- vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf)
goto out;
return rc;
rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
- return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
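+ /* The updated qed_init_global_rl() takes an explicit rate-limiter type;
+  * QM_RL_TYPE_NORMAL presumably preserves the previous default behavior.
+  */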
+ return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val,
+ QM_RL_TYPE_NORMAL);
}
static int
struct qed_wfq_data *vf_vp_wfq;
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return 0;
*/
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
+ /* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(flag, &hwfn->iov_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
int i;
for_each_hwfn(cdev, i)
- queue_delayed_work(cdev->hwfns[i].iov_wq,
- &cdev->hwfns[i].iov_task, 0);
+ queue_delayed_work(cdev->hwfns[i].iov_wq,
+ &cdev->hwfns[i].iov_task, 0);
}
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
int i, j;
for_each_hwfn(cdev, i)
- if (cdev->hwfns[i].iov_wq)
- flush_workqueue(cdev->hwfns[i].iov_wq);
+ if (cdev->hwfns[i].iov_wq)
+ flush_workqueue(cdev->hwfns[i].iov_wq);
/* Mark VFs for disablement */
qed_iov_set_vfs_to_disable(cdev, true);
}
qed_for_each_vf(hwfn, i)
- qed_iov_post_vf_bulletin(hwfn, i, ptt);
+ qed_iov_post_vf_bulletin(hwfn, i, ptt);
qed_ptt_release(hwfn, ptt);
}