// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
			       union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
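
/* A VF is 'legacy' on two independent axes, both derived from its ACQUIRE
 * message: a pre-tunnel-length fastpath HSI keeps the Rx producers in their
 * old location, and a VF that never published the QUEUE_QIDS capability gets
 * a single CID per queue zone.
 */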
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		legacy |= QED_QCID_LEGACY_VF_RX_PROD;

	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))
		legacy |= QED_QCID_LEGACY_VF_CID;

static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
		p_ramrod->personality = PERSONALITY_ETH;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		qed_sp_destroy_request(p_hwfn, p_ent);

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);

bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
			   bool b_enabled_only, bool b_non_malicious)
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",

static struct qed_queue_cid *
qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;
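
/* Expected queue state when validating a VF request: NA means don't care,
 * ENABLE means at least one CID of the requested direction must already
 * exist on the queue, DISABLE means none may.
 */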
enum qed_iov_validate_q_mode {
	QED_IOV_VALIDATE_Q_NA,
	QED_IOV_VALIDATE_Q_ENABLE,
	QED_IOV_VALIDATE_Q_DISABLE,

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					enum qed_iov_validate_q_mode mode,
	if (mode == QED_IOV_VALIDATE_Q_NA)

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
		struct qed_vf_queue_cid *p_qcid;

		p_qcid = &p_vf->vf_queues[qid].cids[i];
		if (p_qcid->b_is_tx != b_is_tx)
		return mode == QED_IOV_VALIDATE_Q_ENABLE;

	/* In case we haven't found any valid cid, then it's disabled */
	return mode == QED_IOV_VALIDATE_Q_DISABLE;

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 enum qed_iov_validate_q_mode mode)
	if (rx_qid >= p_vf->num_rxqs) {
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 enum qed_iov_validate_q_mode mode)
	if (tx_qid >= p_vf->num_txqs) {
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)

		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
	for (i = 0; i < p_vf->num_rxqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
	for (i = 0; i < p_vf->num_txqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
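
/* The bulletin board is a one-way PF-to-VF channel: the PF updates its
 * shadow copy, bumps the version, computes a CRC over everything past the
 * CRC field itself, and DMAEs the result into the VF-supplied buffer.
 */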
static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
				    int vfid, struct qed_ptt *p_ptt)
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);

	if (!p_vf->vf_bulletin)

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
		   "Number of VFs is already set to a non-zero value. Ignoring PCI configuration value\n");

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes the max number of vfs.
		 */
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		       "qed_iov_setup_vfdb called without allocating mem first\n");

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
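		/* Opaque FID layout: the PF's opaque FID in the low byte,
		 * the VF's absolute ID in bits 8-15.
		 */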
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,

		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
int qed_iov_alloc(struct qed_hwfn *p_hwfn)
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);

	p_hwfn->pf_iov_info = p_sriov;

	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
				  qed_sriov_eqe_event);

	return qed_iov_allocate_vfdb(p_hwfn);

void qed_iov_setup(struct qed_hwfn *p_hwfn)
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))

	qed_iov_setup_vfdb(p_hwfn);

void qed_iov_free(struct qed_hwfn *p_hwfn)
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);

	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);

void qed_iov_free_hw_info(struct qed_dev *cdev)
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
	struct qed_dev *cdev = p_hwfn->cdev;

	if (is_kdump_kernel())

	if (IS_VF(p_hwfn->cdev))

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * in case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on the next device,
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by later
	 * hwfns to differentiate between the two. For example, with ARI
	 * and abs_pf_id 2, an offset of 18 yields 18 - (16 - 2) = 4.
	 */
	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);
static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
				     int vfid, bool b_fail_malicious)
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))

static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
	struct qed_vf_info *vf;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);

		vf->to_disable = to_disable;

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
	if (!IS_QED_SRIOV(cdev))

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
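
/* Each PGLUE 'was_error' register tracks 32 VFs: select the register with
 * abs_vfid / 32 and clear the VF's bit within it.
 */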
static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->opaque_fid, true);

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
	/* For AH onward, configuration is per-PF. Find maximum of all
	 * the currently enabled child VFs, and set the number to be that.
	 */
	if (!QED_IS_BB(p_hwfn->cdev)) {
		qed_for_each_vf(p_hwfn, i) {
			struct qed_vf_info *p_vf;

			p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);

			current_max = max_t(u8, current_max, p_vf->num_sbs);

	if (num_sbs > current_max)
		return qed_mcp_config_vf_msix(p_hwfn, p_ptt,

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
					   vf->abs_vf_id, vf->num_sbs);

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
/**
 * qed_iov_config_perm_table() - Configure the permission zone table.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window for writing the registers.
 * @enable: The actual permission for this VF.
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {valid, VF[7:0]} - matching the (vf->abs_vf_id | BIT(8))
 * value written below.
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
	struct qed_igu_block *p_block;
	struct cau_sb_entry sb_entry;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
	p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
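
	/* Build the IGU CAM mapping line once: owned by this VF, marked
	 * valid, not a PF line; only the vector number changes per queue
	 * in the loop below.
	 */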
	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	for (qid = 0; qid < num_rx_queues; qid++) {
		p_block = qed_get_igu_free_sb(p_hwfn, false);
		vf->igu_sbs[qid] = p_block->igu_sb_id;
		p_block->status &= ~QED_IGU_STATUS_FREE;
		SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

		qed_wr(p_hwfn, p_ptt,
		       IGU_REG_MAPPING_MEMORY +
		       sizeof(u32) * p_block->igu_sb_id, val);

		/* Configure the IGU SBs that were marked valid in CAU */
		qed_init_cau_sb_entry(p_hwfn, &sb_entry,
				      p_hwfn->rel_pf_id, vf->abs_vf_id, 1);

		qed_dmae_host2grc(p_hwfn, p_ptt,
				  (u64)(uintptr_t)&sb_entry,
				  CAU_REG_SB_VAR_MEMORY +
				  p_block->igu_sb_id * sizeof(u64), 2, NULL);

	vf->num_sbs = (u8) num_rx_queues;
static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
		p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
	struct qed_bulletin_content *p_bulletin;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_available_chains = 0;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf: vf is NULL\n");

		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
				   FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
	if (!num_of_vf_available_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_queue *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid, p_queue->fw_tx_qid);

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);

	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->p_iov_info->num_vfs++;
static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf: vf is NULL\n");

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* Disabling interrupts and resetting the permission table were done
	 * during vf-close; however, we could get here without going through
	 * vf_close.
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->cdev->p_iov_info->num_vfs--;
static bool qed_iov_tlv_supported(u16 tlvtype)
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
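
/* PF<->VF channel messages are chains of TLVs terminated by a
 * CHANNEL_TLV_LIST_END entry. As an illustrative sketch (this is exactly
 * what qed_iov_prepare_resp() below does), a reply is built as:
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */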
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */

	/* Return a pointer to the start of the added tlv */
	return *offset - length;

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)

		/* Validate entry - protect against malicious VFs */
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
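
/* Replies are DMAEd to the VF in two steps: the body (everything past the
 * first 64-bit header) goes first, then the channel is marked ready, and
 * only then is the header carrying the status copied - so the VF never
 * observes a status before the full reply has landed.
 */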
static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,

	/* Once the PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel is reset to ready prior to that.
	 */
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
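
/* Build a VPORT_UPDATE response that mirrors every extended TLV found in
 * the request, marking each one as accepted or as not-supported.
 */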
static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u16 tlvs_mask, u16 tlvs_accepted)
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);

qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       bool b_enabled_only)
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);

	return &vf->p_vf_info;

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested fewer resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];

		for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
			if (!p_queue->cids[j].p_cid)

			qed_eth_queue_cid_release(p_hwfn,
						  p_queue->cids[j].p_cid);
			p_queue->cids[j].p_cid = NULL;

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
/* Returns either 0, or log(size) */
static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
	u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);

qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *p_vf,
				 struct vf_pf_resc_request *p_req,
				 struct pf_vf_resc *p_resp)
	u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
	u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
		     qed_db_addr_vf(0, DQ_DEMS_LEGACY);

	p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);

	/* If the VF didn't bother asking for QIDs then don't bother limiting
	 * the number of CIDs. The VF doesn't care about the number, and this
	 * has the likely result of causing an additional acquisition.
	 */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS))

	/* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
	 * that would make sure doorbells for all CIDs fall within the bar.
	 * If it doesn't, make sure regview window is sufficient.
	 */
	if (p_vf->acquire.vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
		bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
			bar_size = 1 << bar_size;

		if (p_hwfn->cdev->num_hwfns > 1)
		bar_size = PXP_VF_BAR0_DQ_LENGTH;

	if (bar_size / db_size < 256)
		p_resp->num_cids = min_t(u8, p_resp->num_cids,
					 (u8)(bar_size / db_size));

static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;

	/* These fields are filled for backward compatibility.
	 * Unused by modern VFs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters ||
	    p_resp->num_cids < p_req->num_cids) {
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters,
			   p_resp->num_mc_filters,
			   p_req->num_cids, p_resp->num_cids);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;

	return PFVF_STATUS_SUCCESS;

static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
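
/* ACQUIRE is the first message a VF sends: the PF validates HSI
 * compatibility, stores the request, fills in device information and the
 * agreed resource set, starts the VF in FW and posts the first bulletin.
 */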
static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that the VF knows which version is
	 * supported - might be later overridden. This guarantees that a VF
	 * can recognize a legacy PF based on the lack of versions in the
	 * reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
				  "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				  req->vfdev_info.eth_fp_hsi_major,
				  req->vfdev_info.eth_fp_hsi_minor,
				  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
			  "VF[%d] is running an old driver that doesn't support 100g\n",

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB_E4;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	/* Share our ability to use multiple queue-ids only with VFs
	 * that request it.
	 */
	if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;

	/* Share the sizes of the bars with VF */
	resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * anything.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
	struct qed_sp_vport_update_params params;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);

static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
	struct qed_filter_ucast filter;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);

qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)

	if ((events & BIT(MAC_ADDR_FORCED)) ||
	    p_vf->p_vf_info.is_trusted_configured) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
				  "PF failed to configure MAC for VF\n");

		if (p_vf->p_vf_info.is_trusted_configured)
			p_vf->configured_features |=
				BIT(VFPF_BULLETIN_MAC_ADDR);
			p_vf->configured_features |=
				BIT(MAC_ADDR_FORCED);

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
				  "PF failed to configure VLAN for VF\n");

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 QED_SPQ_MODE_EBLOCK, NULL);
				  "PF failed to configure VF vport for vlan\n");

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
			struct qed_queue_cid *p_cid = NULL;

			/* There can be at most 1 Rx queue on qzone. Find it */
			p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);

			rc = qed_sp_eth_rx_queues_update(p_hwfn,
							 QED_SPQ_MODE_EBLOCK,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  p_cid->rel.queue_id);

		p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
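
/* VPORT_START handling: enable IGU and the permission table for the VF,
 * program its status blocks in CAU, then open the vport - with any
 * hypervisor-forced MAC/VLAN settings applied right afterwards, taking
 * precedence over the VF-supplied values.
 */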
static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * if none is configured, use the supplied VF values [for old
	 * VFs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;

	/* Non-trusted VFs should enable control frame filtering */
	params.check_mac = !vf->p_vf_info.is_trusted_configured;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	vf->vport_instance++;

	/* Force configuration if needed on the newly opened vport */
	qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

	__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
	u8 status = PFVF_STATUS_SUCCESS;

	vf->vport_instance--;
	vf->spoof_chk = false;

	if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
	    (qed_iov_validate_active_txq(p_hwfn, vf))) {
		vf->b_malicious = true;
			  "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
		status = PFVF_STATUS_MALICIOUS;

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
		status = PFVF_STATUS_FAILURE;

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);

static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf,
					  u8 status, bool b_legacy)
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to the list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
		length = sizeof(*p_tlv);
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				offsetof(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;

	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
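
/* Modern VFs pass an explicit qid TLV selecting which CID inside the queue
 * zone a request refers to; legacy VFs get the fixed Rx/Tx indices instead.
 */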
static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
			     struct qed_vf_info *p_vf, bool b_is_tx)
	struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
	struct vfpf_qid_tlv *p_qid_tlv;

	/* Search for the qid if the VF published it's going to provide it */
	if (!(p_vf->acquire.vfdev_info.capabilities &
	      VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
			return QED_IOV_LEGACY_QID_TX;

		return QED_IOV_LEGACY_QID_RX;

	p_qid_tlv = (struct vfpf_qid_tlv *)
		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%2x]: Failed to provide qid\n",
			   p_vf->relative_vf_id);

		return QED_IOV_QID_INVALID;

	if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%02x]: Provided qid out-of-bounds %02x\n",
			   p_vf->relative_vf_id, p_qid_tlv->qid);
		return QED_IOV_QID_INVALID;

	return p_qid_tlv->qid;
static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
	struct qed_queue_start_common_params params;
	struct qed_queue_cid_vf_params vf_params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	u8 qid_usage_idx, vf_legacy = 0;
	struct vfpf_start_rxq_tlv *req;
	struct qed_vf_queue *p_queue;
	struct qed_queue_cid *p_cid;
	struct qed_sb_info sb_dummy;

	req = &mbx->req_virt->start_rxq;

	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
				  QED_IOV_VALIDATE_Q_DISABLE) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))

	qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
	if (qid_usage_idx == QED_IOV_QID_INVALID)

	p_queue = &vf->vf_queues[req->rx_qid];
	if (p_queue->cids[qid_usage_idx].p_cid)

	vf_legacy = qed_vf_calculate_legacy(vf);

	/* Acquire a new queue-cid */
	memset(&params, 0, sizeof(params));
	params.queue_id = p_queue->fw_rx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;

	/* Since IGU index is passed via sb_info, construct a dummy one */
	memset(&sb_dummy, 0, sizeof(sb_dummy));
	sb_dummy.igu_sb_id = req->hw_sb;
	params.p_sb = &sb_dummy;
	params.sb_idx = req->sb_index;

	memset(&vf_params, 0, sizeof(vf_params));
	vf_params.vfid = vf->relative_vf_id;
	vf_params.vf_qid = (u8)req->rx_qid;
	vf_params.vf_legacy = vf_legacy;
	vf_params.qid_usage_idx = qid_usage_idx;
	p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
				     &params, true, &vf_params);

	/* Legacy VFs have their Producers in a different location, which they
	 * calculate on their own and clean the producer prior to this.
	 */
	if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),

	rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
				      req->cqe_pbl_addr, req->cqe_pbl_size);
		status = PFVF_STATUS_FAILURE;
		qed_eth_queue_cid_release(p_hwfn, p_cid);
		p_queue->cids[qid_usage_idx].p_cid = p_cid;
		p_queue->cids[qid_usage_idx].b_is_tx = false;
		status = PFVF_STATUS_SUCCESS;
		vf->num_active_rxqs++;

	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
				      QED_QCID_LEGACY_VF_RX_PROD));
2166 qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2167 struct qed_tunnel_info *p_tun,
2168 u16 tunn_feature_mask)
2170 p_resp->tunn_feature_mask = tunn_feature_mask;
2171 p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2172 p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2173 p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2174 p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2175 p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2176 p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2177 p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2178 p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2179 p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2180 p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2181 p_resp->geneve_udp_port = p_tun->geneve_port.port;
2182 p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2186 __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2187 struct qed_tunn_update_type *p_tun,
2188 enum qed_tunn_mode mask, u8 tun_cls)
2190 if (p_req->tun_mode_update_mask & BIT(mask)) {
2191 p_tun->b_update_mode = true;
2193 if (p_req->tunn_mode & BIT(mask))
2194 p_tun->b_mode_enabled = true;
2197 p_tun->tun_cls = tun_cls;
2201 qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2202 struct qed_tunn_update_type *p_tun,
2203 struct qed_tunn_update_udp_port *p_port,
2204 enum qed_tunn_mode mask,
2205 u8 tun_cls, u8 update_port, u16 port)
2208 p_port->b_update_port = true;
2209 p_port->port = port;
2212 __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2216 qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2218 bool b_update_requested = false;
2220 if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2221 p_req->update_geneve_port || p_req->update_vxlan_port)
2222 b_update_requested = true;
2224 return b_update_requested;
2227 static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
2229 if (tun->b_update_mode && !tun->b_mode_enabled) {
2230 tun->b_update_mode = false;
2236 qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
2237 u16 *tun_features, bool *update,
2238 struct qed_tunnel_info *tun_src)
2240 struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
2241 struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
2242 u16 bultn_vxlan_port, bultn_geneve_port;
2243 void *cookie = p_hwfn->cdev->ops_cookie;
2246 *tun_features = p_hwfn->cdev->tunn_feature_mask;
2247 bultn_vxlan_port = tun->vxlan_port.port;
2248 bultn_geneve_port = tun->geneve_port.port;
2249 qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
2250 qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
2251 qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
2252 qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
2253 qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
2255 if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
2256 (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2257 tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2258 tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2259 tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
2260 tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
2261 tun_src->b_update_rx_cls = false;
2262 tun_src->b_update_tx_cls = false;
2266 if (tun_src->vxlan_port.b_update_port) {
2267 if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
2268 tun_src->vxlan_port.b_update_port = false;
2271 bultn_vxlan_port = tun_src->vxlan_port.port;
2275 if (tun_src->geneve_port.b_update_port) {
2276 if (tun_src->geneve_port.port == tun->geneve_port.port) {
2277 tun_src->geneve_port.b_update_port = false;
2280 bultn_geneve_port = tun_src->geneve_port.port;
2284 qed_for_each_vf(p_hwfn, i) {
2285 qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
2289 qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2290 ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
2295 static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
2296 struct qed_ptt *p_ptt,
2297 struct qed_vf_info *p_vf)
2299 struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
2300 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2301 struct pfvf_update_tunn_param_tlv *p_resp;
2302 struct vfpf_update_tunn_param_tlv *p_req;
2303 u8 status = PFVF_STATUS_SUCCESS;
2304 bool b_update_required = false;
2305 struct qed_tunnel_info tunn;
2306 u16 tunn_feature_mask = 0;
2309 mbx->offset = (u8 *)mbx->reply_virt;
2311 memset(&tunn, 0, sizeof(tunn));
2312 p_req = &mbx->req_virt->tunn_param_update;
2314 if (!qed_iov_pf_validate_tunn_param(p_req)) {
2315 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2316 "No tunnel update requested by VF\n");
2317 status = PFVF_STATUS_FAILURE;
2321 tunn.b_update_rx_cls = p_req->update_tun_cls;
2322 tunn.b_update_tx_cls = p_req->update_tun_cls;
2324 qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2325 QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2326 p_req->update_vxlan_port,
2328 qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2329 QED_MODE_L2GENEVE_TUNN,
2330 p_req->l2geneve_clss,
2331 p_req->update_geneve_port,
2332 p_req->geneve_port);
2333 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2334 QED_MODE_IPGENEVE_TUNN,
2335 p_req->ipgeneve_clss);
2336 __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2337 QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
2338 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2339 QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
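/* The helpers above translate the VF's TLV into the PF-side qed_tunnel_info
 * aggregate: VXLAN and L2 GENEVE may also carry a UDP port update, so they
 * go through the extended variant that takes a qed_tunn_update_udp_port,
 * while the remaining tunnel types update only mode and classification.
 */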
2341 /* If the PF modifies the VF's request, it should
2342 * still return an error for a partial or modified
2343 * configuration, as opposed to the requested one.
2345 rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
2346 &b_update_required, &tunn);
2349 status = PFVF_STATUS_FAILURE;
2351 /* Check whether the QED client is willing to update anything */
2352 if (b_update_required) {
2355 rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
2356 QED_SPQ_MODE_EBLOCK, NULL);
2358 status = PFVF_STATUS_FAILURE;
2360 geneve_port = p_tun->geneve_port.port;
2361 qed_for_each_vf(p_hwfn, i) {
2362 qed_iov_bulletin_set_udp_ports(p_hwfn, i,
2363 p_tun->vxlan_port.port,
2369 p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
2370 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2372 qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2373 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2374 sizeof(struct channel_list_end_tlv));
2376 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2379 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
2380 struct qed_ptt *p_ptt,
2381 struct qed_vf_info *p_vf,
2384 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2385 struct pfvf_start_queue_resp_tlv *p_tlv;
2386 bool b_legacy = false;
2389 mbx->offset = (u8 *)mbx->reply_virt;
2391 /* Taking a bigger struct instead of adding a TLV to list was a
2392 * mistake, but one which we're now stuck with, as some older
2393 * clients assume the size of the previous response.
2395 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2396 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2400 length = sizeof(*p_tlv);
2402 length = sizeof(struct pfvf_def_resp_tlv);
2404 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2406 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2407 sizeof(struct channel_list_end_tlv));
2409 /* Update the TLV with the response */
2410 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2411 p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
2413 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2416 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
2417 struct qed_ptt *p_ptt,
2418 struct qed_vf_info *vf)
2420 struct qed_queue_start_common_params params;
2421 struct qed_queue_cid_vf_params vf_params;
2422 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2423 u8 status = PFVF_STATUS_NO_RESOURCE;
2424 struct vfpf_start_txq_tlv *req;
2425 struct qed_vf_queue *p_queue;
2426 struct qed_queue_cid *p_cid;
2427 struct qed_sb_info sb_dummy;
2428 u8 qid_usage_idx, vf_legacy;
2433 memset(&params, 0, sizeof(params));
2434 req = &mbx->req_virt->start_txq;
2436 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2437 QED_IOV_VALIDATE_Q_NA) ||
2438 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2441 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2442 if (qid_usage_idx == QED_IOV_QID_INVALID)
2445 p_queue = &vf->vf_queues[req->tx_qid];
2446 if (p_queue->cids[qid_usage_idx].p_cid)
2449 vf_legacy = qed_vf_calculate_legacy(vf);
2451 /* Acquire a new queue-cid */
2452 params.queue_id = p_queue->fw_tx_qid;
2453 params.vport_id = vf->vport_id;
2454 params.stats_id = vf->abs_vf_id + 0x10;
2456 /* Since IGU index is passed via sb_info, construct a dummy one */
2457 memset(&sb_dummy, 0, sizeof(sb_dummy));
2458 sb_dummy.igu_sb_id = req->hw_sb;
2459 params.p_sb = &sb_dummy;
2460 params.sb_idx = req->sb_index;
2462 memset(&vf_params, 0, sizeof(vf_params));
2463 vf_params.vfid = vf->relative_vf_id;
2464 vf_params.vf_qid = (u8)req->tx_qid;
2465 vf_params.vf_legacy = vf_legacy;
2466 vf_params.qid_usage_idx = qid_usage_idx;
2468 p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2469 &params, false, &vf_params);
2473 pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
2474 rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
2475 req->pbl_addr, req->pbl_size, pq);
2477 status = PFVF_STATUS_FAILURE;
2478 qed_eth_queue_cid_release(p_hwfn, p_cid);
2480 status = PFVF_STATUS_SUCCESS;
2481 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2482 p_queue->cids[qid_usage_idx].b_is_tx = true;
2487 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
2490 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2491 struct qed_vf_info *vf,
2493 u8 qid_usage_idx, bool cqe_completion)
2495 struct qed_vf_queue *p_queue;
2498 if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
2501 "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
2502 vf->relative_vf_id, rxq_id, qid_usage_idx);
2506 p_queue = &vf->vf_queues[rxq_id];
2508 /* We've validated the index and the existence of the active RXQ -
2509 * now we need to make sure that it's using the correct qid.
2511 if (!p_queue->cids[qid_usage_idx].p_cid ||
2512 p_queue->cids[qid_usage_idx].b_is_tx) {
2513 struct qed_queue_cid *p_cid;
2515 p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
2518 "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
2520 rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
2524 /* Now that we know we have a valid Rx-queue - close it */
2525 rc = qed_eth_rx_queue_stop(p_hwfn,
2526 p_queue->cids[qid_usage_idx].p_cid,
2527 false, cqe_completion);
2531 p_queue->cids[qid_usage_idx].p_cid = NULL;
2532 vf->num_active_rxqs--;
2537 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2538 struct qed_vf_info *vf,
2539 u16 txq_id, u8 qid_usage_idx)
2541 struct qed_vf_queue *p_queue;
2544 if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
2547 p_queue = &vf->vf_queues[txq_id];
2548 if (!p_queue->cids[qid_usage_idx].p_cid ||
2549 !p_queue->cids[qid_usage_idx].b_is_tx)
2552 rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
2556 p_queue->cids[qid_usage_idx].p_cid = NULL;
2560 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2561 struct qed_ptt *p_ptt,
2562 struct qed_vf_info *vf)
2564 u16 length = sizeof(struct pfvf_def_resp_tlv);
2565 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2566 u8 status = PFVF_STATUS_FAILURE;
2567 struct vfpf_stop_rxqs_tlv *req;
2571 /* There has never been an official driver that used this interface
2572 * for stopping multiple queues, and it is now considered deprecated.
2573 * Validate this isn't used here.
2575 req = &mbx->req_virt->stop_rxqs;
2576 if (req->num_rxqs != 1) {
2577 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2578 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2579 vf->relative_vf_id);
2580 status = PFVF_STATUS_NOT_SUPPORTED;
2584 /* Find which qid-index is associated with the queue */
2585 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2586 if (qid_usage_idx == QED_IOV_QID_INVALID)
2589 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2590 qid_usage_idx, req->cqe_completion);
2592 status = PFVF_STATUS_SUCCESS;
2594 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2598 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2599 struct qed_ptt *p_ptt,
2600 struct qed_vf_info *vf)
2602 u16 length = sizeof(struct pfvf_def_resp_tlv);
2603 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2604 u8 status = PFVF_STATUS_FAILURE;
2605 struct vfpf_stop_txqs_tlv *req;
2609 /* There has never been an official driver that used this interface
2610 * for stopping multiple queues, and it is now considered deprecated.
2611 * Validate this isn't used here.
2613 req = &mbx->req_virt->stop_txqs;
2614 if (req->num_txqs != 1) {
2615 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2616 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2617 vf->relative_vf_id);
2618 status = PFVF_STATUS_NOT_SUPPORTED;
2622 /* Find which qid-index is associated with the queue */
2623 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
2624 if (qid_usage_idx == QED_IOV_QID_INVALID)
2627 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
2629 status = PFVF_STATUS_SUCCESS;
2632 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2636 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2637 struct qed_ptt *p_ptt,
2638 struct qed_vf_info *vf)
2640 struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
2641 u16 length = sizeof(struct pfvf_def_resp_tlv);
2642 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2643 struct vfpf_update_rxq_tlv *req;
2644 u8 status = PFVF_STATUS_FAILURE;
2645 u8 complete_event_flg;
2646 u8 complete_cqe_flg;
2651 req = &mbx->req_virt->update_rxq;
2652 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2653 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2655 qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
2656 if (qid_usage_idx == QED_IOV_QID_INVALID)
2659 /* There shouldn't exist a VF that uses queue-qids yet uses this
2660 * API with multiple Rx queues. Validate this.
2662 if ((vf->acquire.vfdev_info.capabilities &
2663 VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
2664 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2665 "VF[%d] supports QIDs but sends multiple queues\n",
2666 vf->relative_vf_id);
2670 /* Validate inputs - for the legacy case this is still true since
2671 * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
2673 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2674 if (!qed_iov_validate_rxq(p_hwfn, vf, i,
2675 QED_IOV_VALIDATE_Q_NA) ||
2676 !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
2677 vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
2678 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2679 "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2680 vf->relative_vf_id, req->rx_qid,
2686 /* Prepare the handlers */
2687 for (i = 0; i < req->num_rxqs; i++) {
2688 u16 qid = req->rx_qid + i;
2690 handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
2693 rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2697 QED_SPQ_MODE_EBLOCK, NULL);
2701 status = PFVF_STATUS_SUCCESS;
2703 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2707 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2708 void *p_tlvs_list, u16 req_type)
2710 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2714 if (!p_tlv->length) {
2715 DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2719 if (p_tlv->type == req_type) {
2720 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2721 "Extended tlv type %d, length %d found\n",
2722 p_tlv->type, p_tlv->length);
2726 len += p_tlv->length;
2727 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2729 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2730 DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
2733 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
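/* The request buffer walked above is a chain of TLVs laid out back-to-back:
 *
 *   [ type | length | payload ][ type | length | payload ] ...
 *   ... [ CHANNEL_TLV_LIST_END ]
 *
 * The search simply advances by each TLV's length until it finds req_type
 * or the list-end sentinel, bailing out on a zero-length TLV or once the
 * running offset would overrun TLV_BUFFER_SIZE.
 */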
2739 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2740 struct qed_sp_vport_update_params *p_data,
2741 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2743 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2744 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2746 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2747 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2751 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2752 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2753 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2754 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2755 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2759 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2760 struct qed_sp_vport_update_params *p_data,
2761 struct qed_vf_info *p_vf,
2762 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2764 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2765 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2767 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2768 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2772 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2774 /* Ignore the VF request if we're forcing a vlan */
2775 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
2776 p_data->update_inner_vlan_removal_flg = 1;
2777 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2780 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2784 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2785 struct qed_sp_vport_update_params *p_data,
2786 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2788 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2789 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2791 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2792 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2794 if (!p_tx_switch_tlv)
2797 p_data->update_tx_switching_flg = 1;
2798 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2799 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2803 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2804 struct qed_sp_vport_update_params *p_data,
2805 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2807 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2808 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2810 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2811 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2815 p_data->update_approx_mcast_flg = 1;
2816 memcpy(p_data->bins, p_mcast_tlv->bins,
2817 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2818 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2822 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2823 struct qed_sp_vport_update_params *p_data,
2824 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2826 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2827 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2828 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2830 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2831 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2835 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2836 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2837 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2838 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2839 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2843 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2844 struct qed_sp_vport_update_params *p_data,
2845 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2847 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2848 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2850 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2851 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2853 if (!p_accept_any_vlan)
2856 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2857 p_data->update_accept_any_vlan_flg =
2858 p_accept_any_vlan->update_accept_any_vlan_flg;
2859 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2863 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2864 struct qed_vf_info *vf,
2865 struct qed_sp_vport_update_params *p_data,
2866 struct qed_rss_params *p_rss,
2867 struct qed_iov_vf_mbx *p_mbx,
2868 u16 *tlvs_mask, u16 *tlvs_accepted)
2870 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2871 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2872 bool b_reject = false;
2876 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2877 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2879 p_data->rss_params = NULL;
2883 memset(p_rss, 0, sizeof(struct qed_rss_params));
2885 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2886 VFPF_UPDATE_RSS_CONFIG_FLAG);
2887 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2888 VFPF_UPDATE_RSS_CAPS_FLAG);
2889 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2890 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2891 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2892 VFPF_UPDATE_RSS_KEY_FLAG);
2894 p_rss->rss_enable = p_rss_tlv->rss_enable;
2895 p_rss->rss_eng_id = vf->relative_vf_id + 1;
2896 p_rss->rss_caps = p_rss_tlv->rss_caps;
2897 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2898 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2900 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2901 (1 << p_rss_tlv->rss_table_size_log));
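/* Example: a VF advertising rss_table_size_log == 7 requests 128
 * indirection entries, but table_size is still capped at
 * ARRAY_SIZE(p_rss->rss_ind_table), so an oversized log value from the VF
 * cannot overrun the PF-side table in the loop below.
 */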
2903 for (i = 0; i < table_size; i++) {
2904 struct qed_queue_cid *p_cid;
2906 q_idx = p_rss_tlv->rss_ind_table[i];
2907 if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
2908 QED_IOV_VALIDATE_Q_ENABLE)) {
2911 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2912 vf->relative_vf_id, q_idx);
2917 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
2918 p_rss->rss_ind_table[i] = p_cid;
2921 p_data->rss_params = p_rss;
2923 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2925 *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
2929 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2930 struct qed_vf_info *vf,
2931 struct qed_sp_vport_update_params *p_data,
2932 struct qed_sge_tpa_params *p_sge_tpa,
2933 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2935 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2936 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2938 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2939 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2941 if (!p_sge_tpa_tlv) {
2942 p_data->sge_tpa_params = NULL;
2946 memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2948 p_sge_tpa->update_tpa_en_flg =
2949 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2950 p_sge_tpa->update_tpa_param_flg =
2951 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2952 VFPF_UPDATE_TPA_PARAM_FLAG);
2954 p_sge_tpa->tpa_ipv4_en_flg =
2955 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2956 p_sge_tpa->tpa_ipv6_en_flg =
2957 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2958 p_sge_tpa->tpa_pkt_split_flg =
2959 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2960 p_sge_tpa->tpa_hdr_data_split_flg =
2961 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2962 p_sge_tpa->tpa_gro_consistent_flg =
2963 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2965 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2966 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2967 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2968 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2969 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2971 p_data->sge_tpa_params = p_sge_tpa;
2973 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2976 static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
2978 struct qed_sp_vport_update_params *params,
2981 u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
2982 struct qed_filter_accept_flags *flags = &params->accept_flags;
2983 struct qed_public_vf_info *vf_info;
2985 /* Untrusted VFs can't even be trusted to know that fact.
2986 * Simply indicate everything is configured fine, and trace
2987 * configuration 'behind their back'.
2989 if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
2992 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
2994 if (flags->update_rx_mode_config) {
2995 vf_info->rx_accept_mode = flags->rx_accept_filter;
2996 if (!vf_info->is_trusted_configured)
2997 flags->rx_accept_filter &= ~mask;
3000 if (flags->update_tx_mode_config) {
3001 vf_info->tx_accept_mode = flags->tx_accept_filter;
3002 if (!vf_info->is_trusted_configured)
3003 flags->tx_accept_filter &= ~mask;
3009 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
3010 struct qed_ptt *p_ptt,
3011 struct qed_vf_info *vf)
3013 struct qed_rss_params *p_rss_params = NULL;
3014 struct qed_sp_vport_update_params params;
3015 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3016 struct qed_sge_tpa_params sge_tpa_params;
3017 u16 tlvs_mask = 0, tlvs_accepted = 0;
3018 u8 status = PFVF_STATUS_SUCCESS;
3022 /* Validate the VF can send such a request */
3023 if (!vf->vport_instance) {
3026 "No VPORT instance available for VF[%d], failing vport update\n",
3028 status = PFVF_STATUS_FAILURE;
3031 p_rss_params = vzalloc(sizeof(*p_rss_params));
3032 if (!p_rss_params) {
3033 status = PFVF_STATUS_FAILURE;
3037 memset(&params, 0, sizeof(params));
3038 params.opaque_fid = vf->opaque_fid;
3039 params.vport_id = vf->vport_id;
3040 params.rss_params = NULL;
3042 /* Search for extended tlvs list and update values
3043 * from VF in struct qed_sp_vport_update_params.
3045 qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3046 qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3047 qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3048 qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3049 qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3050 qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3051 qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
3052 &sge_tpa_params, mbx, &tlvs_mask);
3054 tlvs_accepted = tlvs_mask;
3056 /* Some of the extended TLVs need to be validated first; in that case,
3057 * they can update the mask without updating the accepted [so that the
3058 * PF can communicate to the VF that it has rejected the request].
3060 qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3061 mbx, &tlvs_mask, &tlvs_accepted);
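/* Example of the mask/accepted split: if the RSS TLV was present but named
 * an invalid queue, QED_IOV_VP_UPDATE_RSS is set in tlvs_mask yet left
 * clear in tlvs_accepted, which is how the response tells the VF that the
 * feature was seen but rejected.
 */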
3063 if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
3064 &params, &tlvs_accepted)) {
3066 status = PFVF_STATUS_NOT_SUPPORTED;
3070 if (!tlvs_accepted) {
3072 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3073 "Upper-layer prevents VF vport configuration\n");
3075 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3076 "No feature tlvs found for vport update\n");
3077 status = PFVF_STATUS_NOT_SUPPORTED;
3081 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
3084 status = PFVF_STATUS_FAILURE;
3087 vfree(p_rss_params);
3088 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3089 tlvs_mask, tlvs_accepted);
3090 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3093 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
3094 struct qed_vf_info *p_vf,
3095 struct qed_filter_ucast *p_params)
3099 /* First remove entries and then add new ones */
3100 if (p_params->opcode == QED_FILTER_REMOVE) {
3101 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3102 if (p_vf->shadow_config.vlans[i].used &&
3103 p_vf->shadow_config.vlans[i].vid ==
3105 p_vf->shadow_config.vlans[i].used = false;
3108 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3111 "VF [%d] - Tries to remove a non-existing vlan\n",
3112 p_vf->relative_vf_id);
3115 } else if (p_params->opcode == QED_FILTER_REPLACE ||
3116 p_params->opcode == QED_FILTER_FLUSH) {
3117 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3118 p_vf->shadow_config.vlans[i].used = false;
3121 /* In forced mode, we're willing to remove entries - but we don't add new ones */
3124 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
3127 if (p_params->opcode == QED_FILTER_ADD ||
3128 p_params->opcode == QED_FILTER_REPLACE) {
3129 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3130 if (p_vf->shadow_config.vlans[i].used)
3133 p_vf->shadow_config.vlans[i].used = true;
3134 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3138 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
3141 "VF [%d] - Tries to configure more than %d vlan filters\n",
3142 p_vf->relative_vf_id,
3143 QED_ETH_VF_NUM_VLAN_FILTERS + 1);
3151 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
3152 struct qed_vf_info *p_vf,
3153 struct qed_filter_ucast *p_params)
3157 /* If we're in forced-mode, we don't allow any change */
3158 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
3161 /* Don't keep track of shadow copy since we don't intend to restore. */
3162 if (p_vf->p_vf_info.is_trusted_configured)
3165 /* First remove entries and then add new ones */
3166 if (p_params->opcode == QED_FILTER_REMOVE) {
3167 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3168 if (ether_addr_equal(p_vf->shadow_config.macs[i],
3170 eth_zero_addr(p_vf->shadow_config.macs[i]);
3175 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3176 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3177 "MAC isn't configured\n");
3180 } else if (p_params->opcode == QED_FILTER_REPLACE ||
3181 p_params->opcode == QED_FILTER_FLUSH) {
3182 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
3183 eth_zero_addr(p_vf->shadow_config.macs[i]);
3186 /* List the new MAC address */
3187 if (p_params->opcode != QED_FILTER_ADD &&
3188 p_params->opcode != QED_FILTER_REPLACE)
3191 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
3192 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
3193 ether_addr_copy(p_vf->shadow_config.macs[i],
3195 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3196 "Added MAC at %d entry in shadow\n", i);
3201 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
3202 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
3210 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
3211 struct qed_vf_info *p_vf,
3212 struct qed_filter_ucast *p_params)
3216 if (p_params->type == QED_FILTER_MAC) {
3217 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3222 if (p_params->type == QED_FILTER_VLAN)
3223 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3228 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
3229 int vfid, struct qed_filter_ucast *params)
3231 struct qed_public_vf_info *vf;
3233 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3237 /* No real decision to make; Store the configured MAC */
3238 if (params->type == QED_FILTER_MAC ||
3239 params->type == QED_FILTER_MAC_VLAN) {
3240 ether_addr_copy(vf->mac, params->mac);
3242 if (vf->is_trusted_configured) {
3243 qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
3245 /* Update and post bulletin again */
3246 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
3253 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
3254 struct qed_ptt *p_ptt,
3255 struct qed_vf_info *vf)
3257 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3258 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3259 struct vfpf_ucast_filter_tlv *req;
3260 u8 status = PFVF_STATUS_SUCCESS;
3261 struct qed_filter_ucast params;
3264 /* Prepare the unicast filter params */
3265 memset(&params, 0, sizeof(struct qed_filter_ucast));
3266 req = &mbx->req_virt->ucast_filter;
3267 params.opcode = (enum qed_filter_opcode)req->opcode;
3268 params.type = (enum qed_filter_ucast_type)req->type;
3270 params.is_rx_filter = 1;
3271 params.is_tx_filter = 1;
3272 params.vport_to_remove_from = vf->vport_id;
3273 params.vport_to_add_to = vf->vport_id;
3274 memcpy(params.mac, req->mac, ETH_ALEN);
3275 params.vlan = req->vlan;
3279 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3280 vf->abs_vf_id, params.opcode, params.type,
3281 params.is_rx_filter ? "RX" : "",
3282 params.is_tx_filter ? "TX" : "",
3283 params.vport_to_add_to,
3284 params.mac[0], params.mac[1],
3285 params.mac[2], params.mac[3],
3286 params.mac[4], params.mac[5], params.vlan);
3288 if (!vf->vport_instance) {
3291 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3293 status = PFVF_STATUS_FAILURE;
3297 /* Update shadow copy of the VF configuration */
3298 if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
3299 status = PFVF_STATUS_FAILURE;
3303 /* Determine if the unicast filtering is acceptable to the PF */
3304 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
3305 (params.type == QED_FILTER_VLAN ||
3306 params.type == QED_FILTER_MAC_VLAN)) {
3307 /* Once a VLAN is forced or a PVID is set, do not allow
3308 * adding/replacing any further VLANs.
3310 if (params.opcode == QED_FILTER_ADD ||
3311 params.opcode == QED_FILTER_REPLACE)
3312 status = PFVF_STATUS_FORCED;
3316 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
3317 (params.type == QED_FILTER_MAC ||
3318 params.type == QED_FILTER_MAC_VLAN)) {
3319 if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
3320 (params.opcode != QED_FILTER_ADD &&
3321 params.opcode != QED_FILTER_REPLACE))
3322 status = PFVF_STATUS_FORCED;
3326 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
3328 status = PFVF_STATUS_FAILURE;
3332 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3333 QED_SPQ_MODE_CB, NULL);
3335 status = PFVF_STATUS_FAILURE;
3338 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3339 sizeof(struct pfvf_def_resp_tlv), status);
3342 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
3343 struct qed_ptt *p_ptt,
3344 struct qed_vf_info *vf)
3349 for (i = 0; i < vf->num_sbs; i++)
3350 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3352 vf->opaque_fid, false);
3354 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3355 sizeof(struct pfvf_def_resp_tlv),
3356 PFVF_STATUS_SUCCESS);
3359 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
3360 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
3362 u16 length = sizeof(struct pfvf_def_resp_tlv);
3363 u8 status = PFVF_STATUS_SUCCESS;
3365 /* Disable Interrupts for VF */
3366 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3368 /* Reset Permission table */
3369 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3371 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3375 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
3376 struct qed_ptt *p_ptt,
3377 struct qed_vf_info *p_vf)
3379 u16 length = sizeof(struct pfvf_def_resp_tlv);
3380 u8 status = PFVF_STATUS_SUCCESS;
3383 qed_iov_vf_cleanup(p_hwfn, p_vf);
3385 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3386 /* Stopping the VF */
3387 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3391 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
3393 status = PFVF_STATUS_FAILURE;
3396 p_vf->state = VF_STOPPED;
3399 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3403 static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
3404 struct qed_ptt *p_ptt,
3405 struct qed_vf_info *p_vf)
3407 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3408 struct pfvf_read_coal_resp_tlv *p_resp;
3409 struct vfpf_read_coal_req_tlv *req;
3410 u8 status = PFVF_STATUS_FAILURE;
3411 struct qed_vf_queue *p_queue;
3412 struct qed_queue_cid *p_cid;
3413 u16 coal = 0, qid, i;
3417 mbx->offset = (u8 *)mbx->reply_virt;
3418 req = &mbx->req_virt->read_coal_req;
3421 b_is_rx = req->is_rx ? true : false;
3424 if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
3425 QED_IOV_VALIDATE_Q_ENABLE)) {
3426 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3427 "VF[%d]: Invalid Rx queue_id = %d\n",
3428 p_vf->abs_vf_id, qid);
3432 p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
3433 rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3437 if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
3438 QED_IOV_VALIDATE_Q_ENABLE)) {
3439 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3440 "VF[%d]: Invalid Tx queue_id = %d\n",
3441 p_vf->abs_vf_id, qid);
3444 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3445 p_queue = &p_vf->vf_queues[qid];
3446 if ((!p_queue->cids[i].p_cid) ||
3447 (!p_queue->cids[i].b_is_tx))
3450 p_cid = p_queue->cids[i].p_cid;
3452 rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
3459 status = PFVF_STATUS_SUCCESS;
3462 p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
3464 p_resp->coal = coal;
3466 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
3467 sizeof(struct channel_list_end_tlv));
3469 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
3472 static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
3473 struct qed_ptt *p_ptt,
3474 struct qed_vf_info *vf)
3476 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3477 struct vfpf_update_coalesce *req;
3478 u8 status = PFVF_STATUS_FAILURE;
3479 struct qed_queue_cid *p_cid;
3480 u16 rx_coal, tx_coal;
3484 req = &mbx->req_virt->update_coalesce;
3486 rx_coal = req->rx_coal;
3487 tx_coal = req->tx_coal;
3490 if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
3491 QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
3492 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3493 "VF[%d]: Invalid Rx queue_id = %d\n",
3494 vf->abs_vf_id, qid);
3498 if (!qed_iov_validate_txq(p_hwfn, vf, qid,
3499 QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
3500 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3501 "VF[%d]: Invalid Tx queue_id = %d\n",
3502 vf->abs_vf_id, qid);
3508 "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3509 vf->abs_vf_id, rx_coal, tx_coal, qid);
3512 p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
3514 rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3518 "VF[%d]: Unable to set rx queue = %d coalesce\n",
3519 vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3522 vf->rx_coal = rx_coal;
3526 struct qed_vf_queue *p_queue = &vf->vf_queues[qid];
3528 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3529 if (!p_queue->cids[i].p_cid)
3532 if (!p_queue->cids[i].b_is_tx)
3535 rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3536 p_queue->cids[i].p_cid);
3541 "VF[%d]: Unable to set tx queue coalesce\n",
3546 vf->tx_coal = tx_coal;
3549 status = PFVF_STATUS_SUCCESS;
3551 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3552 sizeof(struct pfvf_def_resp_tlv), status);
3555 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
3556 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3561 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
3563 for (cnt = 0; cnt < 50; cnt++) {
3564 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3569 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
3573 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3574 p_vf->abs_vf_id, val);
3582 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
3583 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3585 u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
3588 /* Read initial consumers & producers */
3589 for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
3592 cons[i] = qed_rd(p_hwfn, p_ptt,
3593 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3595 prod = qed_rd(p_hwfn, p_ptt,
3596 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3598 distance[i] = prod - cons[i];
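/* Both counters are free-running u32 values, so "prod - cons" here and
 * "tmp - cons[i]" below rely on unsigned wraparound arithmetic; the wait
 * loop merely checks that each VOQ's consumer has advanced by at least
 * the initially observed distance.
 */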
3601 /* Wait for consumers to pass the producers */
3603 for (cnt = 0; cnt < 50; cnt++) {
3604 for (; i < MAX_NUM_VOQS_E4; i++) {
3607 tmp = qed_rd(p_hwfn, p_ptt,
3608 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3610 if (distance[i] > tmp - cons[i])
3614 if (i == MAX_NUM_VOQS_E4)
3621 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3622 p_vf->abs_vf_id, i);
3629 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
3630 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3634 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3638 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3646 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
3647 struct qed_ptt *p_ptt,
3648 u16 rel_vf_id, u32 *ack_vfs)
3650 struct qed_vf_info *p_vf;
3653 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3657 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3658 (1ULL << (rel_vf_id % 64))) {
3659 u16 vfid = p_vf->abs_vf_id;
3661 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3662 "VF[%d] - Handling FLR\n", vfid);
3664 qed_iov_vf_cleanup(p_hwfn, p_vf);
3666 /* If VF isn't active, no need for anything but SW */
3670 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3674 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
3676 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
3680 /* Workaround to make VF-PF channel ready, as FW
3681 * doesn't do that as a part of FLR.
3684 GTT_BAR0_MAP_REG_USDM_RAM +
3685 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3687 /* VF_STOPPED has to be set only after final cleanup
3688 * but prior to re-enabling the VF.
3690 p_vf->state = VF_STOPPED;
3692 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3694 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
3699 /* Mark VF for ack and clean pending state */
3700 if (p_vf->state == VF_RESET)
3701 p_vf->state = VF_STOPPED;
3702 ack_vfs[vfid / 32] |= BIT((vfid % 32));
3703 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3704 ~(1ULL << (rel_vf_id % 64));
3705 p_vf->vf_mbx.b_pending_msg = false;
3712 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3714 u32 ack_vfs[VF_MAX_STATIC / 32];
3718 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3720 /* Since BRB <-> PRS interface can't be tested as part of the flr
3721 * polling due to HW limitations, simply sleep a bit. And since
3722 * there's no need to wait per-vf, do it before looping.
3726 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3727 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3729 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3733 bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
3738 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3739 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3740 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3741 "[%08x,...,%08x]: %08x\n",
3742 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3744 if (!p_hwfn->cdev->p_iov_info) {
3745 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
3750 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3751 struct qed_vf_info *p_vf;
3754 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3758 vfid = p_vf->abs_vf_id;
3759 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3760 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3761 u16 rel_vf_id = p_vf->relative_vf_id;
3763 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3764 "VF[%d] [rel %d] got FLR-ed\n",
3767 p_vf->state = VF_RESET;
3769 /* No need to lock here, since pending_flr should
3770 * only change here and before ACKing the MFW. Since
3771 * the MFW will not trigger an additional attention for
3772 * VF FLR until the ACK, we're safe.
3774 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
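/* Example of the u64 bitmap indexing: rel_vf_id 70 sets bit 6 of
 * p_flr[1], since 70 / 64 == 1 and 70 % 64 == 6.
 */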
3782 static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
3784 struct qed_mcp_link_params *p_params,
3785 struct qed_mcp_link_state *p_link,
3786 struct qed_mcp_link_capabilities *p_caps)
3788 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
3791 struct qed_bulletin_content *p_bulletin;
3796 p_bulletin = p_vf->bulletin.p_virt;
3799 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3801 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3803 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3807 qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
3808 struct qed_ptt *p_ptt,
3809 struct qed_vf_info *p_vf)
3811 struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
3812 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
3813 struct vfpf_bulletin_update_mac_tlv *p_req;
3814 u8 status = PFVF_STATUS_SUCCESS;
3817 if (!p_vf->p_vf_info.is_trusted_configured) {
3820 "Blocking bulletin update request from untrusted VF[%d]\n",
3822 status = PFVF_STATUS_NOT_SUPPORTED;
3827 p_req = &mbx->req_virt->bulletin_update_mac;
3828 ether_addr_copy(p_bulletin->mac, p_req->mac);
3829 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3830 "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
3831 p_vf->abs_vf_id, p_req->mac);
3834 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3835 CHANNEL_TLV_BULLETIN_UPDATE_MAC,
3836 sizeof(struct pfvf_def_resp_tlv), status);
3840 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
3841 struct qed_ptt *p_ptt, int vfid)
3843 struct qed_iov_vf_mbx *mbx;
3844 struct qed_vf_info *p_vf;
3846 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3850 mbx = &p_vf->vf_mbx;
3852 /* qed_iov_process_mbx_request */
3853 if (!mbx->b_pending_msg) {
3855 "VF[%02x]: Trying to process mailbox message when none is pending\n",
3859 mbx->b_pending_msg = false;
3861 mbx->first_tlv = mbx->req_virt->first_tlv;
3863 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3864 "VF[%02x]: Processing mailbox message [type %04x]\n",
3865 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3867 /* check if tlv type is known */
3868 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3869 !p_vf->b_malicious) {
3870 switch (mbx->first_tlv.tl.type) {
3871 case CHANNEL_TLV_ACQUIRE:
3872 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3874 case CHANNEL_TLV_VPORT_START:
3875 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3877 case CHANNEL_TLV_VPORT_TEARDOWN:
3878 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3880 case CHANNEL_TLV_START_RXQ:
3881 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3883 case CHANNEL_TLV_START_TXQ:
3884 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3886 case CHANNEL_TLV_STOP_RXQS:
3887 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3889 case CHANNEL_TLV_STOP_TXQS:
3890 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3892 case CHANNEL_TLV_UPDATE_RXQ:
3893 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3895 case CHANNEL_TLV_VPORT_UPDATE:
3896 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3898 case CHANNEL_TLV_UCAST_FILTER:
3899 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3901 case CHANNEL_TLV_CLOSE:
3902 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3904 case CHANNEL_TLV_INT_CLEANUP:
3905 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3907 case CHANNEL_TLV_RELEASE:
3908 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3910 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3911 qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3913 case CHANNEL_TLV_COALESCE_UPDATE:
3914 qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3916 case CHANNEL_TLV_COALESCE_READ:
3917 qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
3919 case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
3920 qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
3923 } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3924 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3925 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3926 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3928 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3929 mbx->first_tlv.tl.type,
3930 sizeof(struct pfvf_def_resp_tlv),
3931 PFVF_STATUS_MALICIOUS);
3933 /* unknown TLV - this may belong to a VF driver from the future
3934 * - a version written after this PF driver was written, which
3935 * supports features unknown as of yet. Too bad since we don't
3936 * support them. Or this may be because someone wrote a crappy
3937 * VF driver and is sending garbage over the channel.
3940 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3942 mbx->first_tlv.tl.type,
3943 mbx->first_tlv.tl.length,
3944 mbx->first_tlv.padding, mbx->first_tlv.reply_address);
3946 /* Try replying in case reply address matches the acquisition's
3949 if (p_vf->acquire.first_tlv.reply_address &&
3950 (mbx->first_tlv.reply_address ==
3951 p_vf->acquire.first_tlv.reply_address)) {
3952 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3953 mbx->first_tlv.tl.type,
3954 sizeof(struct pfvf_def_resp_tlv),
3955 PFVF_STATUS_NOT_SUPPORTED);
3959 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3965 static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
3969 memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3971 qed_for_each_vf(p_hwfn, i) {
3972 struct qed_vf_info *p_vf;
3974 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
3975 if (p_vf->vf_mbx.b_pending_msg)
3976 events[i / 64] |= 1ULL << (i % 64);
3980 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
3983 u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
3985 if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3988 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
3993 return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
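/* The absolute-to-relative conversion is a plain offset; e.g., with
 * first_vf_in_pf == 8, abs_vfid 10 resolves to vfs_array[2].
 */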
3996 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
3997 u16 abs_vfid, struct regpair *vf_msg)
3999 struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
4005 /* Record the physical address of the request so that the handler
4006 * can later copy the message from it.
4008 p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo);
4010 /* Mark the event and schedule the workqueue */
4011 p_vf->vf_mbx.b_pending_msg = true;
4012 qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
4017 static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
4018 struct malicious_vf_eqe_data *p_data)
4020 struct qed_vf_info *p_vf;
4022 p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
4027 if (!p_vf->b_malicious) {
4029 "VF [%d] - Malicious behavior [%02x]\n",
4030 p_vf->abs_vf_id, p_data->err_id);
4032 p_vf->b_malicious = true;
4035 "VF [%d] - Malicious behavior [%02x]\n",
4036 p_vf->abs_vf_id, p_data->err_id);
4040 static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
4041 union event_ring_data *data, u8 fw_return_code)
4044 case COMMON_EVENT_VF_PF_CHANNEL:
4045 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
4046 &data->vf_pf_channel.msg_addr);
4047 case COMMON_EVENT_MALICIOUS_VF:
4048 qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
4051 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
4057 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4059 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
4065 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4066 if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
4073 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
4076 struct qed_dmae_params params;
4077 struct qed_vf_info *vf_info;
4079 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4083 memset(&params, 0, sizeof(params));
4084 SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
4085 SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
4086 params.src_vfid = vf_info->abs_vf_id;
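/* The copy length passed to qed_dmae_host2host() below is expressed in
 * dwords, hence sizeof(union vfpf_tlvs) is divided by 4.
 */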
4088 if (qed_dmae_host2host(p_hwfn, ptt,
4089 vf_info->vf_mbx.pending_req,
4090 vf_info->vf_mbx.req_phys,
4091 sizeof(union vfpf_tlvs) / 4, ¶ms)) {
4092 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4093 "Failed to copy message from VF 0x%02x\n", vfid);
4101 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
4104 struct qed_vf_info *vf_info;
4107 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4109 DP_NOTICE(p_hwfn->cdev,
4110 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4114 if (vf_info->b_malicious) {
4115 DP_NOTICE(p_hwfn->cdev,
4116 "Can't set forced MAC to malicious VF [%d]\n", vfid);
4120 if (vf_info->p_vf_info.is_trusted_configured) {
4121 feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4122 /* Trust mode will disable Forced MAC */
4123 vf_info->bulletin.p_virt->valid_bitmap &=
4124 ~BIT(MAC_ADDR_FORCED);
4126 feature = BIT(MAC_ADDR_FORCED);
4127 /* Forced MAC will disable MAC_ADDR */
4128 vf_info->bulletin.p_virt->valid_bitmap &=
4129 ~BIT(VFPF_BULLETIN_MAC_ADDR);
4132 memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4134 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4136 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4139 static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
4141 struct qed_vf_info *vf_info;
4144 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4146 DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
4151 if (vf_info->b_malicious) {
4152 DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
4157 if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
4158 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4159 "Can not set MAC, Forced MAC is configured\n");
4163 feature = BIT(VFPF_BULLETIN_MAC_ADDR);
4164 ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);
4166 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4168 if (vf_info->p_vf_info.is_trusted_configured)
4169 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4174 static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
4177 struct qed_vf_info *vf_info;
4180 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4182 DP_NOTICE(p_hwfn->cdev,
4183 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4187 if (vf_info->b_malicious) {
4188 DP_NOTICE(p_hwfn->cdev,
4189 "Can't set forced vlan to malicious VF [%d]\n", vfid);
4193 feature = 1 << VLAN_ADDR_FORCED;
4194 vf_info->bulletin.p_virt->pvid = pvid;
4196 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4198 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4200 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4203 void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
4204 int vfid, u16 vxlan_port, u16 geneve_port)
4206 struct qed_vf_info *vf_info;
4208 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4210 DP_NOTICE(p_hwfn->cdev,
4211 "Can not set udp ports, invalid vfid [%d]\n", vfid);
4215 if (vf_info->b_malicious) {
4216 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
4217 "Can not set udp ports to malicious VF [%d]\n",
4222 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4223 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4226 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
4228 struct qed_vf_info *p_vf_info;
4230 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4234 return !!p_vf_info->vport_instance;
4237 static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
4239 struct qed_vf_info *p_vf_info;
4241 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4245 return p_vf_info->state == VF_STOPPED;
4248 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
4250 struct qed_vf_info *vf_info;
4252 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4256 return vf_info->spoof_chk;
4259 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
4261 struct qed_vf_info *vf;
4264 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4266 "SR-IOV sanity check failed, can't set spoofchk\n");
4270 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4274 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4275 /* After VF VPORT start PF will configure spoof check */
4276 vf->req_spoofchk_val = val;
4281 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
4287 static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4289 struct qed_vf_info *p_vf;
4291 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4292 if (!p_vf || !p_vf->bulletin.p_virt)
4295 if (!(p_vf->bulletin.p_virt->valid_bitmap &
4296 BIT(VFPF_BULLETIN_MAC_ADDR)))
4299 return p_vf->bulletin.p_virt->mac;
4302 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
4305 struct qed_vf_info *p_vf;
4307 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4308 if (!p_vf || !p_vf->bulletin.p_virt)
4311 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
4314 return p_vf->bulletin.p_virt->mac;
4318 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
4320 struct qed_vf_info *p_vf;
4322 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4323 if (!p_vf || !p_vf->bulletin.p_virt)
4326 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
4329 return p_vf->bulletin.p_virt->pvid;
4332 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
4333 struct qed_ptt *p_ptt, int vfid, int val)
4335 struct qed_vf_info *vf;
4340 vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4344 rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4348 rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
4349 return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
4353 qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
4355 struct qed_vf_info *vf;
4359 for_each_hwfn(cdev, i) {
4360 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4362 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4364 "SR-IOV sanity check failed, can't set min rate\n");
4369 vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
4370 vport_id = vf->vport_id;
4372 return qed_configure_vport_wfq(cdev, vport_id, rate);
4375 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
4377 struct qed_wfq_data *vf_vp_wfq;
4378 struct qed_vf_info *vf_info;
4380 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
4384 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4386 if (vf_vp_wfq->configured)
4387 return vf_vp_wfq->min_speed;
4393 * qed_schedule_iov - schedules IOV task for VF and PF
4394 * @hwfn: hardware function pointer
4395 * @flag: IOV flag for VF/PF
4397 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
4399 smp_mb__before_atomic();
4400 set_bit(flag, &hwfn->iov_task_flags);
4401 smp_mb__after_atomic();
4402 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
4403 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i)
	    queue_delayed_work(cdev->hwfns[i].iov_wq,
			       &cdev->hwfns[i].iov_task, 0);
}
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
	int i, j;

	for_each_hwfn(cdev, i)
	    if (cdev->hwfns[i].iov_wq)
		flush_workqueue(cdev->hwfns[i].iov_wq);

	/* Mark VFs for disablement */
	qed_iov_set_vfs_to_disable(cdev, true);

	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
		pci_disable_sriov(cdev->pdev);

	if (cdev->recov_in_prog) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Skip SRIOV disable operations in the device since a recovery is in progress\n");
		goto out;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		/* Failure to acquire the ptt in 100g creates an odd error
		 * where the first engine has already released IOV.
		 */
		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			return -EBUSY;
		}

		/* Clean WFQ db and configure equal weight for all vports */
		qed_clean_wfq_db(hwfn, ptt);

		qed_for_each_vf(hwfn, j) {
			int k;

			if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
				continue;

			/* Wait until VF is disabled before releasing */
			for (k = 0; k < 100; k++) {
				if (!qed_iov_is_vf_stopped(hwfn, j))
					msleep(20);
				else
					break;
			}

			if (k < 100)
				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
							  ptt, j);
			else
				DP_ERR(hwfn,
				       "Timeout waiting for VF's FLR to end\n");
		}

		qed_ptt_release(hwfn, ptt);
	}
out:
	qed_iov_set_vfs_to_disable(cdev, false);

	return 0;
}
static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
					u16 vfid,
					struct qed_iov_vf_init_params *params)
{
	u16 base, i;

	/* Since we have an equal resource distribution per-VF, and we assume
	 * PF has acquired the QED_PF_L2_QUE first queues, we start setting
	 * sequentially from there.
	 */
	base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;
	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}
}
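/* For example, with 16 PF L2 queues and 4 queues per VF (illustrative
 * numbers only), the mapping above gives VF 0 queues 16-19, VF 1 queues
 * 20-23, and so on, with rx and tx mirroring each other.
 */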
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
	struct qed_iov_vf_init_params params;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, j, rc = 0;

	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
		DP_NOTICE(cdev, "Can start at most %d VFs\n",
			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
		return -EINVAL;
	}

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		hwfn = &cdev->hwfns[j];
		ptt = qed_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
					  FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
					  16);

		if (!ptt) {
			DP_ERR(hwfn, "Failed to acquire ptt\n");
			rc = -EBUSY;
			goto err;
		}

		for (i = 0; i < num; i++) {
			if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qed_sriov_enable_qid_config(hwfn, i, &params);
			rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
			if (rc) {
				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
				qed_ptt_release(hwfn, ptt);
				goto err;
			}
		}

		qed_ptt_release(hwfn, ptt);
	}

	/* Enable SRIOV PCIe functions */
	rc = pci_enable_sriov(cdev->pdev, num);
	if (rc) {
		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
		goto err;
	}

	hwfn = QED_LEADING_HWFN(cdev);
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_ERR(hwfn, "Failed to acquire ptt\n");
		rc = -EBUSY;
		goto err;
	}

	rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
	if (rc)
		DP_INFO(cdev, "Failed to update eswitch mode\n");
	qed_ptt_release(hwfn, ptt);

	return num;

err:
	qed_sriov_disable(cdev, false);
	return rc;
}
static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
{
	if (!IS_QED_SRIOV(cdev)) {
		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
		return -EOPNOTSUPP;
	}

	if (num_vfs_param)
		return qed_sriov_enable(cdev, num_vfs_param);
	else
		return qed_sriov_disable(cdev, true);
}
static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF MAC; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the MAC, and schedule the IOV task */
		if (vf_info->is_trusted_configured)
			ether_addr_copy(vf_info->mac, mac);
		else
			ether_addr_copy(vf_info->forced_mac, mac);

		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}
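/* For a trusted VF the address set above only refreshes the bulletin
 * suggestion (the VF may still pick its own MAC), whereas for an
 * untrusted VF it becomes a forced MAC the VF cannot override.
 */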
static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
{
	int i;

	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set a VF VLAN; Sriov is not enabled\n");
		return -EINVAL;
	}

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
		return -EINVAL;
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf_info)
			continue;

		/* Set the forced vlan, and schedule the IOV task */
		vf_info->forced_vlan = vid;
		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
	}

	return 0;
}
static int qed_get_vf_config(struct qed_dev *cdev,
			     int vf_id, struct ifla_vf_info *ivi)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_public_vf_info *vf_info;
	struct qed_mcp_link_state link;
	u32 tx_rate;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
	if (!vf_info)
		return -EINVAL;

	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);

	/* Fill information about VF */
	ivi->vf = vf_id;

	if (is_valid_ether_addr(vf_info->forced_mac))
		ether_addr_copy(ivi->mac, vf_info->forced_mac);
	else
		ether_addr_copy(ivi->mac, vf_info->mac);

	ivi->vlan = vf_info->forced_vlan;
	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
	ivi->linkstate = vf_info->link_state;
	tx_rate = vf_info->tx_rate;
	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);

	return 0;
}
void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
	struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	int i;

	if (!hwfn->pf_iov_info)
		return;

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
		struct qed_public_vf_info *vf_info;

		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
		if (!vf_info)
			continue;

		/* Only hwfn0 is actually interested in the link speed.
		 * But since only it would receive an MFW indication of link,
		 * need to take configuration from it - otherwise things like
		 * rate limiting for hwfn1 VF would not work.
		 */
		memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
		       sizeof(params));
		memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
		memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
		       sizeof(caps));

		/* Modify link according to the VF's configured link state */
		switch (vf_info->link_state) {
		case IFLA_VF_LINK_STATE_DISABLE:
			link.link_up = false;
			break;
		case IFLA_VF_LINK_STATE_ENABLE:
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (hwfn->cdev->num_hwfns > 1) ?
				     100000 : 40000;
			break;
		default:
			/* In auto mode pass PF link image to VF */
			break;
		}

		if (link.link_up && vf_info->tx_rate) {
			struct qed_ptt *ptt;
			int rate;

			rate = min_t(int, vf_info->tx_rate, link.speed);

			ptt = qed_ptt_acquire(hwfn);
			if (!ptt) {
				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
				return;
			}

			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
				vf_info->tx_rate = rate;
				link.speed = rate;
			}

			qed_ptt_release(hwfn, ptt);
		}

		qed_iov_set_link(hwfn, i, &params, &link, &caps);
	}

	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
}
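/* The updated link image only reaches the VFs once their bulletin boards
 * are re-posted, which is why this function ends by scheduling the
 * QED_IOV_WQ_BULLETIN_UPDATE_FLAG work instead of writing to the VFs
 * directly.
 */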
static int qed_set_vf_link_state(struct qed_dev *cdev,
				 int vf_id, int link_state)
{
	int i;

	/* Sanitize request */
	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
		DP_VERBOSE(cdev, QED_MSG_IOV,
			   "VF index [%d] isn't active\n", vf_id);
		return -EINVAL;
	}

	/* Handle configuration of link state */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
		if (!vf)
			continue;

		if (vf->link_state == link_state)
			continue;

		vf->link_state = link_state;
		qed_inform_vf_link_state(&cdev->hwfns[i]);
	}

	return 0;
}
static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
{
	int i, rc = -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
		if (rc)
			break;
	}

	return rc;
}
static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
			DP_NOTICE(p_hwfn,
				  "SR-IOV sanity check failed, can't set tx rate\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
		if (!vf)
			return -EINVAL;

		vf->tx_rate = rate;

		qed_inform_vf_link_state(p_hwfn);
	}

	return 0;
}
static int qed_set_vf_rate(struct qed_dev *cdev,
			   int vfid, u32 min_rate, u32 max_rate)
{
	int rc_min = 0, rc_max = 0;

	if (max_rate)
		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);

	if (min_rate)
		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);

	if (rc_max | rc_min)
		return -EINVAL;

	return 0;
}
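/* The two rate directions use different mechanisms: the maximum rate is
 * enforced by the per-vport rate limiter (programmed via
 * qed_iov_configure_tx_rate() when the link state is re-sent), while the
 * minimum rate is a WFQ guarantee set through qed_configure_vport_wfq().
 */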
static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *hwfn = &cdev->hwfns[i];
		struct qed_public_vf_info *vf;

		if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
			DP_NOTICE(hwfn,
				  "SR-IOV sanity check failed, can't set trust\n");
			return -EINVAL;
		}

		vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
		if (!vf)
			return -EINVAL;

		if (vf->is_trusted_request == trust)
			return 0;
		vf->is_trusted_request = trust;

		qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
	}

	return 0;
}
static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
{
	u64 events[QED_VF_ARRAY_LENGTH];
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Can't acquire PTT; re-scheduling\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
		return;
	}

	qed_iov_pf_get_pending_events(hwfn, events);

	DP_VERBOSE(hwfn, QED_MSG_IOV,
		   "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
		   events[0], events[1], events[2]);

	qed_for_each_vf(hwfn, i) {
		/* Skip VFs with no pending messages */
		if (!(events[i / 64] & (1ULL << (i % 64))))
			continue;

		DP_VERBOSE(hwfn, QED_MSG_IOV,
			   "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
			   i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);

		/* Copy VF's message to PF's request buffer for that VF */
		if (qed_iov_copy_vf_msg(hwfn, ptt, i))
			continue;

		qed_iov_process_mbx_req(hwfn, ptt, i);
	}

	qed_ptt_release(hwfn, ptt);
}
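/* The pending-events mask packs one bit per relative VF id across the
 * events[] u64 array; e.g. VF 70 corresponds to bit 6 of events[1],
 * matching the events[i / 64] & (1ULL << (i % 64)) test above.
 */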
static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
				       u8 *mac,
				       struct qed_public_vf_info *info)
{
	if (info->is_trusted_configured) {
		if (is_valid_ether_addr(info->mac) &&
		    (!mac || !ether_addr_equal(mac, info->mac)))
			return true;
	} else {
		if (is_valid_ether_addr(info->forced_mac) &&
		    (!mac || !ether_addr_equal(mac, info->forced_mac)))
			return true;
	}

	return false;
}

static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
				 struct qed_public_vf_info *info,
				 int vfid)
{
	if (info->is_trusted_configured)
		qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
	else
		qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
}
static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
{
	int i;

	qed_for_each_vf(hwfn, i) {
		struct qed_public_vf_info *info;
		bool update = false;
		u8 *mac;

		info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (!info)
			continue;

		/* Update data on bulletin board */
		if (info->is_trusted_configured)
			mac = qed_iov_bulletin_get_mac(hwfn, i);
		else
			mac = qed_iov_bulletin_get_forced_mac(hwfn, i);

		if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);

			/* Update bulletin board with MAC */
			qed_set_bulletin_mac(hwfn, info, i);
			update = true;
		}

		if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
		    info->forced_vlan) {
			DP_VERBOSE(hwfn,
				   QED_MSG_IOV,
				   "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
				   info->forced_vlan,
				   i,
				   hwfn->cdev->p_iov_info->first_vf_in_pf + i);
			qed_iov_bulletin_set_forced_vlan(hwfn,
							 info->forced_vlan, i);
			update = true;
		}

		if (update)
			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}
static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
{
	struct qed_ptt *ptt;
	int i;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
		return;
	}

	qed_for_each_vf(hwfn, i)
	    qed_iov_post_vf_bulletin(hwfn, i, ptt);

	qed_ptt_release(hwfn, ptt);
}
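/* Trust-mode transitions interact with the forced-MAC machinery: turning
 * trust on converts a forced MAC into a plain bulletin MAC the VF may
 * change, and turning it off records the current MAC in the shadow filter
 * config and clears the bulletin. The helper below handles both
 * directions.
 */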
static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
{
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 *force_mac;
	int i;

	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
	vf = qed_iov_get_vf_info(hwfn, vf_id, true);

	if (!vf_info || !vf)
		return;

	/* Force MAC converted to generic MAC in case of VF trust on */
	if (vf_info->is_trusted_configured &&
	    (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
		force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);

		if (!force_mac)
			return;

		/* Clear existing shadow copy of MAC to have a clean
		 * slate.
		 */
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(vf->shadow_config.macs[i],
					     vf_info->mac)) {
				memset(vf->shadow_config.macs[i], 0,
				       ETH_ALEN);
				DP_VERBOSE(hwfn, QED_MSG_IOV,
					   "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
					   vf_info->mac, vf_id);
				break;
			}
		}

		ether_addr_copy(vf_info->mac, force_mac);
		memset(vf_info->forced_mac, 0, ETH_ALEN);
		vf->bulletin.p_virt->valid_bitmap &=
			~BIT(MAC_ADDR_FORCED);
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}

	/* Update shadow copy with VF MAC when trust mode is turned off */
	if (!vf_info->is_trusted_configured) {
		u8 empty_mac[ETH_ALEN];

		memset(empty_mac, 0, ETH_ALEN);
		for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
			if (ether_addr_equal(vf->shadow_config.macs[i],
					     empty_mac)) {
				ether_addr_copy(vf->shadow_config.macs[i],
						vf_info->mac);
				DP_VERBOSE(hwfn, QED_MSG_IOV,
					   "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
					   vf_info->mac, vf_id);
				break;
			}
		}
		/* Clear bulletin when trust mode is turned off,
		 * to have a clean slate for next (normal) operations.
		 */
		qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
		qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	}
}
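/* Promiscuous (unmatched unicast/multicast) acceptance is reserved for
 * trusted VFs. The handler below applies or strips those accept-mode
 * bits, and enables MAC/control-frame checking for untrusted VFs,
 * whenever a VF's requested trust state differs from its configured one.
 */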
static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
{
	struct qed_sp_vport_update_params params;
	struct qed_filter_accept_flags *flags;
	struct qed_public_vf_info *vf_info;
	struct qed_vf_info *vf;
	u8 mask;
	int i;

	mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
	flags = &params.accept_flags;

	qed_for_each_vf(hwfn, i) {
		/* Need to make sure current requested configuration didn't
		 * flip so that we'll end up configuring something that's not
		 * needed.
		 */
		vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
		if (vf_info->is_trusted_configured ==
		    vf_info->is_trusted_request)
			continue;
		vf_info->is_trusted_configured = vf_info->is_trusted_request;

		/* Handle forced MAC mode */
		qed_update_mac_for_vf_trust_change(hwfn, i);

		/* Validate that the VF has a configured vport */
		vf = qed_iov_get_vf_info(hwfn, i, true);
		if (!vf->vport_instance)
			continue;

		memset(&params, 0, sizeof(params));
		params.opaque_fid = vf->opaque_fid;
		params.vport_id = vf->vport_id;
		params.update_ctl_frame_check = 1;
		params.mac_chk_en = !vf_info->is_trusted_configured;

		if (vf_info->rx_accept_mode & mask) {
			flags->update_rx_mode_config = 1;
			flags->rx_accept_filter = vf_info->rx_accept_mode;
		}

		if (vf_info->tx_accept_mode & mask) {
			flags->update_tx_mode_config = 1;
			flags->tx_accept_filter = vf_info->tx_accept_mode;
		}

		/* Remove if needed; Otherwise this would set the mask */
		if (!vf_info->is_trusted_configured) {
			flags->rx_accept_filter &= ~mask;
			flags->tx_accept_filter &= ~mask;
		}

		if (flags->update_rx_mode_config ||
		    flags->update_tx_mode_config ||
		    params.update_ctl_frame_check)
			qed_sp_vport_update(hwfn, &params,
					    QED_SPQ_MODE_EBLOCK, NULL);
	}
}
static void qed_iov_pf_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     iov_task.work);
	int rc;

	if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
		return;

	if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

		if (!ptt) {
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
			return;
		}

		rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
		if (rc)
			qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);

		qed_ptt_release(hwfn, ptt);
	}

	if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
		qed_handle_vf_msg(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_pf_set_vf_unicast(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &hwfn->iov_task_flags))
		qed_handle_bulletin_post(hwfn);

	if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
		qed_iov_handle_trust_change(hwfn);
}
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].iov_wq)
			continue;

		if (schedule_first) {
			qed_schedule_iov(&cdev->hwfns[i],
					 QED_IOV_WQ_STOP_WQ_FLAG);
			cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
		}

		flush_workqueue(cdev->hwfns[i].iov_wq);
		destroy_workqueue(cdev->hwfns[i].iov_wq);
	}
}
int qed_iov_wq_start(struct qed_dev *cdev)
{
	char name[NAME_SIZE];
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* PFs need a dedicated workqueue only if they support IOV.
		 * VFs always require one.
		 */
		if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
			continue;

		snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);

		p_hwfn->iov_wq = create_singlethread_workqueue(name);
		if (!p_hwfn->iov_wq) {
			DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
			return -ENOMEM;
		}

		if (IS_PF(cdev))
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
		else
			INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
	}

	return 0;
}
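/* This ops table is exposed through qed_iov_if.h; the protocol driver
 * (e.g. qede) forwards the kernel's ndo_set_vf_* requests to these
 * callbacks.
 */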
const struct qed_iov_hv_ops qed_iov_ops_pass = {
	.configure = &qed_sriov_configure,
	.set_mac = &qed_sriov_pf_set_mac,
	.set_vlan = &qed_sriov_pf_set_vlan,
	.get_config = &qed_get_vf_config,
	.set_link_state = &qed_set_vf_link_state,
	.set_spoof = &qed_spoof_configure,
	.set_rate = &qed_set_vf_rate,
	.set_trust = &qed_set_vf_trust,
};