// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
9 * ice_validate_vf_id - helper to check if VF ID is valid
10 * @pf: pointer to the PF structure
11 * @vf_id: the ID of the VF to check
13 static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
15 if (vf_id >= pf->num_alloc_vfs) {
16 dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
23 * ice_check_vf_init - helper to check if VF init complete
24 * @pf: pointer to the PF structure
25 * @vf: the pointer to the VF to check
27 static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
29 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
30 dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
38 * ice_err_to_virt_err - translate errors for VF return code
39 * @ice_err: error return code
41 static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
45 return VIRTCHNL_STATUS_SUCCESS;
47 case ICE_ERR_INVAL_SIZE:
48 case ICE_ERR_DEVICE_NOT_SUPPORTED:
51 return VIRTCHNL_STATUS_ERR_PARAM;
52 case ICE_ERR_NO_MEMORY:
53 return VIRTCHNL_STATUS_ERR_NO_MEMORY;
54 case ICE_ERR_NOT_READY:
55 case ICE_ERR_RESET_FAILED:
56 case ICE_ERR_FW_API_VER:
57 case ICE_ERR_AQ_ERROR:
58 case ICE_ERR_AQ_TIMEOUT:
60 case ICE_ERR_AQ_NO_WORK:
61 case ICE_ERR_AQ_EMPTY:
62 return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
64 return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
69 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
70 * @pf: pointer to the PF structure
71 * @v_opcode: operation code
72 * @v_retval: return value
73 * @msg: pointer to the msg buffer
77 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
78 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
80 struct ice_hw *hw = &pf->hw;
83 ice_for_each_vf(pf, i) {
84 struct ice_vf *vf = &pf->vf[i];
86 /* Not all vfs are enabled so skip the ones that are not */
87 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
88 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
91 /* Ignore return value on purpose - a given VF may fail, but
92 * we need to keep going and send to all of them
94 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
100 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
101 * @vf: pointer to the VF structure
102 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
103 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
104 * @link_up: whether or not to set the link up/down
107 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
108 int ice_link_speed, bool link_up)
110 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
111 pfe->event_data.link_event_adv.link_status = link_up;
113 pfe->event_data.link_event_adv.link_speed =
114 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
116 pfe->event_data.link_event.link_status = link_up;
117 /* Legacy method for virtchnl link speeds */
118 pfe->event_data.link_event.link_speed =
119 (enum virtchnl_link_speed)
120 ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
125 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
126 * @vf: the VF to check
128 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
131 static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
133 return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
134 !bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
138 * ice_is_vf_link_up - check if the VF's link is up
139 * @vf: VF to check if link is up
141 static bool ice_is_vf_link_up(struct ice_vf *vf)
143 struct ice_pf *pf = vf->pf;
145 if (ice_check_vf_init(pf, vf))
148 if (ice_vf_has_no_qs_ena(vf))
150 else if (vf->link_forced)
153 return pf->hw.port_info->phy.link_info.link_info &
158 * ice_vc_notify_vf_link_state - Inform a VF of link status
159 * @vf: pointer to the VF structure
161 * send a link status message to a single VF
163 static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
165 struct virtchnl_pf_event pfe = { 0 };
166 struct ice_hw *hw = &vf->pf->hw;
168 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
169 pfe.severity = PF_EVENT_SEVERITY_INFO;
171 if (ice_is_vf_link_up(vf))
172 ice_set_pfe_link(vf, &pfe,
173 hw->port_info->phy.link_info.link_speed, true);
175 ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
177 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
178 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
183 * ice_free_vf_res - Free a VF's resources
184 * @vf: pointer to the VF info
186 static void ice_free_vf_res(struct ice_vf *vf)
188 struct ice_pf *pf = vf->pf;
189 int i, last_vector_idx;
191 /* First, disable VF's configuration API to prevent OS from
192 * accessing the VF's VSI after it's freed or invalidated.
194 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
196 /* free VSI and disconnect it from the parent uplink */
197 if (vf->lan_vsi_idx) {
198 ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
204 last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;
206 /* clear VF MDD event information */
207 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
208 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
210 /* Disable interrupts so that VF starts in a known state */
211 for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
212 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
215 /* reset some of the state variables keeping track of the resources */
216 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
217 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
221 * ice_dis_vf_mappings
222 * @vf: pointer to the VF structure
224 static void ice_dis_vf_mappings(struct ice_vf *vf)
226 struct ice_pf *pf = vf->pf;
233 vsi = pf->vsi[vf->lan_vsi_idx];
235 dev = ice_pf_to_dev(pf);
236 wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
237 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
239 first = vf->first_vector_idx;
240 last = first + pf->num_msix_per_vf - 1;
241 for (v = first; v <= last; v++) {
244 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
245 GLINT_VECT2FUNC_IS_PF_M) |
246 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
247 GLINT_VECT2FUNC_PF_NUM_M));
248 wr32(hw, GLINT_VECT2FUNC(v), reg);
251 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
252 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
254 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
256 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
257 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
259 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
263 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
264 * @pf: pointer to the PF structure
266 * Since no MSIX entries are taken from the pf->irq_tracker then just clear
267 * the pf->sriov_base_vector.
269 * Returns 0 on success, and -EINVAL on error.
271 static int ice_sriov_free_msix_res(struct ice_pf *pf)
273 struct ice_res_tracker *res;
278 res = pf->irq_tracker;
282 /* give back irq_tracker resources used */
283 WARN_ON(pf->sriov_base_vector < res->num_entries);
285 pf->sriov_base_vector = 0;
291 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
292 * @vf: pointer to the VF structure
294 void ice_set_vf_state_qs_dis(struct ice_vf *vf)
296 /* Clear Rx/Tx enabled queues flag */
297 bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
298 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
299 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
303 * ice_dis_vf_qs - Disable the VF queues
304 * @vf: pointer to the VF structure
306 static void ice_dis_vf_qs(struct ice_vf *vf)
308 struct ice_pf *pf = vf->pf;
311 vsi = pf->vsi[vf->lan_vsi_idx];
313 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
314 ice_vsi_stop_all_rx_rings(vsi);
315 ice_set_vf_state_qs_dis(vf);
319 * ice_free_vfs - Free all VFs
320 * @pf: pointer to the PF structure
322 void ice_free_vfs(struct ice_pf *pf)
324 struct device *dev = ice_pf_to_dev(pf);
325 struct ice_hw *hw = &pf->hw;
331 while (test_and_set_bit(__ICE_VF_DIS, pf->state))
332 usleep_range(1000, 2000);
334 /* Disable IOV before freeing resources. This lets any VF drivers
335 * running in the host get themselves cleaned up before we yank
336 * the carpet out from underneath their feet.
338 if (!pci_vfs_assigned(pf->pdev))
339 pci_disable_sriov(pf->pdev);
341 dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
343 /* Avoid wait time by stopping all VFs at the same time */
344 ice_for_each_vf(pf, i)
345 if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
346 ice_dis_vf_qs(&pf->vf[i]);
348 tmp = pf->num_alloc_vfs;
349 pf->num_qps_per_vf = 0;
350 pf->num_alloc_vfs = 0;
351 for (i = 0; i < tmp; i++) {
352 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
353 /* disable VF qp mappings and set VF disable state */
354 ice_dis_vf_mappings(&pf->vf[i]);
355 set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
356 ice_free_vf_res(&pf->vf[i]);
360 if (ice_sriov_free_msix_res(pf))
361 dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
363 devm_kfree(dev, pf->vf);
366 /* This check is for when the driver is unloaded while VFs are
367 * assigned. Setting the number of VFs to 0 through sysfs is caught
368 * before this function ever gets called.
370 if (!pci_vfs_assigned(pf->pdev)) {
373 /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
374 * work correctly when SR-IOV gets re-enabled.
376 for (vf_id = 0; vf_id < tmp; vf_id++) {
377 u32 reg_idx, bit_idx;
379 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
380 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
381 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
384 clear_bit(__ICE_VF_DIS, pf->state);
385 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
389 * ice_trigger_vf_reset - Reset a VF on HW
390 * @vf: pointer to the VF structure
391 * @is_vflr: true if VFLR was issued, false if not
392 * @is_pfr: true if the reset was triggered due to a previous PFR
394 * Trigger hardware to start a reset for a particular VF. Expects the caller
395 * to wait the proper amount of time to allow hardware to reset the VF before
396 * it cleans up and restores VF functionality.
398 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
400 struct ice_pf *pf = vf->pf;
401 u32 reg, reg_idx, bit_idx;
406 dev = ice_pf_to_dev(pf);
408 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
410 /* Inform VF that it is no longer active, as a warning */
411 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
413 /* Disable VF's configuration API during reset. The flag is re-enabled
414 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
415 * It's normally disabled in ice_free_vf_res(), but it's safer
416 * to do it earlier to give some time to finish to any VF config
417 * functions that may still be running at this point.
419 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
421 /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
422 * in the case of VFR. If this is done for PFR, it can mess up VF
423 * resets because the VF driver may already have started cleanup
424 * by the time we get here.
427 wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
429 /* In the case of a VFLR, the HW has already reset the VF and we
430 * just need to clean up, so don't hit the VFRTRIG register.
433 /* reset VF using VPGEN_VFRTRIG reg */
434 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
435 reg |= VPGEN_VFRTRIG_VFSWR_M;
436 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
438 /* clear the VFLR bit in GLGEN_VFLRSTAT */
439 reg_idx = (vf_abs_id) / 32;
440 bit_idx = (vf_abs_id) % 32;
441 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
444 wr32(hw, PF_PCI_CIAA,
445 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
446 for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
447 reg = rd32(hw, PF_PCI_CIAD);
448 /* no transactions pending so stop polling */
449 if ((reg & VF_TRANS_PENDING_M) == 0)
452 dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
453 udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
458 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
459 * @vsi: the VSI to update
460 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
461 * @enable: true for enable PVID false for disable
463 static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
465 struct ice_hw *hw = &vsi->back->hw;
466 struct ice_aqc_vsi_props *info;
467 struct ice_vsi_ctx *ctxt;
468 enum ice_status status;
471 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
475 ctxt->info = vsi->info;
478 info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
479 ICE_AQ_VSI_PVLAN_INSERT_PVID |
480 ICE_AQ_VSI_VLAN_EMOD_STR;
481 info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
483 info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
484 ICE_AQ_VSI_VLAN_MODE_ALL;
485 info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
488 info->pvid = cpu_to_le16(pvid_info);
489 info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
490 ICE_AQ_VSI_PROP_SW_VALID);
492 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
494 dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %d\n",
495 status, hw->adminq.sq_last_status);
500 vsi->info.vlan_flags = info->vlan_flags;
501 vsi->info.sw_flags2 = info->sw_flags2;
502 vsi->info.pvid = info->pvid;
509 * ice_vf_vsi_setup - Set up a VF VSI
510 * @pf: board private structure
511 * @pi: pointer to the port_info instance
512 * @vf_id: defines VF ID to which this VSI connects.
514 * Returns pointer to the successfully allocated VSI struct on success,
515 * otherwise returns NULL on failure.
517 static struct ice_vsi *
518 ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
520 return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
524 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
525 * @pf: pointer to PF structure
526 * @vf: pointer to VF that the first MSIX vector index is being calculated for
528 * This returns the first MSIX vector index in PF space that is used by this VF.
529 * This index is used when accessing PF relative registers such as
530 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
531 * This will always be the OICR index in the AVF driver so any functionality
532 * using vf->first_vector_idx for queue configuration will have to increment by
533 * 1 to avoid meddling with the OICR index.
535 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
537 return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
541 * ice_alloc_vsi_res - Setup VF VSI and its resources
542 * @vf: pointer to the VF structure
544 * Returns 0 on success, negative value on failure
546 static int ice_alloc_vsi_res(struct ice_vf *vf)
548 struct ice_pf *pf = vf->pf;
549 LIST_HEAD(tmp_add_list);
550 u8 broadcast[ETH_ALEN];
555 dev = ice_pf_to_dev(pf);
556 /* first vector index is the VFs OICR index */
557 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
559 vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
561 dev_err(dev, "Failed to create VF VSI\n");
565 vf->lan_vsi_idx = vsi->idx;
566 vf->lan_vsi_num = vsi->vsi_num;
568 /* Check if port VLAN exist before, and restore it accordingly */
569 if (vf->port_vlan_info) {
570 ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
571 if (ice_vsi_add_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK))
572 dev_warn(ice_pf_to_dev(pf), "Failed to add Port VLAN %d filter for VF %d\n",
573 vf->port_vlan_info & VLAN_VID_MASK, vf->vf_id);
575 /* set VLAN 0 filter by default when no port VLAN is
576 * enabled. If a port VLAN is enabled we don't want
577 * untagged broadcast/multicast traffic seen on the VF
580 if (ice_vsi_add_vlan(vsi, 0))
581 dev_warn(ice_pf_to_dev(pf), "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest\n",
585 eth_broadcast_addr(broadcast);
587 status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
589 goto ice_alloc_vsi_res_exit;
591 if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
592 status = ice_add_mac_to_list(vsi, &tmp_add_list,
593 vf->dflt_lan_addr.addr);
595 goto ice_alloc_vsi_res_exit;
598 status = ice_add_mac(&pf->hw, &tmp_add_list);
600 dev_err(dev, "could not add mac filters error %d\n", status);
604 /* Clear this bit after VF initialization since we shouldn't reclaim
605 * and reassign interrupts for synchronous or asynchronous VFR events.
606 * We don't want to reconfigure interrupts since AVF driver doesn't
607 * expect vector assignment to be changed unless there is a request for
610 ice_alloc_vsi_res_exit:
611 ice_free_fltr_list(dev, &tmp_add_list);
616 * ice_alloc_vf_res - Allocate VF resources
617 * @vf: pointer to the VF structure
619 static int ice_alloc_vf_res(struct ice_vf *vf)
621 struct ice_pf *pf = vf->pf;
622 int tx_rx_queue_left;
625 /* Update number of VF queues, in case VF had requested for queue
628 tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
629 ice_get_avail_rxq_count(pf));
630 tx_rx_queue_left += pf->num_qps_per_vf;
631 if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
632 vf->num_req_qs != vf->num_vf_qs)
633 vf->num_vf_qs = vf->num_req_qs;
635 /* setup VF VSI and necessary resources */
636 status = ice_alloc_vsi_res(vf);
638 goto ice_alloc_vf_res_exit;
641 set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
643 clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
645 /* VF is now completely initialized */
646 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
650 ice_alloc_vf_res_exit:
656 * ice_ena_vf_mappings
657 * @vf: pointer to the VF structure
659 * Enable VF vectors and queues allocation by writing the details into
660 * respective registers.
662 static void ice_ena_vf_mappings(struct ice_vf *vf)
664 int abs_vf_id, abs_first, abs_last;
665 struct ice_pf *pf = vf->pf;
672 dev = ice_pf_to_dev(pf);
674 vsi = pf->vsi[vf->lan_vsi_idx];
675 first = vf->first_vector_idx;
676 last = (first + pf->num_msix_per_vf) - 1;
677 abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
678 abs_last = (abs_first + pf->num_msix_per_vf) - 1;
679 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
681 /* VF Vector allocation */
682 reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
683 ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
684 VPINT_ALLOC_VALID_M);
685 wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
687 reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
688 & VPINT_ALLOC_PCI_FIRST_M) |
689 ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
690 VPINT_ALLOC_PCI_VALID_M);
691 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
692 /* map the interrupts to its functions */
693 for (v = first; v <= last; v++) {
694 reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
695 GLINT_VECT2FUNC_VF_NUM_M) |
696 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
697 GLINT_VECT2FUNC_PF_NUM_M));
698 wr32(hw, GLINT_VECT2FUNC(v), reg);
701 /* Map mailbox interrupt. We put an explicit 0 here to remind us that
702 * VF admin queue interrupts will go to VF MSI-X vector 0.
704 wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
705 /* set regardless of mapping mode */
706 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
708 /* VF Tx queues allocation */
709 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
710 /* set the VF PF Tx queue range
711 * VFNUMQ value should be set to (number of queues - 1). A value
712 * of 0 means 1 queue and a value of 255 means 256 queues
714 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
715 VPLAN_TX_QBASE_VFFIRSTQ_M) |
716 (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
717 VPLAN_TX_QBASE_VFNUMQ_M));
718 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
720 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
723 /* set regardless of mapping mode */
724 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
726 /* VF Rx queues allocation */
727 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
728 /* set the VF PF Rx queue range
729 * VFNUMQ value should be set to (number of queues - 1). A value
730 * of 0 means 1 queue and a value of 255 means 256 queues
732 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
733 VPLAN_RX_QBASE_VFFIRSTQ_M) |
734 (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
735 VPLAN_RX_QBASE_VFNUMQ_M));
736 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
738 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
744 * @pf: pointer to the PF structure
745 * @avail_res: available resources in the PF structure
746 * @max_res: maximum resources that can be given per VF
747 * @min_res: minimum resources that can be given per VF
749 * Returns non-zero value if resources (queues/vectors) are available or
750 * returns zero if PF cannot accommodate for all num_alloc_vfs.
753 ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
755 bool checked_min_res = false;
758 /* start by checking if PF can assign max number of resources for
760 * if yes, return number per VF
761 * If no, divide by 2 and roundup, check again
762 * repeat the loop till we reach a point where even minimum resources
763 * are not available, in that case return 0
766 while ((res >= min_res) && !checked_min_res) {
769 num_all_res = pf->num_alloc_vfs * res;
770 if (num_all_res <= avail_res)
774 checked_min_res = true;
776 res = DIV_ROUND_UP(res, 2);
782 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
783 * @vf: VF to calculate the register index for
784 * @q_vector: a q_vector associated to the VF
786 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
790 if (!vf || !q_vector)
795 /* always add one to account for the OICR being the first MSIX */
796 return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
801 * ice_get_max_valid_res_idx - Get the max valid resource index
802 * @res: pointer to the resource to find the max valid index for
804 * Start from the end of the ice_res_tracker and return right when we find the
805 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
806 * valid for SR-IOV because it is the only consumer that manipulates the
807 * res->end and this is always called when res->end is set to res->num_entries.
809 static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
816 for (i = res->num_entries - 1; i >= 0; i--)
817 if (res->list[i] & ICE_RES_VALID_BIT)
824 * ice_sriov_set_msix_res - Set any used MSIX resources
825 * @pf: pointer to PF structure
826 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
828 * This function allows SR-IOV resources to be taken from the end of the PF's
829 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
830 * just set the pf->sriov_base_vector and return success.
832 * If there are not enough resources available, return an error. This should
833 * always be caught by ice_set_per_vf_res().
835 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors in
836 * in the PF's space available for SR-IOV.
838 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
840 u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
841 int vectors_used = pf->irq_tracker->num_entries;
842 int sriov_base_vector;
844 sriov_base_vector = total_vectors - num_msix_needed;
846 /* make sure we only grab irq_tracker entries from the list end and
847 * that we have enough available MSIX vectors
849 if (sriov_base_vector < vectors_used)
852 pf->sriov_base_vector = sriov_base_vector;
858 * ice_set_per_vf_res - check if vectors and queues are available
859 * @pf: pointer to the PF structure
861 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
862 * get more vectors and can enable more queues per VF. Note that this does not
863 * grab any vectors from the SW pool already allocated. Also note, that all
864 * vector counts include one for each VF's miscellaneous interrupt vector
867 * Minimum VFs - 2 vectors, 1 queue pair
868 * Small VFs - 5 vectors, 4 queue pairs
869 * Medium VFs - 17 vectors, 16 queue pairs
871 * Second, determine number of queue pairs per VF by starting with a pre-defined
872 * maximum each VF supports. If this is not possible, then we adjust based on
873 * queue pairs available on the device.
875 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
876 * by each VF during VF initialization and reset.
878 static int ice_set_per_vf_res(struct ice_pf *pf)
880 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
881 int msix_avail_per_vf, msix_avail_for_sriov;
882 struct device *dev = ice_pf_to_dev(pf);
883 u16 num_msix_per_vf, num_txq, num_rxq;
885 if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
888 /* determine MSI-X resources per VF */
889 msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
890 pf->irq_tracker->num_entries;
891 msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
892 if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
893 num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
894 } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
895 num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
896 } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
897 num_msix_per_vf = ICE_MIN_INTR_PER_VF;
899 dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
900 msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
905 /* determine queue resources per VF */
906 num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
908 num_msix_per_vf - ICE_NONQ_VECS_VF,
909 ICE_MAX_RSS_QS_PER_VF),
912 num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
914 num_msix_per_vf - ICE_NONQ_VECS_VF,
915 ICE_MAX_RSS_QS_PER_VF),
918 if (!num_txq || !num_rxq) {
919 dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
920 ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
924 if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
925 dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
930 /* only allow equal Tx/Rx queue count (i.e. queue pairs) */
931 pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
932 pf->num_msix_per_vf = num_msix_per_vf;
933 dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
934 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);
940 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
941 * @vf: pointer to the VF structure
943 * Cleanup a VF after the hardware reset is finished. Expects the caller to
944 * have verified whether the reset is finished properly, and ensure the
945 * minimum amount of wait time has passed. Reallocate VF resources back to make
948 static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
950 struct ice_pf *pf = vf->pf;
956 /* PF software completes the flow by notifying VF that reset flow is
957 * completed. This is done by enabling hardware by clearing the reset
958 * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
959 * register to VFR completed (done at the end of this function)
960 * By doing this we allow HW to access VF memory at any point. If we
961 * did it any sooner, HW could access memory while it was being freed
962 * in ice_free_vf_res(), causing an IOMMU fault.
964 * On the other hand, this needs to be done ASAP, because the VF driver
965 * is waiting for this to happen and may report a timeout. It's
966 * harmless, but it gets logged into Guest OS kernel log, so best avoid
969 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
970 reg &= ~VPGEN_VFRTRIG_VFSWR_M;
971 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
973 /* reallocate VF resources to finish resetting the VSI state */
974 if (!ice_alloc_vf_res(vf)) {
975 ice_ena_vf_mappings(vf);
976 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
977 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
980 /* Tell the VF driver the reset is done. This needs to be done only
981 * after VF has been fully initialized, because the VF driver may
982 * request resources immediately after setting this flag.
984 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
988 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
989 * @vf: pointer to the VF info
990 * @vsi: the VSI being configured
991 * @promisc_m: mask of promiscuous config bits
992 * @rm_promisc: promisc flag request from the VF to remove or add filter
994 * This function configures VF VSI promiscuous mode, based on the VF requests,
995 * for Unicast, Multicast and VLAN
997 static enum ice_status
998 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1001 struct ice_pf *pf = vf->pf;
1002 enum ice_status status = 0;
1006 if (vsi->num_vlan) {
1007 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1009 } else if (vf->port_vlan_info) {
1011 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1012 vf->port_vlan_info);
1014 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1015 vf->port_vlan_info);
1018 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1021 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1029 * ice_config_res_vfs - Finalize allocation of VFs resources in one go
1030 * @pf: pointer to the PF structure
1032 * This function is being called as last part of resetting all VFs, or when
1033 * configuring VFs for the first time, where there is no resource to be freed
1034 * Returns true if resources were properly allocated for all VFs, and false
1037 static bool ice_config_res_vfs(struct ice_pf *pf)
1039 struct device *dev = ice_pf_to_dev(pf);
1040 struct ice_hw *hw = &pf->hw;
1043 if (ice_set_per_vf_res(pf)) {
1044 dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
1048 /* rearm global interrupts */
1049 if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
1050 ice_irq_dynamic_ena(hw, NULL, NULL);
1052 /* Finish resetting each VF and allocate resources */
1053 ice_for_each_vf(pf, v) {
1054 struct ice_vf *vf = &pf->vf[v];
1056 vf->num_vf_qs = pf->num_qps_per_vf;
1057 dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
1059 ice_cleanup_and_realloc_vf(vf);
1063 clear_bit(__ICE_VF_DIS, pf->state);
1069 * ice_reset_all_vfs - reset all allocated VFs in one go
1070 * @pf: pointer to the PF structure
1071 * @is_vflr: true if VFLR was issued, false if not
1073 * First, tell the hardware to reset each VF, then do all the waiting in one
1074 * chunk, and finally finish restoring each VF after the wait. This is useful
1075 * during PF routines which need to reset all VFs, as otherwise it must perform
1076 * these resets in a serialized fashion.
1078 * Returns true if any VFs were reset, and false otherwise.
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)
	/* If VFs have been disabled, there is no need to reset */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, v)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
	/* Stop each VF's LAN Tx queues via AQ before polling for reset
	 * completion; VFR needs the disable-Tx-queue AQ call to finish.
	 * NOTE(review): the per-iteration "vf" declaration is elided in this
	 * extraction - presumably vf = &pf->vf[v]; confirm in full source.
	 */
	ice_for_each_vf(pf, v) {
		struct ice_vsi *vsi;
		vsi = pf->vsi[vf->lan_vsi_idx];
		if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
				NULL, ICE_VF_RESET, vf->vf_id, NULL);
	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {
			/* VFRD bit set means HW has finished resetting VF v */
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* only delay if the check failed */
				usleep_range(10, 20);
			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.
			 */
	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(dev, "VF reset check timeout\n");
	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, v) {
		ice_free_vf_res(vf);
		/* Free VF queues as well, and reallocate later.
		 * If a given VF has different number of queues
		 * configured, the request for update will come
		 * via mailbox communication.
		 */
	/* Release the MSI-X vectors that were carved out for the VFs */
	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
	/* Re-allocate per-VF resources; failure path is elided here */
	if (!ice_config_res_vfs(pf))
1166 * ice_is_vf_disabled
1167 * @vf: pointer to the VF info
1169 * Returns true if the PF or VF is disabled, false otherwise.
1171 static bool ice_is_vf_disabled(struct ice_vf *vf)
1173 struct ice_pf *pf = vf->pf;
1175 /* If the PF has been disabled, there is no need resetting VF until
1176 * PF is active again. Similarly, if the VF has been disabled, this
1177 * means something else is resetting the VF, so we shouldn't continue.
1178 * Otherwise, set disable VF state bit for actual reset, and continue.
1180 return (test_bit(__ICE_VF_DIS, pf->state) ||
1181 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1185 * ice_reset_vf - Reset a particular VF
1186 * @vf: pointer to the VF structure
1187 * @is_vflr: true if VFLR was issued, false if not
1189 * Returns true if the VF is currently in reset, resets successfully, or resets
1190 * are disabled and false otherwise.
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	dev = ice_pf_to_dev(pf);
	/* Global "resets disabled" wins over any individual request */
	if (test_bit(__ICE_VF_RESETS_DISABLED, pf->state)) {
		dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
	/* Someone else is already resetting this VF (or PF is disabled) */
	if (ice_is_vf_disabled(vf)) {
		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, is_vflr, false);
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, ICE_VF_RESET, vf->vf_id, NULL);
	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {
		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		/* include VLAN promiscuous bits when VLANs are in play */
		if (vf->port_vlan_info || vsi->num_vlan)
			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
		promisc_m = ICE_UCAST_PROMISC_BITS;
		vsi = pf->vsi[vf->lan_vsi_idx];
		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
			dev_err(dev, "disabling promiscuous mode failed\n");
	/* free VF resources to begin resetting the VSI state */
	ice_free_vf_res(vf);
	/* Rebuild the VF's VSI and restore its configuration */
	ice_cleanup_and_realloc_vf(vf);
1283 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1284 * @pf: pointer to the PF structure
1286 void ice_vc_notify_link_state(struct ice_pf *pf)
1290 ice_for_each_vf(pf, i)
1291 ice_vc_notify_vf_link_state(&pf->vf[i]);
1295 * ice_vc_notify_reset - Send pending reset message to all VFs
1296 * @pf: pointer to the PF structure
1298 * indicate a pending reset to all VFs on a given PF
1300 void ice_vc_notify_reset(struct ice_pf *pf)
1302 struct virtchnl_pf_event pfe;
1304 if (!pf->num_alloc_vfs)
1307 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1308 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1309 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1310 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1314 * ice_vc_notify_vf_reset - Notify VF of a reset event
1315 * @vf: pointer to the VF structure
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
	struct virtchnl_pf_event pfe;

	/* NOTE(review): the "pf" local (presumably vf->pf) is elided in this
	 * extraction - confirm against full source.
	 */
	if (ice_validate_vf_id(pf, vf->vf_id))
	/* Bail out if VF is in disabled state, neither initialized, nor active
	 * state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))
	/* Unicast the impending-reset event to this VF's mailbox */
	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1345 * ice_alloc_vfs - Allocate and set up VFs resources
1346 * @pf: pointer to the PF structure
1347 * @num_alloc_vfs: number of VFs to allocate
static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(__ICE_OICR_INTR_DIS, pf->state);
	/* Turn on SR-IOV in the PCI layer; unwind the interrupt state on
	 * failure (error check elided in this extraction).
	 */
	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;
	/* allocate memory */
	vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
		goto err_pci_disable_sriov;
	pf->num_alloc_vfs = num_alloc_vfs;
	/* apply default profile */
	ice_for_each_vf(pf, i) {
		vfs[i].vf_sw_id = pf->first_sw;
		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		/* spoof checking on by default */
		vfs[i].spoofchk = true;
	/* VF resources get allocated with initialization */
	if (!ice_config_res_vfs(pf)) {
		goto err_unroll_sriov;
	/* Error unwind: free VF array, then disable SR-IOV, then re-arm
	 * the interrupt disabled at entry (labels partly elided).
	 */
	devm_kfree(dev, vfs);
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1410 * ice_pf_state_is_nominal - checks the PF for nominal state
1411 * @pf: pointer to PF to check
1413 * Check the PF's state for a collection of bits that would indicate
1414 * the PF is in a state that would inhibit normal operation for
1415 * driver functionality.
1417 * Returns true if PF is in a nominal state.
1418 * Returns false otherwise
static bool ice_pf_state_is_nominal(struct ice_pf *pf)
	DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

	/* Build a mask of the first __ICE_STATE_NOMINAL_CHECK_BITS state
	 * bits; any intersection with pf->state means the PF is busy
	 * (resetting, recovering, etc.) and not nominal.
	 */
	bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
	if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1435 * ice_pci_sriov_ena - Enable or change number of VFs
1436 * @pf: pointer to the PF structure
1437 * @num_vfs: number of VFs to allocate
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);

	/* Refuse while the PF is resetting or otherwise not ready */
	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
	/* A different pre-existing VF count requires teardown first; the
	 * same count is a no-op (bodies elided in this extraction).
	 */
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
	if (num_vfs > pf->num_vfs_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->num_vfs_supported);
	dev_info(dev, "Allocating %d VFs\n", num_vfs);
	err = ice_alloc_vfs(pf, num_vfs);
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
	/* Mark SR-IOV active only after allocation succeeded */
	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1478 * ice_sriov_configure - Enable or change number of VFs via sysfs
1479 * @pdev: pointer to a pci_dev structure
1480 * @num_vfs: number of VFs to allocate
1482 * This function is called when the user updates the number of VFs in sysfs.
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);

	/* SR-IOV is unsupported in safe mode (no DDP package loaded) */
	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
	/* Enable/resize path; the guarding condition (presumably
	 * "if (num_vfs)") is elided in this extraction - confirm.
	 */
		return ice_pci_sriov_ena(pf, num_vfs);
	/* num_vfs == 0: only tear VFs down if none are assigned to VMs */
	if (!pci_vfs_assigned(pdev)) {
		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1508 * ice_process_vflr_event - Free VF resources via IRQ calls
1509 * @pf: pointer to the PF structure
1511 * called from the VFLR IRQ handler to
1512 * free up VF resources and state variables
void ice_process_vflr_event(struct ice_pf *pf)
	struct ice_hw *hw = &pf->hw;

	/* Nothing to do unless a VFLR event is actually pending */
	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
	ice_for_each_vf(pf, vf_id) {
		struct ice_vf *vf = &pf->vf[vf_id];
		u32 reg_idx, bit_idx;

		/* GLGEN_VFLRSTAT is a bit-per-VF register array indexed by
		 * the absolute VF number (PF's VF base + local index).
		 */
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, true);
1539 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
1540 * @vf: pointer to the VF info
1542 static void ice_vc_reset_vf(struct ice_vf *vf)
1544 ice_vc_notify_vf_reset(vf);
1545 ice_reset_vf(vf, false);
1549 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
1550 * @pf: PF used to index all VFs
1551 * @pfq: queue index relative to the PF's function space
1553 * If no VF is found who owns the pfq then return NULL, otherwise return a
1554 * pointer to the VF who owns the pfq
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
	ice_for_each_vf(pf, vf_id) {
		struct ice_vf *vf = &pf->vf[vf_id];
		struct ice_vsi *vsi;

		vsi = pf->vsi[vf->lan_vsi_idx];
		/* Compare the PF-space queue against every Rx queue mapped to
		 * this VF's VSI; the return-on-match and final return NULL
		 * lines are elided in this extraction.
		 */
		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq)
1576 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
1577 * @pf: PF used for conversion
1578 * @globalq: global queue index used to convert to PF space queue index
1580 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
1582 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
1586 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
1587 * @pf: PF that the LAN overflow event happened on
1588 * @event: structure holding the event information for the LAN overflow event
1590 * Determine if the LAN overflow event was caused by a VF queue. If it was not
1591 * caused by a VF, do nothing. If a VF caused this LAN overflow event trigger a
1592 * reset on the offending VF.
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
	u32 gldcb_rtctq, queue;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
		GLDCB_RTCTQ_RXQNUM_S;

	/* Map global queue -> PF queue -> owning VF; the NULL-VF early
	 * return is elided in this extraction.
	 */
	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	ice_vc_reset_vf(vf);
1615 * ice_vc_send_msg_to_vf - Send message to VF
1616 * @vf: pointer to the VF info
1617 * @v_opcode: virtual channel opcode
1618 * @v_retval: virtual channel return value
1619 * @msg: pointer to the msg buffer
1620 * @msglen: msg length
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
	enum ice_status aq_ret;

	if (ice_validate_vf_id(pf, vf->vf_id))
	dev = ice_pf_to_dev(pf);
	/* single place to detect unsuccessful return values */
	/* Count consecutive failures; a misbehaving VF gets disabled */
	vf->num_inval_msgs++;
	dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
		 v_opcode, v_retval);
	if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
		dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
		dev_err(dev, "Use PF Control I/F to enable the VF\n");
		set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	vf->num_valid_msgs++;
	/* reset the invalid counter, if a valid message is received. */
	vf->num_inval_msgs = 0;
	/* Deliver the reply over the PF->VF mailbox admin queue */
	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
	/* ENOSYS from the mailbox is tolerated; anything else is logged */
	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
		dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
			 vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
1671 * ice_vc_get_ver_msg
1672 * @vf: pointer to the VF info
1673 * @msg: pointer to the msg buffer
1675 * called from the VF to request the API version used by the PF
static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
	struct virtchnl_version_info info = {
		VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR

	/* Record the version the VF claims to speak */
	vf->vf_ver = *(struct virtchnl_version_info *)msg;
	/* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
	if (VF_IS_V10(&vf->vf_ver))
		info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;

	/* Reply with the (possibly downgraded) version the PF supports */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
				     VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
				     sizeof(struct virtchnl_version_info));
1694 * ice_vc_get_vf_res_msg
1695 * @vf: pointer to the VF info
1696 * @msg: pointer to the msg buffer
1698 * called from the VF to request its resources
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	/* VF must have completed init before it may query resources */
	if (ice_check_vf_init(pf, vf)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	len = sizeof(struct virtchnl_vf_resource);
	vfres = kzalloc(len, GFP_KERNEL);
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
	/* 1.1+ VFs send their capability request in the message payload;
	 * older VFs get a fixed legacy capability set.
	 */
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
	vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
			  VIRTCHNL_VF_OFFLOAD_RSS_REG |
			  VIRTCHNL_VF_OFFLOAD_VLAN;
	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	/* VLAN offload is only offered when no port VLAN is set */
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
	/* Grant each capability only if the VF asked for it */
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
	/* Describe the single LAN VSI this VF owns */
	vfres->num_vsis = 1;
	/* Tx and Rx queue are equal for VF */
	vfres->num_queue_pairs = vsi->num_txq;
	vfres->max_vectors = pf->num_msix_per_vf;
	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->dflt_lan_addr.addr);
	/* match guest capabilities */
	vf->driver_caps = vfres->vf_cap_flags;
	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1797 * ice_vc_reset_vf_msg
1798 * @vf: pointer to the VF info
1800 * called from the VF to reset itself,
1801 * unlike other virtchnl messages, PF driver
1802 * doesn't send the response back to the VF
1804 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1806 if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1807 ice_reset_vf(vf, false);
1811 * ice_find_vsi_from_id
1812 * @pf: the PF structure to search for the VSI
1813 * @id: ID of the VSI it is searching for
1815 * searches for the VSI with the given ID
static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
	/* Linear scan of the PF's VSI array for a matching vsi_num; the
	 * return-on-match and trailing return NULL are elided here.
	 */
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
1829 * ice_vc_isvalid_vsi_id
1830 * @vf: pointer to the VF info
1831 * @vsi_id: VF relative VSI ID
1833 * check for the valid VSI ID
1835 static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1837 struct ice_pf *pf = vf->pf;
1838 struct ice_vsi *vsi;
1840 vsi = ice_find_vsi_from_id(pf, vsi_id);
1842 return (vsi && (vsi->vf_id == vf->vf_id));
1846 * ice_vc_isvalid_q_id
1847 * @vf: pointer to the VF info
1849 * @qid: VSI relative queue ID
1851 * check for the valid queue ID
1853 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
1855 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
1856 /* allocated Tx and Rx queues should be always equal for VF VSI */
1857 return (vsi && (qid < vsi->alloc_txq));
1861 * ice_vc_isvalid_ring_len
1862 * @ring_len: length of ring
1864 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
1867 static bool ice_vc_isvalid_ring_len(u16 ring_len)
1869 return ring_len == 0 ||
1870 (ring_len >= ICE_MIN_NUM_DESC &&
1871 ring_len <= ICE_MAX_NUM_DESC &&
1872 !(ring_len % ICE_REQ_DESC_MULTIPLE));
1876 * ice_vc_config_rss_key
1877 * @vf: pointer to the VF info
1878 * @msg: pointer to the msg buffer
1880 * Configure the VF's RSS key
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	/* Validate VF state, VSI ownership, key length, and that RSS is
	 * enabled on the PF before touching hardware.
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	/* Program only the hash key (LUT untouched) */
	if (ice_set_rss(vsi, vrk->key, NULL, 0))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1924 * ice_vc_config_rss_lut
1925 * @vf: pointer to the VF info
1926 * @msg: pointer to the msg buffer
1928 * Configure the VF's RSS LUT
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	/* Mirror of ice_vc_config_rss_key validation, but for the LUT */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	/* Program only the lookup table (key untouched) */
	if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1971 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
* @vf: the VF being reset
1974 * The max poll time is about ~800ms, which is about the maximum time it takes
1975 * for a VF to be reset and/or a VF driver to be removed.
static void ice_wait_on_vf_reset(struct ice_vf *vf)
	/* Poll until the VF reports initialized or the retry budget runs
	 * out; the early-exit break is elided in this extraction.
	 */
	for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
		msleep(ICE_MAX_VF_RESET_SLEEP_MS);
1989 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
1990 * @vf: VF to check if it's ready to be configured/queried
1992 * The purpose of this function is to make sure the VF is not in reset, not
1993 * disabled, and initialized so it can be configured and/or queried by a host
static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
	/* Give an in-progress reset time to complete before judging state */
	ice_wait_on_vf_reset(vf);

	/* Disabled or uninitialized VFs may not be configured/queried;
	 * the error returns are elided in this extraction.
	 */
	if (ice_is_vf_disabled(vf))

	if (ice_check_vf_init(pf, vf))
2013 * ice_set_vf_spoofchk
2014 * @netdev: network interface device structure
2015 * @vf_id: VF identifier
2016 * @ena: flag to enable or disable feature
2018 * Enable or disable VF spoof checking
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi_ctx *ctx;
	struct ice_vsi *vf_vsi;
	enum ice_status status;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id))
	vf = &pf->vf[vf_id];
	/* VF must be out of reset and initialized before reconfiguring */
	ret = ice_check_vf_ready_for_cfg(vf);
	vf_vsi = pf->vsi[vf->lan_vsi_idx];
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
	/* No hardware update needed when the state is unchanged */
	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	/* Update only the security section of the VSI context */
	ctx->info.sec_flags = vf_vsi->info.sec_flags;
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	/* Enable path: set MAC anti-spoof and Tx VLAN prune bits */
	ctx->info.sec_flags |=
		ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
		(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
		 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
	/* Disable path: clear the same bits */
	ctx->info.sec_flags &=
		~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
		  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
		   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
	status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
		dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d\n",
			ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
	/* only update spoofchk state and VSI context on success */
	vf_vsi->info.sec_flags = ctx->info.sec_flags;
2094 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2095 * @pf: PF structure for accessing VF(s)
2097 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
	ice_for_each_vf(pf, vf_idx) {
		struct ice_vf *vf = &pf->vf[vf_idx];

		/* found a VF that has promiscuous mode configured */
		/* (the return true / trailing return false are elided) */
		if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
		    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2117 * ice_vc_cfg_promiscuous_mode_msg
2118 * @vf: pointer to the VF info
2119 * @msg: pointer to the msg buffer
2121 * called from the VF to configure VF VSIs promiscuous mode
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_promisc_info *info =
		(struct virtchnl_promisc_info *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	dev = ice_pf_to_dev(pf);
	/* Only trusted (privileged) VFs may enter promiscuous mode */
	if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
		dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
		/* Leave v_ret alone, lie to the VF on purpose. */
	/* rm_promisc == true means the VF is leaving promiscuous mode */
	rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
		     !(info->flags & FLAG_VF_MULTICAST_PROMISC);
	/* With VLANs present, spoof checking and VLAN pruning must track
	 * the promiscuous state.
	 */
	if (vsi->num_vlan || vf->port_vlan_info) {
		struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
		struct net_device *pf_netdev;

			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		pf_netdev = pf_vsi->netdev;

		ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
			dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
				rm_promisc ? "ON" : "OFF", vf->vf_id,
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
			dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	/* Without "true promiscuous" support, emulate unicast promiscuous
	 * mode by making this VF's VSI the switch default VSI.
	 */
	if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
		bool set_dflt_vsi = !!(info->flags & FLAG_VF_UNICAST_PROMISC);

		if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
			/* only attempt to set the default forwarding VSI if
			 * it's not currently set
			 */
			ret = ice_set_dflt_vsi(pf->first_sw, vsi);
		else if (!set_dflt_vsi &&
			 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
			/* only attempt to free the default forwarding VSI if we
			 * own it (condition text partly elided here)
			 */
			ret = ice_clear_dflt_vsi(pf->first_sw);
			dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
				set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
		enum ice_status status;

		/* Pick the promiscuous bit set matching the request and the
		 * presence of VLANs on this VF.
		 */
		if (info->flags & FLAG_VF_UNICAST_PROMISC) {
			if (vf->port_vlan_info || vsi->num_vlan)
				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
				promisc_m = ICE_UCAST_PROMISC_BITS;
		} else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
			if (vf->port_vlan_info || vsi->num_vlan)
				promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
				promisc_m = ICE_MCAST_PROMISC_BITS;
			if (vf->port_vlan_info || vsi->num_vlan)
				promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
				promisc_m = ICE_UCAST_PROMISC_BITS;

		/* Configure multicast/unicast with or without VLAN promiscuous
		 * mode
		 */
		status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
			dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %d\n",
				rm_promisc ? "dis" : "en", vf->vf_id, status);
			v_ret = ice_err_to_virt_err(status);
			dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
				rm_promisc ? "dis" : "en", vf->vf_id);
	/* Record the resulting state in the VF's state bits */
	if (info->flags & FLAG_VF_MULTICAST_PROMISC)
		set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
		clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);

	if (info->flags & FLAG_VF_UNICAST_PROMISC)
		set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
		clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);

	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
2261 * ice_vc_get_stats_msg
2262 * @vf: pointer to the VF info
2263 * @msg: pointer to the msg buffer
2265 * called from the VF to get VSI stats
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_eth_stats stats = { 0 };
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	/* Refresh the VSI's ethernet stats from hardware, then hand the VF
	 * a snapshot copy.
	 */
	ice_update_eth_stats(vsi);

	stats = vsi->eth_stats;

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
				     (u8 *)&stats, sizeof(stats));
2303 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
2304 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
2306 * Return true on successful validation, else false
2308 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
2310 if ((!vqs->rx_queues && !vqs->tx_queues) ||
2311 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
2312 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
2320 * @vf: pointer to the VF info
2321 * @msg: pointer to the msg buffer
2323 * called from the VF to enable all or specific queue(s)
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	unsigned long q_map;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
	q_map = vqs->rx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		/* Skip queue if enabled */
		if (test_bit(vf_q_id, vf->rxq_ena))
		if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
				vf_q_id, vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		/* Track per-queue enable state for later disable/reset */
		set_bit(vf_q_id, vf->rxq_ena);
	vsi = pf->vsi[vf->lan_vsi_idx];
	/* For Tx, only bookkeeping is needed (see comment above) */
	q_map = vqs->tx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		/* Skip queue if enabled */
		if (test_bit(vf_q_id, vf->txq_ena))
		set_bit(vf_q_id, vf->txq_ena);
	/* Set flag to indicate that queues are enabled */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2408 * @vf: pointer to the VF info
2409 * @msg: pointer to the msg buffer
2411 * called from the VF to disable all or specific
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	unsigned long q_map;

	/* Accept the request if the VF is active OR still has queues
	 * enabled (it may be mid-teardown).
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (!ice_vc_validate_vqs_bitmaps(vqs)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
	if (vqs->tx_queues) {
		q_map = vqs->tx_queues;

		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
			struct ice_ring *ring = vsi->tx_rings[vf_q_id];
			struct ice_txq_meta txq_meta = { 0 };

			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			/* Skip queue if not enabled */
			if (!test_bit(vf_q_id, vf->txq_ena))
			ice_fill_txq_meta(vsi, ring, &txq_meta);

			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
					vf_q_id, vsi->vsi_num);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			/* Clear enabled queues flag */
			clear_bit(vf_q_id, vf->txq_ena);
	q_map = vqs->rx_queues;
	/* speed up Rx queue disable by batching them if possible */
	/* (the full batching condition is elided in this extraction) */
	    bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
		if (ice_vsi_stop_all_rx_rings(vsi)) {
			dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
		/* Fallback: disable requested Rx queues one at a time */
		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			/* Skip queue if not enabled */
			if (!test_bit(vf_q_id, vf->rxq_ena))
			if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
					vf_q_id, vsi->vsi_num);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			/* Clear enabled queues flag */
			clear_bit(vf_q_id, vf->rxq_ena);
	/* Clear enabled queues flag */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2525 * @vf: pointer to the VF info
2526 * @vsi: the VSI being configured
2527 * @vector_id: vector ID
2528 * @map: vector map for mapping vectors to queues
2529 * @q_vector: structure for interrupt vector
2530 * configure the IRQ to queue map
2533 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
2534 struct virtchnl_vector_map *map,
2535 struct ice_q_vector *q_vector)
2537 u16 vsi_q_id, vsi_q_id_idx;
/* rebuild ring counts from scratch for this vector */
2540 q_vector->num_ring_rx = 0;
2541 q_vector->num_ring_tx = 0;
/* wire every Rx queue in the VF-supplied map to this vector */
2543 qmap = map->rxq_map;
2544 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2545 vsi_q_id = vsi_q_id_idx;
2547 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2548 return VIRTCHNL_STATUS_ERR_PARAM;
2550 q_vector->num_ring_rx++;
2551 q_vector->rx.itr_idx = map->rxitr_idx;
2552 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2553 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2554 q_vector->rx.itr_idx);
/* same procedure for the Tx queue map */
2557 qmap = map->txq_map;
2558 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
2559 vsi_q_id = vsi_q_id_idx;
2561 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
2562 return VIRTCHNL_STATUS_ERR_PARAM;
2564 q_vector->num_ring_tx++;
2565 q_vector->tx.itr_idx = map->txitr_idx;
2566 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2567 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2568 q_vector->tx.itr_idx);
2571 return VIRTCHNL_STATUS_SUCCESS;
2575 * ice_vc_cfg_irq_map_msg
2576 * @vf: pointer to the VF info
2577 * @msg: pointer to the msg buffer
2579 * called from the VF to configure the IRQ to queue map
2581 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
2583 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2584 u16 num_q_vectors_mapped, vsi_id, vector_id;
2585 struct virtchnl_irq_map_info *irqmap_info;
2586 struct virtchnl_vector_map *map;
2587 struct ice_pf *pf = vf->pf;
2588 struct ice_vsi *vsi;
2591 irqmap_info = (struct virtchnl_irq_map_info *)msg;
2592 num_q_vectors_mapped = irqmap_info->num_vectors;
2594 /* Check to make sure number of VF vectors mapped is not greater than
2595 * number of VF vectors originally allocated, and check that
2596 * there is actually at least a single VF queue vector mapped
2598 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2599 pf->num_msix_per_vf < num_q_vectors_mapped ||
2600 !num_q_vectors_mapped) {
2601 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2605 vsi = pf->vsi[vf->lan_vsi_idx];
2607 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* apply each vector map the VF supplied, one entry at a time */
2611 for (i = 0; i < num_q_vectors_mapped; i++) {
2612 struct ice_q_vector *q_vector;
2614 map = &irqmap_info->vecmap[i];
2616 vector_id = map->vector_id;
2617 vsi_id = map->vsi_id;
2618 /* vector_id is always 0-based for each VF, and can never be
2619 * larger than or equal to the max allowed interrupts per VF
/* vector 0 is the VF's non-queue (mailbox) vector; it must not carry
 * any Rx/Tx queue mappings
 */
2621 if (!(vector_id < pf->num_msix_per_vf) ||
2622 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2623 (!vector_id && (map->rxq_map || map->txq_map))) {
2624 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2628 /* No need to map VF miscellaneous or rogue vector */
2632 /* Subtract non queue vector from vector_id passed by VF
2633 * to get actual number of VSI queue vector array index
2635 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2637 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2641 /* lookout for the invalid queue index */
2642 v_ret = (enum virtchnl_status_code)
2643 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
2649 /* send the response to the VF */
2650 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2656 * @vf: pointer to the VF info
2657 * @msg: pointer to the msg buffer
2659 * called from the VF to configure the Rx/Tx queues
2661 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
2663 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2664 struct virtchnl_vsi_queue_config_info *qci =
2665 (struct virtchnl_vsi_queue_config_info *)msg;
2666 struct virtchnl_queue_pair_info *qpi;
2667 u16 num_rxq = 0, num_txq = 0;
2668 struct ice_pf *pf = vf->pf;
2669 struct ice_vsi *vsi;
2672 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2673 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2677 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2678 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2682 vsi = pf->vsi[vf->lan_vsi_idx];
2684 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* cap the request at both the per-VF RSS limit and what this VSI
 * actually has allocated
 */
2688 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
2689 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2690 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
2691 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2692 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* validate and copy each queue pair's settings into the VSI rings */
2696 for (i = 0; i < qci->num_queue_pairs; i++) {
2697 qpi = &qci->qpair[i];
2698 if (qpi->txq.vsi_id != qci->vsi_id ||
2699 qpi->rxq.vsi_id != qci->vsi_id ||
2700 qpi->rxq.queue_id != qpi->txq.queue_id ||
2701 qpi->txq.headwb_enabled ||
2702 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2703 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2704 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2705 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2708 /* copy Tx queue info from VF into VSI */
2709 if (qpi->txq.ring_len > 0) {
2711 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2712 vsi->tx_rings[i]->count = qpi->txq.ring_len;
2715 /* copy Rx queue info from VF into VSI */
2716 if (qpi->rxq.ring_len > 0) {
2718 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2719 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
/* buffer size must sit in [1024, 16K-128] when specified */
2721 if (qpi->rxq.databuffer_size != 0 &&
2722 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2723 qpi->rxq.databuffer_size < 1024)) {
2724 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2727 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2728 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
/* max frame size: at least a minimal Ethernet frame, below 16K */
2729 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2730 qpi->rxq.max_pkt_size < 64) {
2731 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2736 vsi->max_frame = qpi->rxq.max_pkt_size;
2739 /* VF can request to configure less than allocated queues
2740 * or default allocated queues. So update the VSI with new number
2742 vsi->num_txq = num_txq;
2743 vsi->num_rxq = num_rxq;
2744 /* All queues of VF VSI are in TC 0 */
2745 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2746 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
2748 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2749 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2752 /* send the response to the VF */
2753 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2759 * @vf: pointer to the VF info
2761 static bool ice_is_vf_trusted(struct ice_vf *vf)
2763 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
2767 * ice_can_vf_change_mac
2768 * @vf: pointer to the VF info
2770 * Return true if the VF is allowed to change its MAC filters, false otherwise
2772 static bool ice_can_vf_change_mac(struct ice_vf *vf)
2774 /* If the VF MAC address has been set administratively (via the
2775 * ndo_set_vf_mac command), then deny permission to the VF to
2776 * add/delete unicast MAC addresses, unless the VF is trusted
2778 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
2785 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
2786 * @vf: pointer to the VF info
2787 * @vsi: pointer to the VF's VSI
2788 * @mac_addr: MAC address to add
2791 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
2793 struct device *dev = ice_pf_to_dev(vf->pf);
2794 enum ice_status status;
2796 /* default unicast MAC already added */
2797 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
2800 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
2801 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2805 status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
2806 if (status == ICE_ERR_ALREADY_EXISTS) {
2807 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
2810 } else if (status) {
2811 dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
2812 mac_addr, vf->vf_id, status);
2816 /* only set dflt_lan_addr once */
2817 if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
2818 is_unicast_ether_addr(mac_addr))
2819 ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
2827 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
2828 * @vf: pointer to the VF info
2829 * @vsi: pointer to the VF's VSI
2830 * @mac_addr: MAC address to delete
2833 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
2835 struct device *dev = ice_pf_to_dev(vf->pf);
2836 enum ice_status status;
/* an untrusted VF with an admin-pinned MAC may not remove its default
 * LAN address
 */
2838 if (!ice_can_vf_change_mac(vf) &&
2839 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
2842 status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
2843 if (status == ICE_ERR_DOES_NOT_EXIST) {
2844 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
2847 } else if (status) {
2848 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
2849 mac_addr, vf->vf_id, status);
/* forget the default LAN address once its filter is gone */
2853 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
2854 eth_zero_addr(vf->dflt_lan_addr.addr);
2862 * ice_vc_handle_mac_addr_msg
2863 * @vf: pointer to the VF info
2864 * @msg: pointer to the msg buffer
2865 * @set: true if MAC filters are being set, false otherwise
2867 * add guest MAC address filter
2870 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
/* function pointer selects add vs delete so the validation/loop logic
 * below is shared by both opcodes
 */
2872 int (*ice_vc_cfg_mac)
2873 (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
2874 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2875 struct virtchnl_ether_addr_list *al =
2876 (struct virtchnl_ether_addr_list *)msg;
2877 struct ice_pf *pf = vf->pf;
2878 enum virtchnl_ops vc_op;
2879 struct ice_vsi *vsi;
2883 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
2884 ice_vc_cfg_mac = ice_vc_add_mac_addr;
2886 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
2887 ice_vc_cfg_mac = ice_vc_del_mac_addr;
2890 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2891 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2892 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2893 goto handle_mac_exit;
2896 /* If this VF is not privileged, then we can't add more than a
2897 * limited number of addresses. Check to make sure that the
2898 * additions do not push us over the limit.
2900 if (set && !ice_is_vf_trusted(vf) &&
2901 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
2902 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
2904 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2905 goto handle_mac_exit;
2908 vsi = pf->vsi[vf->lan_vsi_idx];
2910 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2911 goto handle_mac_exit;
2914 for (i = 0; i < al->num_elements; i++) {
2915 u8 *mac_addr = al->list[i].addr;
/* silently skip broadcast/zero addresses rather than failing */
2918 if (is_broadcast_ether_addr(mac_addr) ||
2919 is_zero_ether_addr(mac_addr))
2922 result = ice_vc_cfg_mac(vf, vsi, mac_addr);
/* duplicate add (-EEXIST) / missing delete (-ENOENT) are tolerated;
 * any other error aborts the whole request
 */
2923 if (result == -EEXIST || result == -ENOENT) {
2925 } else if (result) {
2926 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2927 goto handle_mac_exit;
2932 /* send the response to the VF */
2933 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
2937 * ice_vc_add_mac_addr_msg
2938 * @vf: pointer to the VF info
2939 * @msg: pointer to the msg buffer
2941 * add guest MAC address filter
2943 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2945 return ice_vc_handle_mac_addr_msg(vf, msg, true);
2949 * ice_vc_del_mac_addr_msg
2950 * @vf: pointer to the VF info
2951 * @msg: pointer to the msg buffer
2953 * remove guest MAC address filter
2955 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2957 return ice_vc_handle_mac_addr_msg(vf, msg, false);
2961 * ice_vc_request_qs_msg
2962 * @vf: pointer to the VF info
2963 * @msg: pointer to the msg buffer
2965 * VFs get a default number of queues but can use this message to request a
2966 * different number. If the request is successful, PF will reset the VF and
2967 * return 0. If unsuccessful, PF will send message informing VF of number of
2968 * available queue pairs via virtchnl message response to VF.
2970 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
2972 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2973 struct virtchnl_vf_res_request *vfres =
2974 (struct virtchnl_vf_res_request *)msg;
2975 u16 req_queues = vfres->num_queue_pairs;
2976 struct ice_pf *pf = vf->pf;
2977 u16 max_allowed_vf_queues;
2978 u16 tx_rx_queue_left;
2982 dev = ice_pf_to_dev(pf);
2983 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2984 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* headroom is limited by whichever of Tx/Rx has fewer free queues */
2988 cur_queues = vf->num_vf_qs;
2989 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
2990 ice_get_avail_rxq_count(pf));
2991 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
2993 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
/* over the per-VF cap: reply with the cap so the VF can retry */
2995 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
2996 dev_err(dev, "VF %d tried to request more than %d queues.\n",
2997 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
2998 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
/* not enough free queues: reply with the best achievable number */
2999 } else if (req_queues > cur_queues &&
3000 req_queues - cur_queues > tx_rx_queue_left) {
3001 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
3002 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
3003 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
3004 ICE_MAX_RSS_QS_PER_VF);
3006 /* request is successful, then reset VF */
3007 vf->num_req_qs = req_queues;
3008 ice_vc_reset_vf(vf);
3009 dev_info(dev, "VF %d granted request of %u queues.\n",
3010 vf->vf_id, req_queues);
3015 /* send the response to the VF */
3016 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
3017 v_ret, (u8 *)vfres, sizeof(*vfres));
3021 * ice_set_vf_port_vlan
3022 * @netdev: network interface device structure
3023 * @vf_id: VF identifier
3024 * @vlan_id: VLAN ID being set
3025 * @qos: priority setting
3026 * @vlan_proto: VLAN protocol
3028 * program VF Port VLAN ID and/or QoS
3031 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3034 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3035 struct ice_vsi *vsi;
3041 dev = ice_pf_to_dev(pf);
3042 if (ice_validate_vf_id(pf, vf_id))
/* QoS is a 3-bit 802.1p priority, hence the 0-7 bound */
3045 if (vlan_id >= VLAN_N_VID || qos > 7) {
3046 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3047 vf_id, vlan_id, qos);
/* only 802.1Q port VLANs are supported by this implementation */
3051 if (vlan_proto != htons(ETH_P_8021Q)) {
3052 dev_err(dev, "VF VLAN protocol is not supported\n");
3053 return -EPROTONOSUPPORT;
3056 vf = &pf->vf[vf_id];
3057 vsi = pf->vsi[vf->lan_vsi_idx];
3059 ret = ice_check_vf_ready_for_cfg(vf);
/* VLAN ID in the low 12 bits, priority in the top 3 (VLAN TCI layout) */
3063 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3065 if (vf->port_vlan_info == vlanprio) {
3066 /* duplicate request, so just return success */
3067 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
3071 if (vlan_id || qos) {
3072 /* remove VLAN 0 filter set by default when transitioning from
3073 * no port VLAN to a port VLAN. No change to old port VLAN on
3076 ret = ice_vsi_kill_vlan(vsi, 0);
3079 ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
3083 /* add VLAN 0 filter back when transitioning from port VLAN to
3084 * no port VLAN. No change to old port VLAN on failure.
3086 ret = ice_vsi_add_vlan(vsi, 0);
3089 ret = ice_vsi_manage_pvid(vsi, 0, false);
3095 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
3096 vlan_id, qos, vf_id);
3098 /* add VLAN filter for the port VLAN */
3099 ret = ice_vsi_add_vlan(vsi, vlan_id);
3103 /* remove old port VLAN filter with valid VLAN ID or QoS fields */
3104 if (vf->port_vlan_info)
3105 ice_vsi_kill_vlan(vsi, vf->port_vlan_info & VLAN_VID_MASK);
3107 /* keep port VLAN information persistent on resets */
3108 vf->port_vlan_info = le16_to_cpu(vsi->info.pvid);
3114 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3115 * @caps: VF driver negotiated capabilities
3117 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3119 static bool ice_vf_vlan_offload_ena(u32 caps)
3121 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3125 * ice_vc_process_vlan_msg
3126 * @vf: pointer to the VF info
3127 * @msg: pointer to the msg buffer
3128 * @add_v: Add VLAN if true, otherwise delete VLAN
3130 * Process virtchnl op to add or remove programmed guest VLAN ID
3132 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3134 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3135 struct virtchnl_vlan_filter_list *vfl =
3136 (struct virtchnl_vlan_filter_list *)msg;
3137 struct ice_pf *pf = vf->pf;
3138 bool vlan_promisc = false;
3139 struct ice_vsi *vsi;
3146 dev = ice_pf_to_dev(pf);
3147 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3148 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3152 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3153 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3157 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
3158 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* validate every requested VLAN ID up front before touching filters */
3162 for (i = 0; i < vfl->num_elements; i++) {
3163 if (vfl->vlan_id[i] >= VLAN_N_VID) {
3164 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3165 dev_err(dev, "invalid VF VLAN id %d\n",
3172 vsi = pf->vsi[vf->lan_vsi_idx];
3174 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3178 if (add_v && !ice_is_vf_trusted(vf) &&
3179 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3180 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
3182 /* There is no need to let VF know about being not trusted,
3183 * so we can just return success message here
/* a VSI under a port VLAN rejects guest VLAN changes */
3188 if (vsi->info.pvid) {
3189 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* VLAN promiscuous handling is only needed when the VF is already in
 * unicast/multicast promiscuous mode AND the PF allows true promisc
 */
3193 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
3194 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
3195 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
3196 vlan_promisc = true;
/* add path: program each VLAN, re-checking the untrusted cap as the
 * count grows
 */
3199 for (i = 0; i < vfl->num_elements; i++) {
3200 u16 vid = vfl->vlan_id[i];
3202 if (!ice_is_vf_trusted(vf) &&
3203 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
3204 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
3206 /* There is no need to let VF know about being
3207 * not trusted, so we can just return success
3208 * message here as well.
3213 /* we add VLAN 0 by default for each VF so we can enable
3214 * Tx VLAN anti-spoof without triggering MDD events so
3215 * we don't need to add it again here
3220 status = ice_vsi_add_vlan(vsi, vid);
3222 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3226 /* Enable VLAN pruning when non-zero VLAN is added */
3227 if (!vlan_promisc && vid &&
3228 !ice_vsi_is_vlan_pruning_ena(vsi)) {
3229 status = ice_cfg_vlan_pruning(vsi, true, false);
3231 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3232 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
3236 } else if (vlan_promisc) {
3237 /* Enable Ucast/Mcast VLAN promiscuous mode */
3238 promisc_m = ICE_PROMISC_VLAN_TX |
3239 ICE_PROMISC_VLAN_RX;
3241 status = ice_set_vsi_promisc(hw, vsi->idx,
3244 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3245 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
3251 /* In case of non_trusted VF, number of VLAN elements passed
3252 * to PF for removal might be greater than number of VLANs
3253 * filter programmed for that VF - So, use actual number of
3254 * VLANS added earlier with add VLAN opcode. In order to avoid
3255 * removing VLAN that doesn't exist, which result to sending
3256 * erroneous failed message back to the VF
3260 num_vf_vlan = vsi->num_vlan;
3261 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
3262 u16 vid = vfl->vlan_id[i];
3264 /* we add VLAN 0 by default for each VF so we can enable
3265 * Tx VLAN anti-spoof without triggering MDD events so
3266 * we don't want a VIRTCHNL request to remove it
3271 /* Make sure ice_vsi_kill_vlan is successful before
3272 * updating VLAN information
3274 status = ice_vsi_kill_vlan(vsi, vid);
3276 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3280 /* Disable VLAN pruning when only VLAN 0 is left */
3281 if (vsi->num_vlan == 1 &&
3282 ice_vsi_is_vlan_pruning_ena(vsi))
3283 ice_cfg_vlan_pruning(vsi, false, false);
3285 /* Disable Unicast/Multicast VLAN promiscuous mode */
3287 promisc_m = ICE_PROMISC_VLAN_TX |
3288 ICE_PROMISC_VLAN_RX;
3290 ice_clear_vsi_promisc(hw, vsi->idx,
3297 /* send the response to the VF */
/* reply with the opcode matching the direction of this request */
3299 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
3302 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
3307 * ice_vc_add_vlan_msg
3308 * @vf: pointer to the VF info
3309 * @msg: pointer to the msg buffer
3311 * Add and program guest VLAN ID
3313 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
3315 return ice_vc_process_vlan_msg(vf, msg, true);
3319 * ice_vc_remove_vlan_msg
3320 * @vf: pointer to the VF info
3321 * @msg: pointer to the msg buffer
3323 * remove programmed guest VLAN ID
3325 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
3327 return ice_vc_process_vlan_msg(vf, msg, false);
3331 * ice_vc_ena_vlan_stripping
3332 * @vf: pointer to the VF info
3334 * Enable VLAN header stripping for a given VF
3336 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3338 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3339 struct ice_pf *pf = vf->pf;
3340 struct ice_vsi *vsi;
3342 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3343 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* VF must have negotiated the VLAN offload capability */
3347 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3348 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3352 vsi = pf->vsi[vf->lan_vsi_idx];
3353 if (ice_vsi_manage_vlan_stripping(vsi, true))
3354 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3357 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3362 * ice_vc_dis_vlan_stripping
3363 * @vf: pointer to the VF info
3365 * Disable VLAN header stripping for a given VF
3367 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3369 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3370 struct ice_pf *pf = vf->pf;
3371 struct ice_vsi *vsi;
3373 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3374 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* VF must have negotiated the VLAN offload capability */
3378 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3379 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3383 vsi = pf->vsi[vf->lan_vsi_idx];
3385 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3389 if (ice_vsi_manage_vlan_stripping(vsi, false))
3390 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3393 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3398 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3399 * @vf: VF to enable/disable VLAN stripping for on initialization
3401 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
3402 * the flag is cleared then we want to disable stripping. For example, the flag
3403 * will be cleared when port VLANs are configured by the administrator before
3404 * passing the VF to the guest or if the AVF driver doesn't support VLAN
3407 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3409 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3414 /* don't modify stripping if port VLAN is configured */
/* mirror the negotiated offload flag: strip when the VF asked for
 * VLAN offload, otherwise leave tags in the packet
 */
3418 if (ice_vf_vlan_offload_ena(vf->driver_caps))
3419 return ice_vsi_manage_vlan_stripping(vsi, true);
3421 return ice_vsi_manage_vlan_stripping(vsi, false);
3425 * ice_vc_process_vf_msg - Process request from VF
3426 * @pf: pointer to the PF structure
3427 * @event: pointer to the AQ event
3429 * called from the common asq/arq handler to
3430 * process request from VF
3432 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
/* opcode and VF ID are carried in the admin-queue descriptor cookie */
3434 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3435 s16 vf_id = le16_to_cpu(event->desc.retval);
3436 u16 msglen = event->msg_len;
3437 u8 *msg = event->msg_buf;
3438 struct ice_vf *vf = NULL;
3442 dev = ice_pf_to_dev(pf);
3443 if (ice_validate_vf_id(pf, vf_id)) {
3448 vf = &pf->vf[vf_id];
3450 /* Check if VF is disabled. */
3451 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3456 /* Perform basic checks on the msg */
3457 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3459 if (err == VIRTCHNL_STATUS_ERR_PARAM)
/* malformed message: NAK the VF with ERR_PARAM and log it */
3467 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3469 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3470 vf_id, v_opcode, msglen, err);
/* dispatch each validated opcode to its handler; each handler sends
 * its own response back to the VF
 */
3475 case VIRTCHNL_OP_VERSION:
3476 err = ice_vc_get_ver_msg(vf, msg);
3478 case VIRTCHNL_OP_GET_VF_RESOURCES:
3479 err = ice_vc_get_vf_res_msg(vf, msg);
3480 if (ice_vf_init_vlan_stripping(vf))
3481 dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
3483 ice_vc_notify_vf_link_state(vf);
3485 case VIRTCHNL_OP_RESET_VF:
3486 ice_vc_reset_vf_msg(vf);
3488 case VIRTCHNL_OP_ADD_ETH_ADDR:
3489 err = ice_vc_add_mac_addr_msg(vf, msg);
3491 case VIRTCHNL_OP_DEL_ETH_ADDR:
3492 err = ice_vc_del_mac_addr_msg(vf, msg);
3494 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3495 err = ice_vc_cfg_qs_msg(vf, msg);
3497 case VIRTCHNL_OP_ENABLE_QUEUES:
3498 err = ice_vc_ena_qs_msg(vf, msg);
3499 ice_vc_notify_vf_link_state(vf);
3501 case VIRTCHNL_OP_DISABLE_QUEUES:
3502 err = ice_vc_dis_qs_msg(vf, msg);
3504 case VIRTCHNL_OP_REQUEST_QUEUES:
3505 err = ice_vc_request_qs_msg(vf, msg);
3507 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3508 err = ice_vc_cfg_irq_map_msg(vf, msg);
3510 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3511 err = ice_vc_config_rss_key(vf, msg);
3513 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3514 err = ice_vc_config_rss_lut(vf, msg);
3516 case VIRTCHNL_OP_GET_STATS:
3517 err = ice_vc_get_stats_msg(vf, msg);
3519 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
3520 err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
3522 case VIRTCHNL_OP_ADD_VLAN:
3523 err = ice_vc_add_vlan_msg(vf, msg);
3525 case VIRTCHNL_OP_DEL_VLAN:
3526 err = ice_vc_remove_vlan_msg(vf, msg);
3528 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3529 err = ice_vc_ena_vlan_stripping(vf);
3531 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3532 err = ice_vc_dis_vlan_stripping(vf);
3534 case VIRTCHNL_OP_UNKNOWN:
/* unknown opcode: tell the VF it is not supported */
3536 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3538 err = ice_vc_send_msg_to_vf(vf, v_opcode,
3539 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3544 /* Helper function cares less about error return values here
3545 * as it is busy with pending work.
3547 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3548 vf_id, v_opcode, err);
3554 * @netdev: network interface device structure
3555 * @vf_id: VF identifier
3556 * @ivi: VF configuration structure
3558 * return VF configuration
3561 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3563 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3566 if (ice_validate_vf_id(pf, vf_id))
3569 vf = &pf->vf[vf_id];
3571 if (ice_check_vf_init(pf, vf))
3575 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3577 /* VF configuration for VLAN and applicable QoS */
3578 ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
3579 ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
3581 ivi->trusted = vf->trusted;
3582 ivi->spoofchk = vf->spoofchk;
/* report link policy: auto-follow, forced up, or forced down */
3583 if (!vf->link_forced)
3584 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3585 else if (vf->link_up)
3586 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3588 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3589 ivi->max_tx_rate = vf->tx_rate;
3590 ivi->min_tx_rate = 0;
3596 * @netdev: network interface device structure
3597 * @vf_id: VF identifier
3600 * program VF MAC address
3602 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3604 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3608 if (ice_validate_vf_id(pf, vf_id))
/* only a non-zero unicast MAC may be assigned */
3611 if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
3612 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3616 vf = &pf->vf[vf_id];
3617 ret = ice_check_vf_ready_for_cfg(vf);
3621 /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
3622 * flow will use the updated dflt_lan_addr and add a MAC filter
3623 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
3624 * set the MAC address for this VF.
3626 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3627 vf->pf_set_mac = true;
3628 netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
3631 ice_vc_reset_vf(vf);
3637 * @netdev: network interface device structure
3638 * @vf_id: VF identifier
3639 * @trusted: Boolean value to enable/disable trusted VF
3641 * Enable or disable a given VF as trusted
3643 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3645 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3649 if (ice_validate_vf_id(pf, vf_id))
3652 vf = &pf->vf[vf_id];
3653 ret = ice_check_vf_ready_for_cfg(vf);
3657 /* Check if already trusted */
3658 if (trusted == vf->trusted)
/* changing trust requires a VF reset so limits are re-applied */
3661 vf->trusted = trusted;
3662 ice_vc_reset_vf(vf);
3663 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
3664 vf_id, trusted ? "" : "un");
3670 * ice_set_vf_link_state
3671 * @netdev: network interface device structure
3672 * @vf_id: VF identifier
3673 * @link_state: required link state
3675 * Set VF's link state, irrespective of physical link state status
3677 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3679 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3683 if (ice_validate_vf_id(pf, vf_id))
3686 vf = &pf->vf[vf_id];
3687 ret = ice_check_vf_ready_for_cfg(vf);
/* translate the netlink policy into the driver's forced/up flags */
3691 switch (link_state) {
3692 case IFLA_VF_LINK_STATE_AUTO:
3693 vf->link_forced = false;
3695 case IFLA_VF_LINK_STATE_ENABLE:
3696 vf->link_forced = true;
3699 case IFLA_VF_LINK_STATE_DISABLE:
3700 vf->link_forced = true;
3701 vf->link_up = false;
/* push the (possibly forced) link state to the VF immediately */
3707 ice_vc_notify_vf_link_state(vf);
3713 * ice_get_vf_stats - populate some stats for the VF
3714 * @netdev: the netdev of the PF
3715 * @vf_id: the host OS identifier (0-255)
3716 * @vf_stats: pointer to the OS memory to be initialized
3718 int ice_get_vf_stats(struct net_device *netdev, int vf_id,
3719 struct ifla_vf_stats *vf_stats)
3721 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3722 struct ice_eth_stats *stats;
3723 struct ice_vsi *vsi;
3727 if (ice_validate_vf_id(pf, vf_id))
3730 vf = &pf->vf[vf_id];
3731 ret = ice_check_vf_ready_for_cfg(vf);
3735 vsi = pf->vsi[vf->lan_vsi_idx];
/* refresh hardware counters before reading the snapshot */
3739 ice_update_eth_stats(vsi);
3740 stats = &vsi->eth_stats;
3742 memset(vf_stats, 0, sizeof(*vf_stats));
/* aggregate unicast + broadcast + multicast into total packets */
3744 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
3745 stats->rx_multicast;
3746 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
3747 stats->tx_multicast;
3748 vf_stats->rx_bytes = stats->rx_bytes;
3749 vf_stats->tx_bytes = stats->tx_bytes;
3750 vf_stats->broadcast = stats->rx_broadcast;
3751 vf_stats->multicast = stats->rx_multicast;
3752 vf_stats->rx_dropped = stats->rx_discards;
3753 vf_stats->tx_dropped = stats->tx_discards;
3759 * ice_print_vfs_mdd_event - print VFs malicious driver detect event
3760 * @pf: pointer to the PF structure
3762 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
3764 void ice_print_vfs_mdd_events(struct ice_pf *pf)
3766 struct device *dev = ice_pf_to_dev(pf);
3767 struct ice_hw *hw = &pf->hw;
3770 /* check that there are pending MDD events to print */
3771 if (!test_and_clear_bit(__ICE_MDD_VF_PRINT_PENDING, pf->state))
3774 /* VF MDD event logs are rate limited to one second intervals */
3775 if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
3778 pf->last_printed_mdd_jiffies = jiffies;
3780 ice_for_each_vf(pf, i) {
3781 struct ice_vf *vf = &pf->vf[i];
3783 /* only print Rx MDD event message if there are new events */
3784 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
3785 vf->mdd_rx_events.last_printed =
3786 vf->mdd_rx_events.count;
3788 dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
3789 vf->mdd_rx_events.count, hw->pf_id, i,
3790 vf->dflt_lan_addr.addr,
3791 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
3795 /* only print Tx MDD event message if there are new events */
3796 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
3797 vf->mdd_tx_events.last_printed =
3798 vf->mdd_tx_events.count;
3800 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
3801 vf->mdd_tx_events.count, hw->pf_id, i,
3802 vf->dflt_lan_addr.addr);