1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
9 * ice_validate_vf_id - helper to check if VF ID is valid
10 * @pf: pointer to the PF structure
11 * @vf_id: the ID of the VF to check
13 static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
15 if (vf_id >= pf->num_alloc_vfs) {
16 dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
23 * ice_check_vf_init - helper to check if VF init complete
24 * @pf: pointer to the PF structure
25 * @vf: the pointer to the VF to check
27 static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
29 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
30 dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
38 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
39 * @pf: pointer to the PF structure
40 * @v_opcode: operation code
41 * @v_retval: return value
42 * @msg: pointer to the msg buffer
46 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
47 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
49 struct ice_hw *hw = &pf->hw;
52 ice_for_each_vf(pf, i) {
53 struct ice_vf *vf = &pf->vf[i];
55 /* Not all vfs are enabled so skip the ones that are not */
56 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
57 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
60 /* Ignore return value on purpose - a given VF may fail, but
61 * we need to keep going and send to all of them
63 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
69 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
70 * @vf: pointer to the VF structure
71 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
72 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
73 * @link_up: whether or not to set the link up/down
76 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
77 int ice_link_speed, bool link_up)
79 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
80 pfe->event_data.link_event_adv.link_status = link_up;
82 pfe->event_data.link_event_adv.link_speed =
83 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
85 pfe->event_data.link_event.link_status = link_up;
86 /* Legacy method for virtchnl link speeds */
87 pfe->event_data.link_event.link_speed =
88 (enum virtchnl_link_speed)
89 ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
94 * ice_vc_notify_vf_link_state - Inform a VF of link status
95 * @vf: pointer to the VF structure
97 * send a link status message to a single VF
99 static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
101 struct virtchnl_pf_event pfe = { 0 };
102 struct ice_link_status *ls;
103 struct ice_pf *pf = vf->pf;
107 ls = &hw->port_info->phy.link_info;
109 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
110 pfe.severity = PF_EVENT_SEVERITY_INFO;
112 /* Always report link is down if the VF queues aren't enabled */
113 if (!vf->num_qs_ena) {
114 ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
115 } else if (vf->link_forced) {
116 u16 link_speed = vf->link_up ?
117 ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN;
119 ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up);
121 ice_set_pfe_link(vf, &pfe, ls->link_speed,
122 ls->link_info & ICE_AQ_LINK_UP);
125 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
126 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
131 * ice_free_vf_res - Free a VF's resources
132 * @vf: pointer to the VF info
134 static void ice_free_vf_res(struct ice_vf *vf)
136 struct ice_pf *pf = vf->pf;
137 int i, last_vector_idx;
139 /* First, disable VF's configuration API to prevent OS from
140 * accessing the VF's VSI after it's freed or invalidated.
142 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
144 /* free VSI and disconnect it from the parent uplink */
145 if (vf->lan_vsi_idx) {
146 ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
152 last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
153 /* Disable interrupts so that VF starts in a known state */
154 for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
155 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
158 /* reset some of the state variables keeping track of the resources */
159 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
160 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
164 * ice_dis_vf_mappings
165 * @vf: pointer to the VF structure
167 static void ice_dis_vf_mappings(struct ice_vf *vf)
169 struct ice_pf *pf = vf->pf;
176 vsi = pf->vsi[vf->lan_vsi_idx];
178 dev = ice_pf_to_dev(pf);
179 wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
180 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
182 first = vf->first_vector_idx;
183 last = first + pf->num_vf_msix - 1;
184 for (v = first; v <= last; v++) {
187 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
188 GLINT_VECT2FUNC_IS_PF_M) |
189 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
190 GLINT_VECT2FUNC_PF_NUM_M));
191 wr32(hw, GLINT_VECT2FUNC(v), reg);
194 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
195 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
197 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
199 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
200 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
203 "Scattered mode for VF Rx queues is not yet implemented\n");
207 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
208 * @pf: pointer to the PF structure
210 * If MSIX entries from the pf->irq_tracker were needed then we need to
211 * reset the irq_tracker->end and give back the entries we needed to
214 * If no MSIX entries were taken from the pf->irq_tracker then just clear
215 * the pf->sriov_base_vector.
217 * Returns 0 on success, and -EINVAL on error.
219 static int ice_sriov_free_msix_res(struct ice_pf *pf)
221 struct ice_res_tracker *res;
226 res = pf->irq_tracker;
230 /* give back irq_tracker resources used */
231 if (pf->sriov_base_vector < res->num_entries) {
232 res->end = res->num_entries;
233 pf->num_avail_sw_msix +=
234 res->num_entries - pf->sriov_base_vector;
237 pf->sriov_base_vector = 0;
243 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
244 * @vf: pointer to the VF structure
246 void ice_set_vf_state_qs_dis(struct ice_vf *vf)
248 /* Clear Rx/Tx enabled queues flag */
249 bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
250 bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
252 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
256 * ice_dis_vf_qs - Disable the VF queues
257 * @vf: pointer to the VF structure
259 static void ice_dis_vf_qs(struct ice_vf *vf)
261 struct ice_pf *pf = vf->pf;
264 vsi = pf->vsi[vf->lan_vsi_idx];
266 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
267 ice_vsi_stop_rx_rings(vsi);
268 ice_set_vf_state_qs_dis(vf);
272 * ice_free_vfs - Free all VFs
273 * @pf: pointer to the PF structure
275 void ice_free_vfs(struct ice_pf *pf)
277 struct device *dev = ice_pf_to_dev(pf);
278 struct ice_hw *hw = &pf->hw;
284 while (test_and_set_bit(__ICE_VF_DIS, pf->state))
285 usleep_range(1000, 2000);
287 /* Avoid wait time by stopping all VFs at the same time */
288 ice_for_each_vf(pf, i)
289 if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
290 ice_dis_vf_qs(&pf->vf[i]);
292 /* Disable IOV before freeing resources. This lets any VF drivers
293 * running in the host get themselves cleaned up before we yank
294 * the carpet out from underneath their feet.
296 if (!pci_vfs_assigned(pf->pdev))
297 pci_disable_sriov(pf->pdev);
299 dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
301 tmp = pf->num_alloc_vfs;
303 pf->num_alloc_vfs = 0;
304 for (i = 0; i < tmp; i++) {
305 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
306 /* disable VF qp mappings and set VF disable state */
307 ice_dis_vf_mappings(&pf->vf[i]);
308 set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
309 ice_free_vf_res(&pf->vf[i]);
313 if (ice_sriov_free_msix_res(pf))
314 dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
316 devm_kfree(dev, pf->vf);
319 /* This check is for when the driver is unloaded while VFs are
320 * assigned. Setting the number of VFs to 0 through sysfs is caught
321 * before this function ever gets called.
323 if (!pci_vfs_assigned(pf->pdev)) {
326 /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
327 * work correctly when SR-IOV gets re-enabled.
329 for (vf_id = 0; vf_id < tmp; vf_id++) {
330 u32 reg_idx, bit_idx;
332 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
333 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
334 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
337 clear_bit(__ICE_VF_DIS, pf->state);
338 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
342 * ice_trigger_vf_reset - Reset a VF on HW
343 * @vf: pointer to the VF structure
344 * @is_vflr: true if VFLR was issued, false if not
345 * @is_pfr: true if the reset was triggered due to a previous PFR
347 * Trigger hardware to start a reset for a particular VF. Expects the caller
348 * to wait the proper amount of time to allow hardware to reset the VF before
349 * it cleans up and restores VF functionality.
351 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
353 struct ice_pf *pf = vf->pf;
354 u32 reg, reg_idx, bit_idx;
359 dev = ice_pf_to_dev(pf);
361 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
363 /* Inform VF that it is no longer active, as a warning */
364 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
366 /* Disable VF's configuration API during reset. The flag is re-enabled
367 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
368 * It's normally disabled in ice_free_vf_res(), but it's safer
369 * to do it earlier to give some time to finish to any VF config
370 * functions that may still be running at this point.
372 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
374 /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
375 * in the case of VFR. If this is done for PFR, it can mess up VF
376 * resets because the VF driver may already have started cleanup
377 * by the time we get here.
380 wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
382 /* In the case of a VFLR, the HW has already reset the VF and we
383 * just need to clean up, so don't hit the VFRTRIG register.
386 /* reset VF using VPGEN_VFRTRIG reg */
387 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
388 reg |= VPGEN_VFRTRIG_VFSWR_M;
389 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
391 /* clear the VFLR bit in GLGEN_VFLRSTAT */
392 reg_idx = (vf_abs_id) / 32;
393 bit_idx = (vf_abs_id) % 32;
394 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
397 wr32(hw, PF_PCI_CIAA,
398 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
399 for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
400 reg = rd32(hw, PF_PCI_CIAD);
401 /* no transactions pending so stop polling */
402 if ((reg & VF_TRANS_PENDING_M) == 0)
406 "VF %d PCI transactions stuck\n", vf->vf_id);
407 udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
412 * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
413 * @ctxt: the VSI ctxt to fill
414 * @vid: the VLAN ID to set as a PVID
416 static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
418 ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
419 ICE_AQ_VSI_PVLAN_INSERT_PVID |
420 ICE_AQ_VSI_VLAN_EMOD_STR);
421 ctxt->info.pvid = cpu_to_le16(vid);
422 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
423 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
424 ICE_AQ_VSI_PROP_SW_VALID);
428 * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
429 * @ctxt: the VSI ctxt to fill
431 static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
433 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
434 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
435 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
436 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
437 ICE_AQ_VSI_PROP_SW_VALID);
441 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
442 * @vsi: the VSI to update
443 * @vid: the VLAN ID to set as a PVID
444 * @enable: true for enable PVID false for disable
446 static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
448 struct ice_hw *hw = &vsi->back->hw;
449 struct ice_vsi_ctx *ctxt;
450 enum ice_status status;
453 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
457 ctxt->info = vsi->info;
459 ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
461 ice_vsi_kill_pvid_fill_ctxt(ctxt);
463 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
465 dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
466 status, hw->adminq.sq_last_status);
471 vsi->info = ctxt->info;
478 * ice_vf_vsi_setup - Set up a VF VSI
479 * @pf: board private structure
480 * @pi: pointer to the port_info instance
481 * @vf_id: defines VF ID to which this VSI connects.
483 * Returns pointer to the successfully allocated VSI struct on success,
484 * otherwise returns NULL on failure.
486 static struct ice_vsi *
487 ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
489 return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
493 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
494 * @pf: pointer to PF structure
495 * @vf: pointer to VF that the first MSIX vector index is being calculated for
497 * This returns the first MSIX vector index in PF space that is used by this VF.
498 * This index is used when accessing PF relative registers such as
499 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
500 * This will always be the OICR index in the AVF driver so any functionality
501 * using vf->first_vector_idx for queue configuration will have to increment by
502 * 1 to avoid meddling with the OICR index.
504 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
506 return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
510 * ice_alloc_vsi_res - Setup VF VSI and its resources
511 * @vf: pointer to the VF structure
513 * Returns 0 on success, negative value on failure
515 static int ice_alloc_vsi_res(struct ice_vf *vf)
517 struct ice_pf *pf = vf->pf;
518 LIST_HEAD(tmp_add_list);
519 u8 broadcast[ETH_ALEN];
524 dev = ice_pf_to_dev(pf);
525 /* first vector index is the VFs OICR index */
526 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
528 vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
530 dev_err(dev, "Failed to create VF VSI\n");
534 vf->lan_vsi_idx = vsi->idx;
535 vf->lan_vsi_num = vsi->vsi_num;
537 /* Check if port VLAN exist before, and restore it accordingly */
538 if (vf->port_vlan_id) {
539 ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
540 ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
543 eth_broadcast_addr(broadcast);
545 status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
547 goto ice_alloc_vsi_res_exit;
549 if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
550 status = ice_add_mac_to_list(vsi, &tmp_add_list,
551 vf->dflt_lan_addr.addr);
553 goto ice_alloc_vsi_res_exit;
556 status = ice_add_mac(&pf->hw, &tmp_add_list);
558 dev_err(dev, "could not add mac filters error %d\n", status);
562 /* Clear this bit after VF initialization since we shouldn't reclaim
563 * and reassign interrupts for synchronous or asynchronous VFR events.
564 * We don't want to reconfigure interrupts since AVF driver doesn't
565 * expect vector assignment to be changed unless there is a request for
568 ice_alloc_vsi_res_exit:
569 ice_free_fltr_list(dev, &tmp_add_list);
574 * ice_alloc_vf_res - Allocate VF resources
575 * @vf: pointer to the VF structure
577 static int ice_alloc_vf_res(struct ice_vf *vf)
579 struct ice_pf *pf = vf->pf;
580 int tx_rx_queue_left;
583 /* Update number of VF queues, in case VF had requested for queue
586 tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
587 ice_get_avail_rxq_count(pf));
588 tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
589 if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
590 vf->num_req_qs != vf->num_vf_qs)
591 vf->num_vf_qs = vf->num_req_qs;
593 /* setup VF VSI and necessary resources */
594 status = ice_alloc_vsi_res(vf);
596 goto ice_alloc_vf_res_exit;
599 set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
601 clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
603 /* VF is now completely initialized */
604 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
608 ice_alloc_vf_res_exit:
614 * ice_ena_vf_mappings
615 * @vf: pointer to the VF structure
617 * Enable VF vectors and queues allocation by writing the details into
618 * respective registers.
620 static void ice_ena_vf_mappings(struct ice_vf *vf)
622 int abs_vf_id, abs_first, abs_last;
623 struct ice_pf *pf = vf->pf;
630 dev = ice_pf_to_dev(pf);
632 vsi = pf->vsi[vf->lan_vsi_idx];
633 first = vf->first_vector_idx;
634 last = (first + pf->num_vf_msix) - 1;
635 abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
636 abs_last = (abs_first + pf->num_vf_msix) - 1;
637 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
639 /* VF Vector allocation */
640 reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
641 ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
642 VPINT_ALLOC_VALID_M);
643 wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
645 reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
646 & VPINT_ALLOC_PCI_FIRST_M) |
647 ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
648 VPINT_ALLOC_PCI_VALID_M);
649 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
650 /* map the interrupts to its functions */
651 for (v = first; v <= last; v++) {
652 reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
653 GLINT_VECT2FUNC_VF_NUM_M) |
654 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
655 GLINT_VECT2FUNC_PF_NUM_M));
656 wr32(hw, GLINT_VECT2FUNC(v), reg);
659 /* Map mailbox interrupt. We put an explicit 0 here to remind us that
660 * VF admin queue interrupts will go to VF MSI-X vector 0.
662 wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
663 /* set regardless of mapping mode */
664 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
666 /* VF Tx queues allocation */
667 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
668 /* set the VF PF Tx queue range
669 * VFNUMQ value should be set to (number of queues - 1). A value
670 * of 0 means 1 queue and a value of 255 means 256 queues
672 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
673 VPLAN_TX_QBASE_VFFIRSTQ_M) |
674 (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
675 VPLAN_TX_QBASE_VFNUMQ_M));
676 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
678 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
681 /* set regardless of mapping mode */
682 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
684 /* VF Rx queues allocation */
685 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
686 /* set the VF PF Rx queue range
687 * VFNUMQ value should be set to (number of queues - 1). A value
688 * of 0 means 1 queue and a value of 255 means 256 queues
690 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
691 VPLAN_RX_QBASE_VFFIRSTQ_M) |
692 (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
693 VPLAN_RX_QBASE_VFNUMQ_M));
694 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
696 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
702 * @pf: pointer to the PF structure
703 * @avail_res: available resources in the PF structure
704 * @max_res: maximum resources that can be given per VF
705 * @min_res: minimum resources that can be given per VF
707 * Returns non-zero value if resources (queues/vectors) are available or
708 * returns zero if PF cannot accommodate for all num_alloc_vfs.
711 ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
713 bool checked_min_res = false;
716 /* start by checking if PF can assign max number of resources for
718 * if yes, return number per VF
719 * If no, divide by 2 and roundup, check again
720 * repeat the loop till we reach a point where even minimum resources
721 * are not available, in that case return 0
724 while ((res >= min_res) && !checked_min_res) {
727 num_all_res = pf->num_alloc_vfs * res;
728 if (num_all_res <= avail_res)
732 checked_min_res = true;
734 res = DIV_ROUND_UP(res, 2);
740 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
741 * @vf: VF to calculate the register index for
742 * @q_vector: a q_vector associated to the VF
744 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
748 if (!vf || !q_vector)
753 /* always add one to account for the OICR being the first MSIX */
754 return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
759 * ice_get_max_valid_res_idx - Get the max valid resource index
760 * @res: pointer to the resource to find the max valid index for
762 * Start from the end of the ice_res_tracker and return right when we find the
763 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
764 * valid for SR-IOV because it is the only consumer that manipulates the
765 * res->end and this is always called when res->end is set to res->num_entries.
767 static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
774 for (i = res->num_entries - 1; i >= 0; i--)
775 if (res->list[i] & ICE_RES_VALID_BIT)
782 * ice_sriov_set_msix_res - Set any used MSIX resources
783 * @pf: pointer to PF structure
784 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
786 * This function allows SR-IOV resources to be taken from the end of the PF's
787 * allowed HW MSIX vectors so in many cases the irq_tracker will not
788 * be needed. In these cases we just set the pf->sriov_base_vector and return
791 * If SR-IOV needs to use any pf->irq_tracker entries it updates the
792 * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
793 * so any calls to ice_get_res() using the irq_tracker will not try to use
794 * resources at or beyond the newly set value.
796 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors in
797 * in the PF's space available for SR-IOV.
799 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
801 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
802 u16 pf_total_msix_vectors =
803 pf->hw.func_caps.common_cap.num_msix_vectors;
804 struct ice_res_tracker *res = pf->irq_tracker;
805 int sriov_base_vector;
807 if (max_valid_res_idx < 0)
808 return max_valid_res_idx;
810 sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
812 /* make sure we only grab irq_tracker entries from the list end and
813 * that we have enough available MSIX vectors
815 if (sriov_base_vector <= max_valid_res_idx)
818 pf->sriov_base_vector = sriov_base_vector;
820 /* dip into irq_tracker entries and update used resources */
821 if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
822 pf->num_avail_sw_msix -=
823 res->num_entries - pf->sriov_base_vector;
824 res->end = pf->sriov_base_vector;
831 * ice_check_avail_res - check if vectors and queues are available
832 * @pf: pointer to the PF structure
834 * This function is where we calculate actual number of resources for VF VSIs,
835 * we don't reserve ahead of time during probe. Returns success if vectors and
836 * queues resources are available, otherwise returns error code
838 static int ice_check_avail_res(struct ice_pf *pf)
840 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
841 u16 num_msix, num_txq, num_rxq, num_avail_msix;
842 struct device *dev = ice_pf_to_dev(pf);
844 if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
847 /* add 1 to max_valid_res_idx to account for it being 0-based */
848 num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
849 (max_valid_res_idx + 1);
851 /* Grab from HW interrupts common pool
852 * Note: By the time the user decides it needs more vectors in a VF
853 * its already too late since one must decide this prior to creating the
854 * VF interface. So the best we can do is take a guess as to what the
857 * We have two policies for vector allocation:
858 * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small
859 * number of NFV VFs used for NFV appliances, since this is a special
860 * case, we try to assign maximum vectors per VF (65) as much as
861 * possible, based on determine_resources algorithm.
862 * 2. if num_alloc_vfs is from 17 to 256, then its large number of
863 * regular VFs which are not used for any special purpose. Hence try to
864 * grab default interrupt vectors (5 as supported by AVF driver).
866 if (pf->num_alloc_vfs <= 16) {
867 num_msix = ice_determine_res(pf, num_avail_msix,
869 ICE_MIN_INTR_PER_VF);
870 } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
871 num_msix = ice_determine_res(pf, num_avail_msix,
872 ICE_DFLT_INTR_PER_VF,
873 ICE_MIN_INTR_PER_VF);
875 dev_err(dev, "Number of VFs %d exceeds max VF count %d\n",
876 pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
883 /* Grab from the common pool
884 * start by requesting Default queues (4 as supported by AVF driver),
885 * Note that, the main difference between queues and vectors is, latter
886 * can only be reserved at init time but queues can be requested by VF
887 * at runtime through Virtchnl, that is the reason we start by reserving
890 num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
891 ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
893 num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
894 ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
896 if (!num_txq || !num_rxq)
899 if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
902 /* since AVF driver works with only queue pairs which means, it expects
903 * to have equal number of Rx and Tx queues, so take the minimum of
904 * available Tx or Rx queues
906 pf->num_vf_qps = min_t(int, num_txq, num_rxq);
907 pf->num_vf_msix = num_msix;
913 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
914 * @vf: pointer to the VF structure
916 * Cleanup a VF after the hardware reset is finished. Expects the caller to
917 * have verified whether the reset is finished properly, and ensure the
918 * minimum amount of wait time has passed. Reallocate VF resources back to make
921 static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
923 struct ice_pf *pf = vf->pf;
929 /* PF software completes the flow by notifying VF that reset flow is
930 * completed. This is done by enabling hardware by clearing the reset
931 * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
932 * register to VFR completed (done at the end of this function)
933 * By doing this we allow HW to access VF memory at any point. If we
934 * did it any sooner, HW could access memory while it was being freed
935 * in ice_free_vf_res(), causing an IOMMU fault.
937 * On the other hand, this needs to be done ASAP, because the VF driver
938 * is waiting for this to happen and may report a timeout. It's
939 * harmless, but it gets logged into Guest OS kernel log, so best avoid
942 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
943 reg &= ~VPGEN_VFRTRIG_VFSWR_M;
944 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
946 /* reallocate VF resources to finish resetting the VSI state */
947 if (!ice_alloc_vf_res(vf)) {
950 ice_ena_vf_mappings(vf);
951 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
952 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
954 vsi = pf->vsi[vf->lan_vsi_idx];
955 if (ice_vsi_add_vlan(vsi, 0))
956 dev_warn(ice_pf_to_dev(pf),
957 "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest",
961 /* Tell the VF driver the reset is done. This needs to be done only
962 * after VF has been fully initialized, because the VF driver may
963 * request resources immediately after setting this flag.
965 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
969 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
970 * @vf: pointer to the VF info
971 * @vsi: the VSI being configured
972 * @promisc_m: mask of promiscuous config bits
973 * @rm_promisc: promisc flag request from the VF to remove or add filter
975 * This function configures VF VSI promiscuous mode, based on the VF requests,
976 * for Unicast, Multicast and VLAN
978 static enum ice_status
979 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
982 struct ice_pf *pf = vf->pf;
983 enum ice_status status = 0;
988 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
990 } else if (vf->port_vlan_id) {
992 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
995 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
999 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1002 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1010 * ice_config_res_vfs - Finalize allocation of VFs resources in one go
1011 * @pf: pointer to the PF structure
1013 * This function is being called as last part of resetting all VFs, or when
1014 * configuring VFs for the first time, where there is no resource to be freed
1015 * Returns true if resources were properly allocated for all VFs, and false
1018 static bool ice_config_res_vfs(struct ice_pf *pf)
1020 struct device *dev = ice_pf_to_dev(pf);
1021 struct ice_hw *hw = &pf->hw;
1024 if (ice_check_avail_res(pf)) {
1025 dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
1029 /* rearm global interrupts */
1030 if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
1031 ice_irq_dynamic_ena(hw, NULL, NULL);
1033 /* Finish resetting each VF and allocate resources */
1034 ice_for_each_vf(pf, v) {
1035 struct ice_vf *vf = &pf->vf[v];
1037 vf->num_vf_qs = pf->num_vf_qps;
1038 dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
1040 ice_cleanup_and_realloc_vf(vf);
1044 clear_bit(__ICE_VF_DIS, pf->state);
1050 * ice_reset_all_vfs - reset all allocated VFs in one go
1051 * @pf: pointer to the PF structure
1052 * @is_vflr: true if VFLR was issued, false if not
1054 * First, tell the hardware to reset each VF, then do all the waiting in one
1055 * chunk, and finally finish restoring each VF after the wait. This is useful
1056 * during PF routines which need to reset all VFs, as otherwise it must perform
1057 * these resets in a serialized fashion.
1059 * Returns true if any VFs were reset, and false otherwise.
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	/* If we don't have any VFs, then there is nothing to reset */
	if (!pf->num_alloc_vfs)

	/* If VFs have been disabled, there is no need to reset */
	/* test_and_set atomically claims __ICE_VF_DIS, so a concurrent
	 * disable/reset flow bails out here instead of racing us
	 */
	if (test_and_set_bit(__ICE_VF_DIS, pf->state))

	/* Begin reset on all VFs at once */
	ice_for_each_vf(pf, v)
		ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);

	/* Quiesce each VF's LAN VSI Tx queues before polling for completion */
	ice_for_each_vf(pf, v) {
		struct ice_vsi *vsi;

		vsi = pf->vsi[vf->lan_vsi_idx];
		if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
		/* NOTE(review): the Tx-queue disable via AQ mirrors the one in
		 * ice_reset_vf, where it is documented as required for VFR
		 * completion — confirm against that path
		 */
		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
				NULL, ICE_VF_RESET, vf->vf_id, NULL);

	/* HW requires some time to make sure it can flush the FIFO for a VF
	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
	 * sequence to make sure that it has completed. We'll keep track of
	 * the VFs using a simple iterator that increments once that VF has
	 * finished resetting.
	 */
	/* i bounds the total number of poll rounds (10); v walks the VFs */
	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {

		/* Check each VF in sequence */
		while (v < pf->num_alloc_vfs) {

			/* VPGEN_VFRSTAT_VFRD_M set => HW finished this VF's reset */
			reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
			if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
				/* only delay if the check failed */
				usleep_range(10, 20);

			/* If the current VF has finished resetting, move on
			 * to the next VF in sequence.

	/* Display a warning if at least one VF didn't manage to reset in
	 * time, but continue on with the operation.
	 */
	if (v < pf->num_alloc_vfs)
		dev_warn(dev, "VF reset check timeout\n");

	/* free VF resources to begin resetting the VSI state */
	ice_for_each_vf(pf, v) {

		ice_free_vf_res(vf);

		/* Free VF queues as well, and reallocate later.
		 * If a given VF has different number of queues
		 * configured, the request for update will come
		 * via mailbox communication.

	/* give back the MSI-X vectors that were carved out for the VFs */
	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	/* re-establish default VF resources/config after the mass reset */
	if (!ice_config_res_vfs(pf))
1148 * ice_is_vf_disabled
1149 * @vf: pointer to the VF info
1151 * Returns true if the PF or VF is disabled, false otherwise.
1153 static bool ice_is_vf_disabled(struct ice_vf *vf)
1155 struct ice_pf *pf = vf->pf;
1157 /* If the PF has been disabled, there is no need resetting VF until
1158 * PF is active again. Similarly, if the VF has been disabled, this
1159 * means something else is resetting the VF, so we shouldn't continue.
1160 * Otherwise, set disable VF state bit for actual reset, and continue.
1162 return (test_bit(__ICE_VF_DIS, pf->state) ||
1163 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1167 * ice_reset_vf - Reset a particular VF
1168 * @vf: pointer to the VF structure
1169 * @is_vflr: true if VFLR was issued, false if not
1171 * Returns true if the VF is reset, false otherwise.
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	dev = ice_pf_to_dev(pf);

	if (ice_is_vf_disabled(vf)) {
		/* Another flow owns the VF (or the PF is down); report to the
		 * caller without touching the hardware.
		 */
		dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",

	/* Set VF disable bit state here, before triggering reset */
	set_bit(ICE_VF_STATE_DIS, vf->vf_states);
	ice_trigger_vf_reset(vf, is_vflr, false);

	vsi = pf->vsi[vf->lan_vsi_idx];

	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))

	/* Call Disable LAN Tx queue AQ whether or not queues are
	 * enabled. This is needed for successful completion of VFR.
	 */
	ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
			NULL, ICE_VF_RESET, vf->vf_id, NULL);

	/* poll VPGEN_VFRSTAT reg to make sure
	 * that reset is complete
	 */
	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M) {

		/* only sleep if the reset is not done */
		usleep_range(10, 20);

	/* Display a warning if VF didn't manage to reset in time, but need to
	 * continue on with the operation.
	 */
	dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);

	/* disable promiscuous modes in case they were enabled
	 * ignore any error if disabling process failed
	 */
	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
		/* include the VLAN promisc bits when a port VLAN is set or
		 * the VSI carries VLANs
		 */
		if (vf->port_vlan_id || vsi->num_vlan)
			promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
			promisc_m = ICE_UCAST_PROMISC_BITS;

		vsi = pf->vsi[vf->lan_vsi_idx];
		/* NOTE(review): last arg true appears to mean "remove
		 * promiscuous filters" given the surrounding comment —
		 * confirm against ice_vf_set_vsi_promisc's signature
		 */
		if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
			dev_err(dev, "disabling promiscuous mode failed\n");

	/* free VF resources to begin resetting the VSI state */
	ice_free_vf_res(vf);

	/* rebuild the VF's VSI and restore its configuration */
	ice_cleanup_and_realloc_vf(vf);
1258 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1259 * @pf: pointer to the PF structure
1261 void ice_vc_notify_link_state(struct ice_pf *pf)
1265 ice_for_each_vf(pf, i)
1266 ice_vc_notify_vf_link_state(&pf->vf[i]);
1270 * ice_vc_notify_reset - Send pending reset message to all VFs
1271 * @pf: pointer to the PF structure
1273 * indicate a pending reset to all VFs on a given PF
1275 void ice_vc_notify_reset(struct ice_pf *pf)
1277 struct virtchnl_pf_event pfe;
1279 if (!pf->num_alloc_vfs)
1282 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1283 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1284 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1285 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1289 * ice_vc_notify_vf_reset - Notify VF of a reset event
1290 * @vf: pointer to the VF structure
static void ice_vc_notify_vf_reset(struct ice_vf *vf)
	struct virtchnl_pf_event pfe;

	/* nothing to do for an out-of-range VF ID */
	if (ice_validate_vf_id(pf, vf->vf_id))

	/* Bail out if VF is in disabled state, neither initialized, nor active
	 * state - otherwise proceed with notifications
	 */
	if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
	     !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
	    test_bit(ICE_VF_STATE_DIS, vf->vf_states))

	/* Send the impending-reset event directly to this one VF via the
	 * mailbox AQ (unlike ice_vc_notify_reset, which broadcasts).
	 */
	pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
	pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
	ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1320 * ice_alloc_vfs - Allocate and set up VFs resources
1321 * @pf: pointer to the PF structure
1322 * @num_alloc_vfs: number of VFs to allocate
static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	/* remember that the OICR interrupt is off so it can be re-armed on
	 * every exit path below
	 */
	set_bit(__ICE_OICR_INTR_DIS, pf->state);

	/* enable SR-IOV in PCI config space; on failure leave the driver in
	 * the "no VFs" state
	 */
	ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
		pf->num_alloc_vfs = 0;
		goto err_unroll_intr;

	/* allocate memory */
	vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
		goto err_pci_disable_sriov;

	pf->num_alloc_vfs = num_alloc_vfs;

	/* apply default profile */
	ice_for_each_vf(pf, i) {
		vfs[i].vf_sw_id = pf->first_sw;

		/* assign default capabilities */
		set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
		/* spoof checking on by default */
		vfs[i].spoofchk = true;

	/* VF resources get allocated with initialization */
	if (!ice_config_res_vfs(pf)) {
		goto err_unroll_sriov;

	/* unwind path: release the VF array and SR-IOV enablement */
	devm_kfree(dev, vfs);
	pf->num_alloc_vfs = 0;
err_pci_disable_sriov:
	pci_disable_sriov(pf->pdev);

	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1385 * ice_pf_state_is_nominal - checks the PF for nominal state
1386 * @pf: pointer to PF to check
1388 * Check the PF's state for a collection of bits that would indicate
1389 * the PF is in a state that would inhibit normal operation for
1390 * driver functionality.
1392 * Returns true if PF is in a nominal state.
1393 * Returns false otherwise
1395 static bool ice_pf_state_is_nominal(struct ice_pf *pf)
1397 DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
1402 bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
1403 if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1410 * ice_pci_sriov_ena - Enable or change number of VFs
1411 * @pf: pointer to the PF structure
1412 * @num_vfs: number of VFs to allocate
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
	int pre_existing_vfs = pci_num_vf(pf->pdev);
	struct device *dev = ice_pf_to_dev(pf);

	/* refuse while the PF is resetting/recovering/not ready */
	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");

	/* changing the VF count requires tearing down the old VFs first;
	 * asking for the already-active count is a no-op
	 */
	if (pre_existing_vfs && pre_existing_vfs != num_vfs)
	else if (pre_existing_vfs && pre_existing_vfs == num_vfs)

	if (num_vfs > pf->num_vfs_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->num_vfs_supported);

	dev_info(dev, "Allocating %d VFs\n", num_vfs);
	err = ice_alloc_vfs(pf, num_vfs);
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1453 * ice_sriov_configure - Enable or change number of VFs via sysfs
1454 * @pdev: pointer to a pci_dev structure
1455 * @num_vfs: number of VFs to allocate
1457 * This function is called when the user updates the number of VFs in sysfs.
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);

	/* safe mode has no advanced features, SR-IOV included */
	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");

	/* non-zero request: enable or resize */
	return ice_pci_sriov_ena(pf, num_vfs);

	/* num_vfs == 0: tear down, but only when no VF is still attached to
	 * a running VM
	 */
	if (!pci_vfs_assigned(pdev)) {

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1483 * ice_process_vflr_event - Free VF resources via IRQ calls
1484 * @pf: pointer to the PF structure
1486 * called from the VFLR IRQ handler to
1487 * free up VF resources and state variables
void ice_process_vflr_event(struct ice_pf *pf)
	struct ice_hw *hw = &pf->hw;

	/* test_and_clear consumes the pending-event flag atomically */
	if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||

	ice_for_each_vf(pf, vf_id) {
		struct ice_vf *vf = &pf->vf[vf_id];
		u32 reg_idx, bit_idx;

		/* each GLGEN_VFLRSTAT register tracks 32 VFs, indexed from
		 * the HW's absolute VF base ID
		 */
		reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, true);
1514 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
1515 * @vf: pointer to the VF info
static void ice_vc_reset_vf(struct ice_vf *vf)
	/* warn the VF first so its driver can quiesce ... */
	ice_vc_notify_vf_reset(vf);
	/* ... then perform a software (non-VFLR) reset */
	ice_reset_vf(vf, false);
1524 * ice_vc_send_msg_to_vf - Send message to VF
1525 * @vf: pointer to the VF info
1526 * @v_opcode: virtual channel opcode
1527 * @v_retval: virtual channel return value
1528 * @msg: pointer to the msg buffer
1529 * @msglen: msg length
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
		      enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
	enum ice_status aq_ret;

	if (ice_validate_vf_id(pf, vf->vf_id))

	dev = ice_pf_to_dev(pf);

	/* single place to detect unsuccessful return values */
		/* a VF that keeps sending bad requests gets disabled once it
		 * crosses the allowed threshold below
		 */
		vf->num_inval_msgs++;
		dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
			 v_opcode, v_retval);
		if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
			dev_err(dev,
				"Number of invalid messages exceeded for VF %d\n",
			dev_err(dev, "Use PF Control I/F to enable the VF\n");
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);

		vf->num_valid_msgs++;
		/* reset the invalid counter, if a valid message is received. */
		vf->num_inval_msgs = 0;

	/* relay the response over the mailbox AQ */
	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
	/* ENOSYS from the mailbox is tolerated; anything else is logged */
	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
		dev_err(dev,
			"Unable to send the message to VF %d ret %d aq_err %d\n",
			vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
1582 * ice_vc_get_ver_msg
1583 * @vf: pointer to the VF info
1584 * @msg: pointer to the msg buffer
1586 * called from the VF to request the API version used by the PF
1588 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1590 struct virtchnl_version_info info = {
1591 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
1594 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1595 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1596 if (VF_IS_V10(&vf->vf_ver))
1597 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1599 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1600 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1601 sizeof(struct virtchnl_version_info));
1605 * ice_vc_get_vf_res_msg
1606 * @vf: pointer to the VF info
1607 * @msg: pointer to the msg buffer
1609 * called from the VF to request its resources
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_resource *vfres = NULL;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (ice_check_vf_init(pf, vf)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	len = sizeof(struct virtchnl_vf_resource);

	vfres = kzalloc(len, GFP_KERNEL);
		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;

	/* a v1.1+ VF reports its capabilities in the message payload;
	 * older VFs get a fixed legacy set
	 */
	if (VF_IS_V11(&vf->vf_ver))
		vf->driver_caps = *(u32 *)msg;
		vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
				  VIRTCHNL_VF_OFFLOAD_RSS_REG |
				  VIRTCHNL_VF_OFFLOAD_VLAN;

	vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* VLAN offload only offered when no port VLAN (pvid) is forced */
	if (!vsi->info.pvid)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;

	/* grant RSS either via PF (preferred) or via direct register access */
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
		if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
			vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;

	/* the remaining capabilities are granted iff the VF requested them */
	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;

	if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
		vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;

	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
		vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;

	vfres->num_vsis = 1;
	/* Tx and Rx queue are equal for VF */
	vfres->num_queue_pairs = vsi->num_txq;
	vfres->max_vectors = pf->num_vf_msix;
	vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
	vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;

	/* describe the single SR-IOV VSI backing this VF */
	vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
	vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
	ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
			vf->dflt_lan_addr.addr);

	/* match guest capabilities */
	vf->driver_caps = vfres->vf_cap_flags;

	set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* send the response back to the VF */
	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1708 * ice_vc_reset_vf_msg
1709 * @vf: pointer to the VF info
1711 * called from the VF to reset itself,
1712 * unlike other virtchnl messages, PF driver
1713 * doesn't send the response back to the VF
1715 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
1717 if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1718 ice_reset_vf(vf, false);
1722 * ice_find_vsi_from_id
1723 * @pf: the PF structure to search for the VSI
1724 * @id: ID of the VSI it is searching for
1726 * searches for the VSI with the given ID
static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
	/* linear scan of the PF's VSI array, matching on the absolute HW VSI
	 * number and skipping empty slots
	 */
	ice_for_each_vsi(pf, i)
		if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
1740 * ice_vc_isvalid_vsi_id
1741 * @vf: pointer to the VF info
1742 * @vsi_id: VF relative VSI ID
1744 * check for the valid VSI ID
1746 static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1748 struct ice_pf *pf = vf->pf;
1749 struct ice_vsi *vsi;
1751 vsi = ice_find_vsi_from_id(pf, vsi_id);
1753 return (vsi && (vsi->vf_id == vf->vf_id));
1757 * ice_vc_isvalid_q_id
1758 * @vf: pointer to the VF info
1760 * @qid: VSI relative queue ID
1762 * check for the valid queue ID
1764 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
1766 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
1767 /* allocated Tx and Rx queues should be always equal for VF VSI */
1768 return (vsi && (qid < vsi->alloc_txq));
1772 * ice_vc_isvalid_ring_len
1773 * @ring_len: length of ring
1775 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
1778 static bool ice_vc_isvalid_ring_len(u16 ring_len)
1780 return ring_len == 0 ||
1781 (ring_len >= ICE_MIN_NUM_DESC &&
1782 ring_len <= ICE_MAX_NUM_DESC &&
1783 !(ring_len % ICE_REQ_DESC_MULTIPLE));
1787 * ice_vc_config_rss_key
1788 * @vf: pointer to the VF info
1789 * @msg: pointer to the msg buffer
1791 * Configure the VF's RSS key
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_rss_key *vrk =
		(struct virtchnl_rss_key *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* the VSI in the request must belong to this VF */
	if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* key must be exactly the HW key size */
	if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* RSS must be enabled on the PF at all */
	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* program only the key (LUT args are NULL/0) */
	if (ice_set_rss(vsi, vrk->key, NULL, 0))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;

	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1835 * ice_vc_config_rss_lut
1836 * @vf: pointer to the VF info
1837 * @msg: pointer to the msg buffer
1839 * Configure the VF's RSS LUT
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
	struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* the VSI in the request must belong to this VF */
	if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* LUT must be exactly the HW lookup-table size */
	if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* RSS must be enabled on the PF at all */
	if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* program only the LUT (key arg is NULL) */
	if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;

	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1882 * ice_set_vf_spoofchk
1883 * @netdev: network interface device structure
1884 * @vf_id: VF identifier
1885 * @ena: flag to enable or disable feature
1887 * Enable or disable VF spoof checking
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi_ctx *ctx;
	struct ice_vsi *vf_vsi;
	enum ice_status status;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id))

	vf = &pf->vf[vf_id];

	if (ice_check_vf_init(pf, vf))

	vf_vsi = pf->vsi[vf->lan_vsi_idx];
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);

	/* spoof checking is only meaningful on a VF-type VSI */
	if (vf_vsi->type != ICE_VSI_VF) {
			"Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
			vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);

	/* no-op if the requested state is already in effect */
	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	/* start from the current security flags and only mark the security
	 * section valid so the update touches nothing else
	 */
	ctx->info.sec_flags = vf_vsi->info.sec_flags;
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
		/* enable: MAC anti-spoof plus Tx VLAN pruning */
		ctx->info.sec_flags |=
			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
			 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
		/* disable: clear the same two bits */
		ctx->info.sec_flags &=
			~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
			  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
			   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));

	status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
		"Failed to %sable spoofchk on VF %d VSI %d\n error %d",
		ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);

	/* only update spoofchk state and VSI context on success */
	vf_vsi->info.sec_flags = ctx->info.sec_flags;
1965 * ice_vc_get_stats_msg
1966 * @vf: pointer to the VF info
1967 * @msg: pointer to the msg buffer
1969 * called from the VF to get VSI stats
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_eth_stats stats = { 0 };
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* the VSI in the request must belong to this VF */
	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* refresh the VSI's HW counters, then copy the snapshot out */
	ice_update_eth_stats(vsi);

	stats = vsi->eth_stats;

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
				     (u8 *)&stats, sizeof(stats));
2008 * @vf: pointer to the VF info
2009 * @msg: pointer to the msg buffer
2011 * called from the VF to enable all or specific queue(s)
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	unsigned long q_map;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* at least one queue bitmap must be non-empty */
	if (!vqs->rx_queues && !vqs->tx_queues) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* reject bitmaps with bits beyond the per-VF queue limit */
	if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
	    vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* Enable only Rx rings, Tx rings were enabled by the FW when the
	 * Tx queue group list was configured and the context bits were
	 * programmed using ice_vsi_cfg_txqs
	 */
	q_map = vqs->rx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;

		/* Skip queue if enabled */
		if (test_bit(vf_q_id, vf->rxq_ena))

		if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
			dev_err(&vsi->back->pdev->dev,
				"Failed to enable Rx ring %d on VSI %d\n",
				vf_q_id, vsi->vsi_num);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;

		/* track the newly enabled Rx queue */
		set_bit(vf_q_id, vf->rxq_ena);

	vsi = pf->vsi[vf->lan_vsi_idx];
	q_map = vqs->tx_queues;
	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;

		/* Skip queue if enabled */
		if (test_bit(vf_q_id, vf->txq_ena))

		/* Tx needs bookkeeping only — HW enabling was done by FW */
		set_bit(vf_q_id, vf->txq_ena);

	/* Set flag to indicate that queues are enabled */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2105 * @vf: pointer to the VF info
2106 * @msg: pointer to the msg buffer
2108 * called from the VF to disable all or specific
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_queue_select *vqs =
		(struct virtchnl_queue_select *)msg;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	unsigned long q_map;

	/* allow disabling from either ACTIVE or queues-enabled state */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* at least one queue bitmap must be non-empty */
	if (!vqs->rx_queues && !vqs->tx_queues) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* reject bitmaps with bits beyond the per-VF queue limit */
	if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
	    vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	if (vqs->tx_queues) {
		q_map = vqs->tx_queues;

		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
			struct ice_ring *ring = vsi->tx_rings[vf_q_id];
			struct ice_txq_meta txq_meta = { 0 };

			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;

			/* Skip queue if not enabled */
			if (!test_bit(vf_q_id, vf->txq_ena))

			/* gather queue metadata needed by the stop routine */
			ice_fill_txq_meta(vsi, ring, &txq_meta);

			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
				dev_err(&vsi->back->pdev->dev,
					"Failed to stop Tx ring %d on VSI %d\n",
					vf_q_id, vsi->vsi_num);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;

			/* Clear enabled queues flag */
			clear_bit(vf_q_id, vf->txq_ena);

	if (vqs->rx_queues) {
		q_map = vqs->rx_queues;

		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;

			/* Skip queue if not enabled */
			if (!test_bit(vf_q_id, vf->rxq_ena))

			if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
				dev_err(&vsi->back->pdev->dev,
					"Failed to stop Rx ring %d on VSI %d\n",
					vf_q_id, vsi->vsi_num);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;

			/* Clear enabled queues flag */
			clear_bit(vf_q_id, vf->rxq_ena);

	/* Clear enabled queues flag */
	/* QS_ENA is dropped only once the VF has no enabled queues left */
	if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2220 * ice_vc_cfg_irq_map_msg
2221 * @vf: pointer to the VF info
2222 * @msg: pointer to the msg buffer
2224 * called from the VF to configure the IRQ to queue map
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_irq_map_info *irqmap_info;
	u16 vsi_id, vsi_q_id, vector_id;
	struct virtchnl_vector_map *map;
	struct ice_pf *pf = vf->pf;
	u16 num_q_vectors_mapped;
	struct ice_vsi *vsi;

	irqmap_info = (struct virtchnl_irq_map_info *)msg;
	num_q_vectors_mapped = irqmap_info->num_vectors;

	/* Check to make sure number of VF vectors mapped is not greater than
	 * number of VF vectors originally allocated, and check that
	 * there is actually at least a single VF queue vector mapped
	 */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    pf->num_vf_msix < num_q_vectors_mapped ||
	    !irqmap_info->num_vectors) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	for (i = 0; i < num_q_vectors_mapped; i++) {
		struct ice_q_vector *q_vector;

		map = &irqmap_info->vecmap[i];

		vector_id = map->vector_id;
		vsi_id = map->vsi_id;
		/* vector_id is always 0-based for each VF, and can never be
		 * larger than or equal to the max allowed interrupts per VF
		 */
		/* vector 0 is the VF's misc vector and may not carry queues */
		if (!(vector_id < ICE_MAX_INTR_PER_VF) ||
		    !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
		    (!vector_id && (map->rxq_map || map->txq_map))) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;

		/* No need to map VF miscellaneous or rogue vector */

		/* Subtract non queue vector from vector_id passed by VF
		 * to get actual number of VSI queue vector array index
		 */
		q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;

		/* lookout for the invalid queue index */
		/* wire each requested Rx queue to this vector, then program
		 * the HW Rx interrupt cause with the VF's ITR index
		 */
		qmap = map->rxq_map;
		q_vector->num_ring_rx = 0;
		for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;

			q_vector->num_ring_rx++;
			q_vector->rx.itr_idx = map->rxitr_idx;
			vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
			ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
					      q_vector->rx.itr_idx);

		/* same wiring for the Tx side */
		qmap = map->txq_map;
		q_vector->num_ring_tx = 0;
		for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
			if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;

			q_vector->num_ring_tx++;
			q_vector->tx.itr_idx = map->txitr_idx;
			vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
			ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
					      q_vector->tx.itr_idx);

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2326 * @vf: pointer to the VF info
2327 * @msg: pointer to the msg buffer
2329 * called from the VF to configure the Rx/Tx queues
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vsi_queue_config_info *qci =
		(struct virtchnl_vsi_queue_config_info *)msg;
	struct virtchnl_queue_pair_info *qpi;
	u16 num_rxq = 0, num_txq = 0;
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	vsi = pf->vsi[vf->lan_vsi_idx];
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* cap the request at both the per-VF limit and what this VSI
	 * actually has allocated
	 */
	if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
		dev_err(ice_pf_to_dev(pf),
			"VF-%d requesting more than supported number of queues: %d\n",
			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	for (i = 0; i < qci->num_queue_pairs; i++) {
		qpi = &qci->qpair[i];
		/* every pair must reference this VSI, pair Tx/Rx with the
		 * same queue ID, not use head writeback, and carry valid
		 * ring lengths and queue IDs
		 */
		if (qpi->txq.vsi_id != qci->vsi_id ||
		    qpi->rxq.vsi_id != qci->vsi_id ||
		    qpi->rxq.queue_id != qpi->txq.queue_id ||
		    qpi->txq.headwb_enabled ||
		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;

		/* copy Tx queue info from VF into VSI */
		if (qpi->txq.ring_len > 0) {
			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
			vsi->tx_rings[i]->count = qpi->txq.ring_len;

		/* copy Rx queue info from VF into VSI */
		if (qpi->rxq.ring_len > 0) {
			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
			vsi->rx_rings[i]->count = qpi->rxq.ring_len;

			/* Rx buffer must be 0 (use default) or within
			 * [1024, 16K-128] bytes
			 */
			if (qpi->rxq.databuffer_size != 0 &&
			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
			     qpi->rxq.databuffer_size < 1024)) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;

			vsi->rx_buf_len = qpi->rxq.databuffer_size;
			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
			/* max packet size must be within [64, 16K) bytes */
			if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
			    qpi->rxq.max_pkt_size < 64) {
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;

		vsi->max_frame = qpi->rxq.max_pkt_size;

	/* VF can request to configure less than allocated queues
	 * or default allocated queues. So update the VSI with new number
	 */
	vsi->num_txq = num_txq;
	vsi->num_rxq = num_rxq;
	/* All queues of VF VSI are in TC 0 */
	vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
	vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;

	/* push the Tx/Rx queue contexts down to HW */
	if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2430 * @vf: pointer to the VF info
static bool ice_is_vf_trusted(struct ice_vf *vf)
	/* trust is modeled as the privilege capability bit on the VF */
	return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
2438 * ice_can_vf_change_mac
2439 * @vf: pointer to the VF info
2441 * Return true if the VF is allowed to change its MAC filters, false otherwise
2443 static bool ice_can_vf_change_mac(struct ice_vf *vf)
2445 /* If the VF MAC address has been set administratively (via the
2446 * ndo_set_vf_mac command), then deny permission to the VF to
2447 * add/delete unicast MAC addresses, unless the VF is trusted
2449 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
2456 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
2457 * @vf: pointer to the VF info
2458 * @vsi: pointer to the VF's VSI
2459 * @mac_addr: MAC address to add
ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
	struct device *dev = ice_pf_to_dev(vf->pf);
	enum ice_status status;

	/* default unicast MAC already added */
	if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))

	/* untrusted VFs may not override an admin-set MAC */
	if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
		dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");

	status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
	if (status == ICE_ERR_ALREADY_EXISTS) {
		dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
	} else if (status) {
		dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
			mac_addr, vf->vf_id, status);

	/* only set dflt_lan_addr once */
	if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
	    is_unicast_ether_addr(mac_addr))
		ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
2498 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
2499 * @vf: pointer to the VF info
2500 * @vsi: pointer to the VF's VSI
2501 * @mac_addr: MAC address to delete
2504 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
2506 struct device *dev = ice_pf_to_dev(vf->pf);
2507 enum ice_status status;
2509 if (!ice_can_vf_change_mac(vf) &&
2510 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
2513 status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
2514 if (status == ICE_ERR_DOES_NOT_EXIST) {
2515 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
2518 } else if (status) {
2519 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
2520 mac_addr, vf->vf_id, status);
2524 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
2525 eth_zero_addr(vf->dflt_lan_addr.addr);
2533 * ice_vc_handle_mac_addr_msg
2534 * @vf: pointer to the VF info
2535 * @msg: pointer to the msg buffer
2536 * @set: true if MAC filters are being set, false otherwise
2538 * add guest MAC address filter
2541 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
2543 int (*ice_vc_cfg_mac)
2544 (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
2545 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2546 struct virtchnl_ether_addr_list *al =
2547 (struct virtchnl_ether_addr_list *)msg;
2548 struct ice_pf *pf = vf->pf;
2549 enum virtchnl_ops vc_op;
2550 struct ice_vsi *vsi;
2554 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
2555 ice_vc_cfg_mac = ice_vc_add_mac_addr;
2557 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
2558 ice_vc_cfg_mac = ice_vc_del_mac_addr;
2561 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2562 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
2563 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2564 goto handle_mac_exit;
2567 /* If this VF is not privileged, then we can't add more than a
2568 * limited number of addresses. Check to make sure that the
2569 * additions do not push us over the limit.
2571 if (set && !ice_is_vf_trusted(vf) &&
2572 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
2573 dev_err(ice_pf_to_dev(pf),
2574 "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
2576 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2577 goto handle_mac_exit;
2580 vsi = pf->vsi[vf->lan_vsi_idx];
2582 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2583 goto handle_mac_exit;
2586 for (i = 0; i < al->num_elements; i++) {
2587 u8 *mac_addr = al->list[i].addr;
2590 if (is_broadcast_ether_addr(mac_addr) ||
2591 is_zero_ether_addr(mac_addr))
2594 result = ice_vc_cfg_mac(vf, vsi, mac_addr);
2595 if (result == -EEXIST || result == -ENOENT) {
2597 } else if (result) {
2598 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2599 goto handle_mac_exit;
2604 /* send the response to the VF */
2605 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
2609 * ice_vc_add_mac_addr_msg
2610 * @vf: pointer to the VF info
2611 * @msg: pointer to the msg buffer
2613 * add guest MAC address filter
2615 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2617 return ice_vc_handle_mac_addr_msg(vf, msg, true);
2621 * ice_vc_del_mac_addr_msg
2622 * @vf: pointer to the VF info
2623 * @msg: pointer to the msg buffer
2625 * remove guest MAC address filter
2627 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2629 return ice_vc_handle_mac_addr_msg(vf, msg, false);
2633 * ice_vc_request_qs_msg
2634 * @vf: pointer to the VF info
2635 * @msg: pointer to the msg buffer
2637 * VFs get a default number of queues but can use this message to request a
2638 * different number. If the request is successful, PF will reset the VF and
2639 * return 0. If unsuccessful, PF will send message informing VF of number of
2640 * available queue pairs via virtchnl message response to VF.
2642 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
2644 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2645 struct virtchnl_vf_res_request *vfres =
2646 (struct virtchnl_vf_res_request *)msg;
2647 u16 req_queues = vfres->num_queue_pairs;
2648 struct ice_pf *pf = vf->pf;
2649 u16 max_allowed_vf_queues;
2650 u16 tx_rx_queue_left;
2654 dev = ice_pf_to_dev(pf);
2655 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2656 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2660 cur_queues = vf->num_vf_qs;
2661 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
2662 ice_get_avail_rxq_count(pf));
2663 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
2665 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
2667 } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
2668 dev_err(dev, "VF %d tried to request more than %d queues.\n",
2669 vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
2670 vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
2671 } else if (req_queues > cur_queues &&
2672 req_queues - cur_queues > tx_rx_queue_left) {
2674 "VF %d requested %u more queues, but only %u left.\n",
2675 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
2676 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
2677 ICE_MAX_BASE_QS_PER_VF);
2679 /* request is successful, then reset VF */
2680 vf->num_req_qs = req_queues;
2681 ice_vc_reset_vf(vf);
2682 dev_info(dev, "VF %d granted request of %u queues.\n",
2683 vf->vf_id, req_queues);
2688 /* send the response to the VF */
2689 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
2690 v_ret, (u8 *)vfres, sizeof(*vfres));
2694 * ice_set_vf_port_vlan
2695 * @netdev: network interface device structure
2696 * @vf_id: VF identifier
2697 * @vlan_id: VLAN ID being set
2698 * @qos: priority setting
2699 * @vlan_proto: VLAN protocol
2701 * program VF Port VLAN ID and/or QoS
2704 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
2707 u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
2708 struct ice_pf *pf = ice_netdev_to_pf(netdev);
2709 struct ice_vsi *vsi;
2714 dev = ice_pf_to_dev(pf);
2715 if (ice_validate_vf_id(pf, vf_id))
2718 if (vlan_id > ICE_MAX_VLANID || qos > 7) {
2719 dev_err(dev, "Invalid VF Parameters\n");
2723 if (vlan_proto != htons(ETH_P_8021Q)) {
2724 dev_err(dev, "VF VLAN protocol is not supported\n");
2725 return -EPROTONOSUPPORT;
2728 vf = &pf->vf[vf_id];
2729 vsi = pf->vsi[vf->lan_vsi_idx];
2730 if (ice_check_vf_init(pf, vf))
2733 if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
2734 /* duplicate request, so just return success */
2735 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
2739 /* If PVID, then remove all filters on the old VLAN */
2741 ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
2744 if (vlan_id || qos) {
2745 ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
2747 goto error_set_pvid;
2749 ice_vsi_manage_pvid(vsi, 0, false);
2754 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
2755 vlan_id, qos, vf_id);
2757 /* add new VLAN filter for each MAC */
2758 ret = ice_vsi_add_vlan(vsi, vlan_id);
2760 goto error_set_pvid;
2763 /* The Port VLAN needs to be saved across resets the same as the
2764 * default LAN MAC address.
2766 vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2773 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
2774 * @caps: VF driver negotiated capabilities
2776 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
2778 static bool ice_vf_vlan_offload_ena(u32 caps)
2780 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
2784 * ice_vc_process_vlan_msg
2785 * @vf: pointer to the VF info
2786 * @msg: pointer to the msg buffer
2787 * @add_v: Add VLAN if true, otherwise delete VLAN
2789 * Process virtchnl op to add or remove programmed guest VLAN ID
2791 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
2793 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2794 struct virtchnl_vlan_filter_list *vfl =
2795 (struct virtchnl_vlan_filter_list *)msg;
2796 struct ice_pf *pf = vf->pf;
2797 bool vlan_promisc = false;
2798 struct ice_vsi *vsi;
2805 dev = ice_pf_to_dev(pf);
2806 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2807 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2811 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2812 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2816 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
2817 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2821 for (i = 0; i < vfl->num_elements; i++) {
2822 if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
2823 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2825 "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
2831 vsi = pf->vsi[vf->lan_vsi_idx];
2833 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2837 if (add_v && !ice_is_vf_trusted(vf) &&
2838 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2840 "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
2842 /* There is no need to let VF know about being not trusted,
2843 * so we can just return success message here
2848 if (vsi->info.pvid) {
2849 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2853 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2854 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2855 vlan_promisc = true;
2858 for (i = 0; i < vfl->num_elements; i++) {
2859 u16 vid = vfl->vlan_id[i];
2861 if (!ice_is_vf_trusted(vf) &&
2862 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
2864 "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
2866 /* There is no need to let VF know about being
2867 * not trusted, so we can just return success
2868 * message here as well.
2873 /* we add VLAN 0 by default for each VF so we can enable
2874 * Tx VLAN anti-spoof without triggering MDD events so
2875 * we don't need to add it again here
2880 status = ice_vsi_add_vlan(vsi, vid);
2882 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2887 /* Enable VLAN pruning when VLAN is added */
2888 if (!vlan_promisc) {
2889 status = ice_cfg_vlan_pruning(vsi, true, false);
2891 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2893 "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
2898 /* Enable Ucast/Mcast VLAN promiscuous mode */
2899 promisc_m = ICE_PROMISC_VLAN_TX |
2900 ICE_PROMISC_VLAN_RX;
2902 status = ice_set_vsi_promisc(hw, vsi->idx,
2905 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2907 "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
2913 /* In case of non_trusted VF, number of VLAN elements passed
2914 * to PF for removal might be greater than number of VLANs
2915 * filter programmed for that VF - So, use actual number of
2916 * VLANS added earlier with add VLAN opcode. In order to avoid
2917 * removing VLAN that doesn't exist, which result to sending
2918 * erroneous failed message back to the VF
2922 num_vf_vlan = vsi->num_vlan;
2923 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
2924 u16 vid = vfl->vlan_id[i];
2926 /* we add VLAN 0 by default for each VF so we can enable
2927 * Tx VLAN anti-spoof without triggering MDD events so
2928 * we don't want a VIRTCHNL request to remove it
2933 /* Make sure ice_vsi_kill_vlan is successful before
2934 * updating VLAN information
2936 status = ice_vsi_kill_vlan(vsi, vid);
2938 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2943 /* Disable VLAN pruning when the last VLAN is removed */
2945 ice_cfg_vlan_pruning(vsi, false, false);
2947 /* Disable Unicast/Multicast VLAN promiscuous mode */
2949 promisc_m = ICE_PROMISC_VLAN_TX |
2950 ICE_PROMISC_VLAN_RX;
2952 ice_clear_vsi_promisc(hw, vsi->idx,
2959 /* send the response to the VF */
2961 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
2964 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2969 * ice_vc_add_vlan_msg
2970 * @vf: pointer to the VF info
2971 * @msg: pointer to the msg buffer
2973 * Add and program guest VLAN ID
2975 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2977 return ice_vc_process_vlan_msg(vf, msg, true);
2981 * ice_vc_remove_vlan_msg
2982 * @vf: pointer to the VF info
2983 * @msg: pointer to the msg buffer
2985 * remove programmed guest VLAN ID
2987 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2989 return ice_vc_process_vlan_msg(vf, msg, false);
2993 * ice_vc_ena_vlan_stripping
2994 * @vf: pointer to the VF info
2996 * Enable VLAN header stripping for a given VF
2998 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
3000 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3001 struct ice_pf *pf = vf->pf;
3002 struct ice_vsi *vsi;
3004 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3005 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3009 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3010 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3014 vsi = pf->vsi[vf->lan_vsi_idx];
3015 if (ice_vsi_manage_vlan_stripping(vsi, true))
3016 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3019 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3024 * ice_vc_dis_vlan_stripping
3025 * @vf: pointer to the VF info
3027 * Disable VLAN header stripping for a given VF
3029 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3031 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3032 struct ice_pf *pf = vf->pf;
3033 struct ice_vsi *vsi;
3035 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3036 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3040 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3041 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3045 vsi = pf->vsi[vf->lan_vsi_idx];
3047 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3051 if (ice_vsi_manage_vlan_stripping(vsi, false))
3052 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3055 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3060 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3061 * @vf: VF to enable/disable VLAN stripping for on initialization
3063 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
3064 * the flag is cleared then we want to disable stripping. For example, the flag
3065 * will be cleared when port VLANs are configured by the administrator before
3066 * passing the VF to the guest or if the AVF driver doesn't support VLAN
3069 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3071 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3076 /* don't modify stripping if port VLAN is configured */
3080 if (ice_vf_vlan_offload_ena(vf->driver_caps))
3081 return ice_vsi_manage_vlan_stripping(vsi, true);
3083 return ice_vsi_manage_vlan_stripping(vsi, false);
3087 * ice_vc_process_vf_msg - Process request from VF
3088 * @pf: pointer to the PF structure
3089 * @event: pointer to the AQ event
3091 * called from the common asq/arq handler to
3092 * process request from VF
3094 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
3096 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
3097 s16 vf_id = le16_to_cpu(event->desc.retval);
3098 u16 msglen = event->msg_len;
3099 u8 *msg = event->msg_buf;
3100 struct ice_vf *vf = NULL;
3104 dev = ice_pf_to_dev(pf);
3105 if (ice_validate_vf_id(pf, vf_id)) {
3110 vf = &pf->vf[vf_id];
3112 /* Check if VF is disabled. */
3113 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
3118 /* Perform basic checks on the msg */
3119 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
3121 if (err == VIRTCHNL_STATUS_ERR_PARAM)
3129 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
3131 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
3132 vf_id, v_opcode, msglen, err);
3137 case VIRTCHNL_OP_VERSION:
3138 err = ice_vc_get_ver_msg(vf, msg);
3140 case VIRTCHNL_OP_GET_VF_RESOURCES:
3141 err = ice_vc_get_vf_res_msg(vf, msg);
3142 if (ice_vf_init_vlan_stripping(vf))
3144 "Failed to initialize VLAN stripping for VF %d\n",
3146 ice_vc_notify_vf_link_state(vf);
3148 case VIRTCHNL_OP_RESET_VF:
3149 ice_vc_reset_vf_msg(vf);
3151 case VIRTCHNL_OP_ADD_ETH_ADDR:
3152 err = ice_vc_add_mac_addr_msg(vf, msg);
3154 case VIRTCHNL_OP_DEL_ETH_ADDR:
3155 err = ice_vc_del_mac_addr_msg(vf, msg);
3157 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
3158 err = ice_vc_cfg_qs_msg(vf, msg);
3160 case VIRTCHNL_OP_ENABLE_QUEUES:
3161 err = ice_vc_ena_qs_msg(vf, msg);
3162 ice_vc_notify_vf_link_state(vf);
3164 case VIRTCHNL_OP_DISABLE_QUEUES:
3165 err = ice_vc_dis_qs_msg(vf, msg);
3167 case VIRTCHNL_OP_REQUEST_QUEUES:
3168 err = ice_vc_request_qs_msg(vf, msg);
3170 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
3171 err = ice_vc_cfg_irq_map_msg(vf, msg);
3173 case VIRTCHNL_OP_CONFIG_RSS_KEY:
3174 err = ice_vc_config_rss_key(vf, msg);
3176 case VIRTCHNL_OP_CONFIG_RSS_LUT:
3177 err = ice_vc_config_rss_lut(vf, msg);
3179 case VIRTCHNL_OP_GET_STATS:
3180 err = ice_vc_get_stats_msg(vf, msg);
3182 case VIRTCHNL_OP_ADD_VLAN:
3183 err = ice_vc_add_vlan_msg(vf, msg);
3185 case VIRTCHNL_OP_DEL_VLAN:
3186 err = ice_vc_remove_vlan_msg(vf, msg);
3188 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
3189 err = ice_vc_ena_vlan_stripping(vf);
3191 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
3192 err = ice_vc_dis_vlan_stripping(vf);
3194 case VIRTCHNL_OP_UNKNOWN:
3196 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
3198 err = ice_vc_send_msg_to_vf(vf, v_opcode,
3199 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
3204 /* Helper function cares less about error return values here
3205 * as it is busy with pending work.
3207 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
3208 vf_id, v_opcode, err);
3214 * @netdev: network interface device structure
3215 * @vf_id: VF identifier
3216 * @ivi: VF configuration structure
3218 * return VF configuration
3221 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
3223 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3224 struct ice_vsi *vsi;
3227 if (ice_validate_vf_id(pf, vf_id))
3230 vf = &pf->vf[vf_id];
3231 vsi = pf->vsi[vf->lan_vsi_idx];
3233 if (ice_check_vf_init(pf, vf))
3237 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
3239 /* VF configuration for VLAN and applicable QoS */
3240 ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
3241 ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
3242 ICE_VLAN_PRIORITY_S;
3244 ivi->trusted = vf->trusted;
3245 ivi->spoofchk = vf->spoofchk;
3246 if (!vf->link_forced)
3247 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
3248 else if (vf->link_up)
3249 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
3251 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
3252 ivi->max_tx_rate = vf->tx_rate;
3253 ivi->min_tx_rate = 0;
3258 * ice_wait_on_vf_reset
3259 * @vf: The VF being resseting
3261 * Poll to make sure a given VF is ready after reset
3263 static void ice_wait_on_vf_reset(struct ice_vf *vf)
3267 for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) {
3268 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
3276 * @netdev: network interface device structure
3277 * @vf_id: VF identifier
3280 * program VF MAC address
3282 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
3284 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3288 if (ice_validate_vf_id(pf, vf_id))
3291 vf = &pf->vf[vf_id];
3292 /* Don't set MAC on disabled VF */
3293 if (ice_is_vf_disabled(vf))
3296 /* In case VF is in reset mode, wait until it is completed. Depending
3297 * on factors like queue disabling routine, this could take ~250ms
3299 ice_wait_on_vf_reset(vf);
3301 if (ice_check_vf_init(pf, vf))
3304 if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
3305 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
3309 /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
3310 * flow will use the updated dflt_lan_addr and add a MAC filter
3311 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
3312 * set the MAC address for this VF.
3314 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
3315 vf->pf_set_mac = true;
3317 "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
3320 ice_vc_reset_vf(vf);
3326 * @netdev: network interface device structure
3327 * @vf_id: VF identifier
3328 * @trusted: Boolean value to enable/disable trusted VF
3330 * Enable or disable a given VF as trusted
3332 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
3334 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3338 dev = ice_pf_to_dev(pf);
3339 if (ice_validate_vf_id(pf, vf_id))
3342 vf = &pf->vf[vf_id];
3343 /* Don't set Trusted Mode on disabled VF */
3344 if (ice_is_vf_disabled(vf))
3347 /* In case VF is in reset mode, wait until it is completed. Depending
3348 * on factors like queue disabling routine, this could take ~250ms
3350 ice_wait_on_vf_reset(vf);
3352 if (ice_check_vf_init(pf, vf))
3355 /* Check if already trusted */
3356 if (trusted == vf->trusted)
3359 vf->trusted = trusted;
3360 ice_vc_reset_vf(vf);
3361 dev_info(dev, "VF %u is now %strusted\n",
3362 vf_id, trusted ? "" : "un");
3368 * ice_set_vf_link_state
3369 * @netdev: network interface device structure
3370 * @vf_id: VF identifier
3371 * @link_state: required link state
3373 * Set VF's link state, irrespective of physical link state status
3375 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
3377 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3380 if (ice_validate_vf_id(pf, vf_id))
3383 vf = &pf->vf[vf_id];
3384 if (ice_check_vf_init(pf, vf))
3387 switch (link_state) {
3388 case IFLA_VF_LINK_STATE_AUTO:
3389 vf->link_forced = false;
3391 case IFLA_VF_LINK_STATE_ENABLE:
3392 vf->link_forced = true;
3395 case IFLA_VF_LINK_STATE_DISABLE:
3396 vf->link_forced = true;
3397 vf->link_up = false;
3403 ice_vc_notify_vf_link_state(vf);
3409 * ice_get_vf_stats - populate some stats for the VF
3410 * @netdev: the netdev of the PF
3411 * @vf_id: the host OS identifier (0-255)
3412 * @vf_stats: pointer to the OS memory to be initialized
3414 int ice_get_vf_stats(struct net_device *netdev, int vf_id,
3415 struct ifla_vf_stats *vf_stats)
3417 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3418 struct ice_eth_stats *stats;
3419 struct ice_vsi *vsi;
3422 if (ice_validate_vf_id(pf, vf_id))
3425 vf = &pf->vf[vf_id];
3427 if (ice_check_vf_init(pf, vf))
3430 vsi = pf->vsi[vf->lan_vsi_idx];
3434 ice_update_eth_stats(vsi);
3435 stats = &vsi->eth_stats;
3437 memset(vf_stats, 0, sizeof(*vf_stats));
3439 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
3440 stats->rx_multicast;
3441 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
3442 stats->tx_multicast;
3443 vf_stats->rx_bytes = stats->rx_bytes;
3444 vf_stats->tx_bytes = stats->tx_bytes;
3445 vf_stats->broadcast = stats->rx_broadcast;
3446 vf_stats->multicast = stats->rx_multicast;
3447 vf_stats->rx_dropped = stats->rx_discards;
3448 vf_stats->tx_dropped = stats->tx_discards;