1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
9 * ice_validate_vf_id - helper to check if VF ID is valid
10 * @pf: pointer to the PF structure
11 * @vf_id: the ID of the VF to check
13 static int ice_validate_vf_id(struct ice_pf *pf, int vf_id)
15 if (vf_id >= pf->num_alloc_vfs) {
16 dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %d\n", vf_id);
23 * ice_check_vf_init - helper to check if VF init complete
24 * @pf: pointer to the PF structure
25 * @vf: the pointer to the VF to check
27 static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
29 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
30 dev_err(ice_pf_to_dev(pf), "VF ID: %d in reset. Try again.\n",
38 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
39 * @pf: pointer to the PF structure
40 * @v_opcode: operation code
41 * @v_retval: return value
42 * @msg: pointer to the msg buffer
46 ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
47 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
49 struct ice_hw *hw = &pf->hw;
52 ice_for_each_vf(pf, i) {
53 struct ice_vf *vf = &pf->vf[i];
55 /* Not all vfs are enabled so skip the ones that are not */
56 if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
57 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
60 /* Ignore return value on purpose - a given VF may fail, but
61 * we need to keep going and send to all of them
63 ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
69 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
70 * @vf: pointer to the VF structure
71 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
72 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
73 * @link_up: whether or not to set the link up/down
76 ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
77 int ice_link_speed, bool link_up)
79 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
80 pfe->event_data.link_event_adv.link_status = link_up;
82 pfe->event_data.link_event_adv.link_speed =
83 ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
85 pfe->event_data.link_event.link_status = link_up;
86 /* Legacy method for virtchnl link speeds */
87 pfe->event_data.link_event.link_speed =
88 (enum virtchnl_link_speed)
89 ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
94 * ice_vc_notify_vf_link_state - Inform a VF of link status
95 * @vf: pointer to the VF structure
97 * send a link status message to a single VF
99 static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
101 struct virtchnl_pf_event pfe = { 0 };
102 struct ice_link_status *ls;
103 struct ice_pf *pf = vf->pf;
107 ls = &hw->port_info->phy.link_info;
109 pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
110 pfe.severity = PF_EVENT_SEVERITY_INFO;
112 /* Always report link is down if the VF queues aren't enabled */
113 if (!vf->num_qs_ena) {
114 ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
115 } else if (vf->link_forced) {
116 u16 link_speed = vf->link_up ?
117 ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN;
119 ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up);
121 ice_set_pfe_link(vf, &pfe, ls->link_speed,
122 ls->link_info & ICE_AQ_LINK_UP);
125 ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
126 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
131 * ice_free_vf_res - Free a VF's resources
132 * @vf: pointer to the VF info
134 static void ice_free_vf_res(struct ice_vf *vf)
136 struct ice_pf *pf = vf->pf;
137 int i, last_vector_idx;
139 /* First, disable VF's configuration API to prevent OS from
140 * accessing the VF's VSI after it's freed or invalidated.
142 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
144 /* free VSI and disconnect it from the parent uplink */
145 if (vf->lan_vsi_idx) {
146 ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
152 last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
153 /* Disable interrupts so that VF starts in a known state */
154 for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
155 wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
158 /* reset some of the state variables keeping track of the resources */
159 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
160 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
164 * ice_dis_vf_mappings
165 * @vf: pointer to the VF structure
167 static void ice_dis_vf_mappings(struct ice_vf *vf)
169 struct ice_pf *pf = vf->pf;
176 vsi = pf->vsi[vf->lan_vsi_idx];
178 dev = ice_pf_to_dev(pf);
179 wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
180 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
182 first = vf->first_vector_idx;
183 last = first + pf->num_vf_msix - 1;
184 for (v = first; v <= last; v++) {
187 reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
188 GLINT_VECT2FUNC_IS_PF_M) |
189 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
190 GLINT_VECT2FUNC_PF_NUM_M));
191 wr32(hw, GLINT_VECT2FUNC(v), reg);
194 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
195 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
197 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
199 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
200 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
202 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
206 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
207 * @pf: pointer to the PF structure
209 * If MSIX entries from the pf->irq_tracker were needed then we need to
210 * reset the irq_tracker->end and give back the entries we needed to
213 * If no MSIX entries were taken from the pf->irq_tracker then just clear
214 * the pf->sriov_base_vector.
216 * Returns 0 on success, and -EINVAL on error.
218 static int ice_sriov_free_msix_res(struct ice_pf *pf)
220 struct ice_res_tracker *res;
225 res = pf->irq_tracker;
229 /* give back irq_tracker resources used */
230 if (pf->sriov_base_vector < res->num_entries) {
231 res->end = res->num_entries;
232 pf->num_avail_sw_msix +=
233 res->num_entries - pf->sriov_base_vector;
236 pf->sriov_base_vector = 0;
242 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
243 * @vf: pointer to the VF structure
245 void ice_set_vf_state_qs_dis(struct ice_vf *vf)
247 /* Clear Rx/Tx enabled queues flag */
248 bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
249 bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
251 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
255 * ice_dis_vf_qs - Disable the VF queues
256 * @vf: pointer to the VF structure
258 static void ice_dis_vf_qs(struct ice_vf *vf)
260 struct ice_pf *pf = vf->pf;
263 vsi = pf->vsi[vf->lan_vsi_idx];
265 ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
266 ice_vsi_stop_rx_rings(vsi);
267 ice_set_vf_state_qs_dis(vf);
271 * ice_free_vfs - Free all VFs
272 * @pf: pointer to the PF structure
274 void ice_free_vfs(struct ice_pf *pf)
276 struct device *dev = ice_pf_to_dev(pf);
277 struct ice_hw *hw = &pf->hw;
283 while (test_and_set_bit(__ICE_VF_DIS, pf->state))
284 usleep_range(1000, 2000);
286 /* Avoid wait time by stopping all VFs at the same time */
287 ice_for_each_vf(pf, i)
288 if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
289 ice_dis_vf_qs(&pf->vf[i]);
291 /* Disable IOV before freeing resources. This lets any VF drivers
292 * running in the host get themselves cleaned up before we yank
293 * the carpet out from underneath their feet.
295 if (!pci_vfs_assigned(pf->pdev))
296 pci_disable_sriov(pf->pdev);
298 dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
300 tmp = pf->num_alloc_vfs;
302 pf->num_alloc_vfs = 0;
303 for (i = 0; i < tmp; i++) {
304 if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
305 /* disable VF qp mappings and set VF disable state */
306 ice_dis_vf_mappings(&pf->vf[i]);
307 set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
308 ice_free_vf_res(&pf->vf[i]);
312 if (ice_sriov_free_msix_res(pf))
313 dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
315 devm_kfree(dev, pf->vf);
318 /* This check is for when the driver is unloaded while VFs are
319 * assigned. Setting the number of VFs to 0 through sysfs is caught
320 * before this function ever gets called.
322 if (!pci_vfs_assigned(pf->pdev)) {
325 /* Acknowledge VFLR for all VFs. Without this, VFs will fail to
326 * work correctly when SR-IOV gets re-enabled.
328 for (vf_id = 0; vf_id < tmp; vf_id++) {
329 u32 reg_idx, bit_idx;
331 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
332 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
333 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
336 clear_bit(__ICE_VF_DIS, pf->state);
337 clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
341 * ice_trigger_vf_reset - Reset a VF on HW
342 * @vf: pointer to the VF structure
343 * @is_vflr: true if VFLR was issued, false if not
344 * @is_pfr: true if the reset was triggered due to a previous PFR
346 * Trigger hardware to start a reset for a particular VF. Expects the caller
347 * to wait the proper amount of time to allow hardware to reset the VF before
348 * it cleans up and restores VF functionality.
350 static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
352 struct ice_pf *pf = vf->pf;
353 u32 reg, reg_idx, bit_idx;
358 dev = ice_pf_to_dev(pf);
360 vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;
362 /* Inform VF that it is no longer active, as a warning */
363 clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
365 /* Disable VF's configuration API during reset. The flag is re-enabled
366 * in ice_alloc_vf_res(), when it's safe again to access VF's VSI.
367 * It's normally disabled in ice_free_vf_res(), but it's safer
368 * to do it earlier to give some time to finish to any VF config
369 * functions that may still be running at this point.
371 clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
373 /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
374 * in the case of VFR. If this is done for PFR, it can mess up VF
375 * resets because the VF driver may already have started cleanup
376 * by the time we get here.
379 wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
381 /* In the case of a VFLR, the HW has already reset the VF and we
382 * just need to clean up, so don't hit the VFRTRIG register.
385 /* reset VF using VPGEN_VFRTRIG reg */
386 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
387 reg |= VPGEN_VFRTRIG_VFSWR_M;
388 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
390 /* clear the VFLR bit in GLGEN_VFLRSTAT */
391 reg_idx = (vf_abs_id) / 32;
392 bit_idx = (vf_abs_id) % 32;
393 wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
396 wr32(hw, PF_PCI_CIAA,
397 VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
398 for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
399 reg = rd32(hw, PF_PCI_CIAD);
400 /* no transactions pending so stop polling */
401 if ((reg & VF_TRANS_PENDING_M) == 0)
404 dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
405 udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
410 * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
411 * @ctxt: the VSI ctxt to fill
412 * @vid: the VLAN ID to set as a PVID
414 static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
416 ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
417 ICE_AQ_VSI_PVLAN_INSERT_PVID |
418 ICE_AQ_VSI_VLAN_EMOD_STR);
419 ctxt->info.pvid = cpu_to_le16(vid);
420 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
421 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
422 ICE_AQ_VSI_PROP_SW_VALID);
426 * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
427 * @ctxt: the VSI ctxt to fill
429 static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
431 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
432 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
433 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
434 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
435 ICE_AQ_VSI_PROP_SW_VALID);
439 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
440 * @vsi: the VSI to update
441 * @vid: the VLAN ID to set as a PVID
442 * @enable: true for enable PVID false for disable
444 static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
446 struct ice_hw *hw = &vsi->back->hw;
447 struct ice_vsi_ctx *ctxt;
448 enum ice_status status;
451 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
455 ctxt->info = vsi->info;
457 ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
459 ice_vsi_kill_pvid_fill_ctxt(ctxt);
461 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
463 dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
464 status, hw->adminq.sq_last_status);
469 vsi->info = ctxt->info;
476 * ice_vf_vsi_setup - Set up a VF VSI
477 * @pf: board private structure
478 * @pi: pointer to the port_info instance
479 * @vf_id: defines VF ID to which this VSI connects.
481 * Returns pointer to the successfully allocated VSI struct on success,
482 * otherwise returns NULL on failure.
484 static struct ice_vsi *
485 ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
487 return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
491 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
492 * @pf: pointer to PF structure
493 * @vf: pointer to VF that the first MSIX vector index is being calculated for
495 * This returns the first MSIX vector index in PF space that is used by this VF.
496 * This index is used when accessing PF relative registers such as
497 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
498 * This will always be the OICR index in the AVF driver so any functionality
499 * using vf->first_vector_idx for queue configuration will have to increment by
500 * 1 to avoid meddling with the OICR index.
502 static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
504 return pf->sriov_base_vector + vf->vf_id * pf->num_vf_msix;
508 * ice_alloc_vsi_res - Setup VF VSI and its resources
509 * @vf: pointer to the VF structure
511 * Returns 0 on success, negative value on failure
513 static int ice_alloc_vsi_res(struct ice_vf *vf)
515 struct ice_pf *pf = vf->pf;
516 LIST_HEAD(tmp_add_list);
517 u8 broadcast[ETH_ALEN];
522 dev = ice_pf_to_dev(pf);
523 /* first vector index is the VFs OICR index */
524 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
526 vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
528 dev_err(dev, "Failed to create VF VSI\n");
532 vf->lan_vsi_idx = vsi->idx;
533 vf->lan_vsi_num = vsi->vsi_num;
535 /* Check if port VLAN exist before, and restore it accordingly */
536 if (vf->port_vlan_id) {
537 ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
538 ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
541 eth_broadcast_addr(broadcast);
543 status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast);
545 goto ice_alloc_vsi_res_exit;
547 if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
548 status = ice_add_mac_to_list(vsi, &tmp_add_list,
549 vf->dflt_lan_addr.addr);
551 goto ice_alloc_vsi_res_exit;
554 status = ice_add_mac(&pf->hw, &tmp_add_list);
556 dev_err(dev, "could not add mac filters error %d\n", status);
560 /* Clear this bit after VF initialization since we shouldn't reclaim
561 * and reassign interrupts for synchronous or asynchronous VFR events.
562 * We don't want to reconfigure interrupts since AVF driver doesn't
563 * expect vector assignment to be changed unless there is a request for
566 ice_alloc_vsi_res_exit:
567 ice_free_fltr_list(dev, &tmp_add_list);
572 * ice_alloc_vf_res - Allocate VF resources
573 * @vf: pointer to the VF structure
575 static int ice_alloc_vf_res(struct ice_vf *vf)
577 struct ice_pf *pf = vf->pf;
578 int tx_rx_queue_left;
581 /* Update number of VF queues, in case VF had requested for queue
584 tx_rx_queue_left = min_t(int, ice_get_avail_txq_count(pf),
585 ice_get_avail_rxq_count(pf));
586 tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
587 if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
588 vf->num_req_qs != vf->num_vf_qs)
589 vf->num_vf_qs = vf->num_req_qs;
591 /* setup VF VSI and necessary resources */
592 status = ice_alloc_vsi_res(vf);
594 goto ice_alloc_vf_res_exit;
597 set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
599 clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
601 /* VF is now completely initialized */
602 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
606 ice_alloc_vf_res_exit:
612 * ice_ena_vf_mappings
613 * @vf: pointer to the VF structure
615 * Enable VF vectors and queues allocation by writing the details into
616 * respective registers.
618 static void ice_ena_vf_mappings(struct ice_vf *vf)
620 int abs_vf_id, abs_first, abs_last;
621 struct ice_pf *pf = vf->pf;
628 dev = ice_pf_to_dev(pf);
630 vsi = pf->vsi[vf->lan_vsi_idx];
631 first = vf->first_vector_idx;
632 last = (first + pf->num_vf_msix) - 1;
633 abs_first = first + pf->hw.func_caps.common_cap.msix_vector_first_id;
634 abs_last = (abs_first + pf->num_vf_msix) - 1;
635 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
637 /* VF Vector allocation */
638 reg = (((abs_first << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
639 ((abs_last << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
640 VPINT_ALLOC_VALID_M);
641 wr32(hw, VPINT_ALLOC(vf->vf_id), reg);
643 reg = (((abs_first << VPINT_ALLOC_PCI_FIRST_S)
644 & VPINT_ALLOC_PCI_FIRST_M) |
645 ((abs_last << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
646 VPINT_ALLOC_PCI_VALID_M);
647 wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);
648 /* map the interrupts to its functions */
649 for (v = first; v <= last; v++) {
650 reg = (((abs_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
651 GLINT_VECT2FUNC_VF_NUM_M) |
652 ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
653 GLINT_VECT2FUNC_PF_NUM_M));
654 wr32(hw, GLINT_VECT2FUNC(v), reg);
657 /* Map mailbox interrupt. We put an explicit 0 here to remind us that
658 * VF admin queue interrupts will go to VF MSI-X vector 0.
660 wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
661 /* set regardless of mapping mode */
662 wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
664 /* VF Tx queues allocation */
665 if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
666 /* set the VF PF Tx queue range
667 * VFNUMQ value should be set to (number of queues - 1). A value
668 * of 0 means 1 queue and a value of 255 means 256 queues
670 reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
671 VPLAN_TX_QBASE_VFFIRSTQ_M) |
672 (((vsi->alloc_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
673 VPLAN_TX_QBASE_VFNUMQ_M));
674 wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
676 dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
679 /* set regardless of mapping mode */
680 wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);
682 /* VF Rx queues allocation */
683 if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
684 /* set the VF PF Rx queue range
685 * VFNUMQ value should be set to (number of queues - 1). A value
686 * of 0 means 1 queue and a value of 255 means 256 queues
688 reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
689 VPLAN_RX_QBASE_VFFIRSTQ_M) |
690 (((vsi->alloc_txq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
691 VPLAN_RX_QBASE_VFNUMQ_M));
692 wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
694 dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
700 * @pf: pointer to the PF structure
701 * @avail_res: available resources in the PF structure
702 * @max_res: maximum resources that can be given per VF
703 * @min_res: minimum resources that can be given per VF
705 * Returns non-zero value if resources (queues/vectors) are available or
706 * returns zero if PF cannot accommodate for all num_alloc_vfs.
709 ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
711 bool checked_min_res = false;
714 /* start by checking if PF can assign max number of resources for
716 * if yes, return number per VF
717 * If no, divide by 2 and roundup, check again
718 * repeat the loop till we reach a point where even minimum resources
719 * are not available, in that case return 0
722 while ((res >= min_res) && !checked_min_res) {
725 num_all_res = pf->num_alloc_vfs * res;
726 if (num_all_res <= avail_res)
730 checked_min_res = true;
732 res = DIV_ROUND_UP(res, 2);
738 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
739 * @vf: VF to calculate the register index for
740 * @q_vector: a q_vector associated to the VF
742 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
746 if (!vf || !q_vector)
751 /* always add one to account for the OICR being the first MSIX */
752 return pf->sriov_base_vector + pf->num_vf_msix * vf->vf_id +
757 * ice_get_max_valid_res_idx - Get the max valid resource index
758 * @res: pointer to the resource to find the max valid index for
760 * Start from the end of the ice_res_tracker and return right when we find the
761 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
762 * valid for SR-IOV because it is the only consumer that manipulates the
763 * res->end and this is always called when res->end is set to res->num_entries.
765 static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
772 for (i = res->num_entries - 1; i >= 0; i--)
773 if (res->list[i] & ICE_RES_VALID_BIT)
780 * ice_sriov_set_msix_res - Set any used MSIX resources
781 * @pf: pointer to PF structure
782 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
784 * This function allows SR-IOV resources to be taken from the end of the PF's
785 * allowed HW MSIX vectors so in many cases the irq_tracker will not
786 * be needed. In these cases we just set the pf->sriov_base_vector and return
789 * If SR-IOV needs to use any pf->irq_tracker entries it updates the
790 * irq_tracker->end based on the first entry needed for SR-IOV. This makes it
791 * so any calls to ice_get_res() using the irq_tracker will not try to use
792 * resources at or beyond the newly set value.
794 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors in
795 * in the PF's space available for SR-IOV.
797 static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
799 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
800 u16 pf_total_msix_vectors =
801 pf->hw.func_caps.common_cap.num_msix_vectors;
802 struct ice_res_tracker *res = pf->irq_tracker;
803 int sriov_base_vector;
805 if (max_valid_res_idx < 0)
806 return max_valid_res_idx;
808 sriov_base_vector = pf_total_msix_vectors - num_msix_needed;
810 /* make sure we only grab irq_tracker entries from the list end and
811 * that we have enough available MSIX vectors
813 if (sriov_base_vector <= max_valid_res_idx)
816 pf->sriov_base_vector = sriov_base_vector;
818 /* dip into irq_tracker entries and update used resources */
819 if (num_msix_needed > (pf_total_msix_vectors - res->num_entries)) {
820 pf->num_avail_sw_msix -=
821 res->num_entries - pf->sriov_base_vector;
822 res->end = pf->sriov_base_vector;
829 * ice_check_avail_res - check if vectors and queues are available
830 * @pf: pointer to the PF structure
832 * This function is where we calculate actual number of resources for VF VSIs,
833 * we don't reserve ahead of time during probe. Returns success if vectors and
834 * queues resources are available, otherwise returns error code
836 static int ice_check_avail_res(struct ice_pf *pf)
838 int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
839 u16 num_msix, num_txq, num_rxq, num_avail_msix;
840 struct device *dev = ice_pf_to_dev(pf);
842 if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
845 /* add 1 to max_valid_res_idx to account for it being 0-based */
846 num_avail_msix = pf->hw.func_caps.common_cap.num_msix_vectors -
847 (max_valid_res_idx + 1);
849 /* Grab from HW interrupts common pool
850 * Note: By the time the user decides it needs more vectors in a VF
851 * its already too late since one must decide this prior to creating the
852 * VF interface. So the best we can do is take a guess as to what the
855 * We have two policies for vector allocation:
856 * 1. if num_alloc_vfs is from 1 to 16, then we consider this as small
857 * number of NFV VFs used for NFV appliances, since this is a special
858 * case, we try to assign maximum vectors per VF (65) as much as
859 * possible, based on determine_resources algorithm.
860 * 2. if num_alloc_vfs is from 17 to 256, then its large number of
861 * regular VFs which are not used for any special purpose. Hence try to
862 * grab default interrupt vectors (5 as supported by AVF driver).
864 if (pf->num_alloc_vfs <= 16) {
865 num_msix = ice_determine_res(pf, num_avail_msix,
867 ICE_MIN_INTR_PER_VF);
868 } else if (pf->num_alloc_vfs <= ICE_MAX_VF_COUNT) {
869 num_msix = ice_determine_res(pf, num_avail_msix,
870 ICE_DFLT_INTR_PER_VF,
871 ICE_MIN_INTR_PER_VF);
873 dev_err(dev, "Number of VFs %d exceeds max VF count %d\n",
874 pf->num_alloc_vfs, ICE_MAX_VF_COUNT);
881 /* Grab from the common pool
882 * start by requesting Default queues (4 as supported by AVF driver),
883 * Note that, the main difference between queues and vectors is, latter
884 * can only be reserved at init time but queues can be requested by VF
885 * at runtime through Virtchnl, that is the reason we start by reserving
888 num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
889 ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
891 num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
892 ICE_DFLT_QS_PER_VF, ICE_MIN_QS_PER_VF);
894 if (!num_txq || !num_rxq)
897 if (ice_sriov_set_msix_res(pf, num_msix * pf->num_alloc_vfs))
900 /* since AVF driver works with only queue pairs which means, it expects
901 * to have equal number of Rx and Tx queues, so take the minimum of
902 * available Tx or Rx queues
904 pf->num_vf_qps = min_t(int, num_txq, num_rxq);
905 pf->num_vf_msix = num_msix;
911 * ice_cleanup_and_realloc_vf - Clean up VF and reallocate resources after reset
912 * @vf: pointer to the VF structure
914 * Cleanup a VF after the hardware reset is finished. Expects the caller to
915 * have verified whether the reset is finished properly, and ensure the
916 * minimum amount of wait time has passed. Reallocate VF resources back to make
919 static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
921 struct ice_pf *pf = vf->pf;
927 /* PF software completes the flow by notifying VF that reset flow is
928 * completed. This is done by enabling hardware by clearing the reset
929 * bit in the VPGEN_VFRTRIG reg and setting VFR_STATE in the VFGEN_RSTAT
930 * register to VFR completed (done at the end of this function)
931 * By doing this we allow HW to access VF memory at any point. If we
932 * did it any sooner, HW could access memory while it was being freed
933 * in ice_free_vf_res(), causing an IOMMU fault.
935 * On the other hand, this needs to be done ASAP, because the VF driver
936 * is waiting for this to happen and may report a timeout. It's
937 * harmless, but it gets logged into Guest OS kernel log, so best avoid
940 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
941 reg &= ~VPGEN_VFRTRIG_VFSWR_M;
942 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
944 /* reallocate VF resources to finish resetting the VSI state */
945 if (!ice_alloc_vf_res(vf)) {
948 ice_ena_vf_mappings(vf);
949 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
950 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
952 vsi = pf->vsi[vf->lan_vsi_idx];
953 if (ice_vsi_add_vlan(vsi, 0))
954 dev_warn(ice_pf_to_dev(pf),
955 "Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest",
959 /* Tell the VF driver the reset is done. This needs to be done only
960 * after VF has been fully initialized, because the VF driver may
961 * request resources immediately after setting this flag.
963 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
967 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
968 * @vf: pointer to the VF info
969 * @vsi: the VSI being configured
970 * @promisc_m: mask of promiscuous config bits
971 * @rm_promisc: promisc flag request from the VF to remove or add filter
973 * This function configures VF VSI promiscuous mode, based on the VF requests,
974 * for Unicast, Multicast and VLAN
976 static enum ice_status
977 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
980 struct ice_pf *pf = vf->pf;
981 enum ice_status status = 0;
986 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
988 } else if (vf->port_vlan_id) {
990 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
993 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
997 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1000 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1008 * ice_config_res_vfs - Finalize allocation of VFs resources in one go
1009 * @pf: pointer to the PF structure
1011 * This function is being called as last part of resetting all VFs, or when
1012 * configuring VFs for the first time, where there is no resource to be freed
1013 * Returns true if resources were properly allocated for all VFs, and false
1016 static bool ice_config_res_vfs(struct ice_pf *pf)
1018 struct device *dev = ice_pf_to_dev(pf);
1019 struct ice_hw *hw = &pf->hw;
1022 if (ice_check_avail_res(pf)) {
1023 dev_err(dev, "Cannot allocate VF resources, try with fewer number of VFs\n");
1027 /* rearm global interrupts */
1028 if (test_and_clear_bit(__ICE_OICR_INTR_DIS, pf->state))
1029 ice_irq_dynamic_ena(hw, NULL, NULL);
1031 /* Finish resetting each VF and allocate resources */
1032 ice_for_each_vf(pf, v) {
1033 struct ice_vf *vf = &pf->vf[v];
1035 vf->num_vf_qs = pf->num_vf_qps;
1036 dev_dbg(dev, "VF-id %d has %d queues configured\n", vf->vf_id,
1038 ice_cleanup_and_realloc_vf(vf);
1042 clear_bit(__ICE_VF_DIS, pf->state);
1048 * ice_reset_all_vfs - reset all allocated VFs in one go
1049 * @pf: pointer to the PF structure
1050 * @is_vflr: true if VFLR was issued, false if not
1052 * First, tell the hardware to reset each VF, then do all the waiting in one
1053 * chunk, and finally finish restoring each VF after the wait. This is useful
1054 * during PF routines which need to reset all VFs, as otherwise it must perform
1055 * these resets in a serialized fashion.
1057 * Returns true if any VFs were reset, and false otherwise.
1059 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
/* NOTE(review): this excerpt elides interior lines (braces, local
 * declarations, early returns); comments describe only the visible code.
 */
1061 struct device *dev = ice_pf_to_dev(pf);
1062 struct ice_hw *hw = &pf->hw;
1066 /* If we don't have any VFs, then there is nothing to reset */
1067 if (!pf->num_alloc_vfs)
1070 /* If VFs have been disabled, there is no need to reset */
/* test_and_set_bit() both checks and claims __ICE_VF_DIS atomically, so
 * only one caller at a time proceeds past this point.
 */
1071 if (test_and_set_bit(__ICE_VF_DIS, pf->state))
1074 /* Begin reset on all VFs at once */
1075 ice_for_each_vf(pf, v)
1076 ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1078 ice_for_each_vf(pf, v) {
1079 struct ice_vsi *vsi;
1082 vsi = pf->vsi[vf->lan_vsi_idx];
1083 if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
/* Disable each VF's LAN Tx queues via AQ before polling for VFR done. */
1085 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1086 NULL, ICE_VF_RESET, vf->vf_id, NULL);
1089 /* HW requires some time to make sure it can flush the FIFO for a VF
1090 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1091 * sequence to make sure that it has completed. We'll keep track of
1092 * the VFs using a simple iterator that increments once that VF has
1093 * finished resetting.
/* Bounded poll: at most 10 outer passes; v advances only when a VF is
 * confirmed done, so v < num_alloc_vfs afterwards means a timeout.
 */
1095 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1096 /* Check each VF in sequence */
1097 while (v < pf->num_alloc_vfs) {
1101 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1102 if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1103 /* only delay if the check failed */
1104 usleep_range(10, 20);
1108 /* If the current VF has finished resetting, move on
1109 * to the next VF in sequence.
1115 /* Display a warning if at least one VF didn't manage to reset in
1116 * time, but continue on with the operation.
1118 if (v < pf->num_alloc_vfs)
1119 dev_warn(dev, "VF reset check timeout\n");
1121 /* free VF resources to begin resetting the VSI state */
1122 ice_for_each_vf(pf, v) {
1125 ice_free_vf_res(vf);
1127 /* Free VF queues as well, and reallocate later.
1128 * If a given VF has different number of queues
1129 * configured, the request for update will come
1130 * via mailbox communication.
1135 if (ice_sriov_free_msix_res(pf))
1136 dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n")
1138 if (!ice_config_res_vfs(pf))
1145 * ice_is_vf_disabled
1146 * @vf: pointer to the VF info
1148 * Returns true if the PF or VF is disabled, false otherwise.
1150 static bool ice_is_vf_disabled(struct ice_vf *vf)
/* Pure predicate over PF/VF state bits; no side effects in visible code. */
1152 struct ice_pf *pf = vf->pf;
1154 /* If the PF has been disabled, there is no need resetting VF until
1155 * PF is active again. Similarly, if the VF has been disabled, this
1156 * means something else is resetting the VF, so we shouldn't continue.
1157 * Otherwise, set disable VF state bit for actual reset, and continue.
1159 return (test_bit(__ICE_VF_DIS, pf->state) ||
1160 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1164 * ice_reset_vf - Reset a particular VF
1165 * @vf: pointer to the VF structure
1166 * @is_vflr: true if VFLR was issued, false if not
1168 * Returns true if the VF is reset, false otherwise.
1170 static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
/* NOTE(review): excerpt elides interior lines; comments describe only the
 * statements visible here.
 */
1172 struct ice_pf *pf = vf->pf;
1173 struct ice_vsi *vsi;
1181 dev = ice_pf_to_dev(pf);
/* If another actor already disabled/reset this VF, skip the reset. */
1183 if (ice_is_vf_disabled(vf)) {
1184 dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
1189 /* Set VF disable bit state here, before triggering reset */
1190 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1191 ice_trigger_vf_reset(vf, is_vflr, false);
1193 vsi = pf->vsi[vf->lan_vsi_idx];
1195 if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1198 /* Call Disable LAN Tx queue AQ whether or not queues are
1199 * enabled. This is needed for successful completion of VFR.
1201 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1202 NULL, ICE_VF_RESET, vf->vf_id, NULL);
1205 /* poll VPGEN_VFRSTAT reg to make sure
1206 * that reset is complete
/* Bounded poll: up to 10 iterations with a short sleep between reads. */
1208 for (i = 0; i < 10; i++) {
1209 /* VF reset requires driver to first reset the VF and then
1210 * poll the status register to make sure that the reset
1211 * completed successfully.
1213 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1214 if (reg & VPGEN_VFRSTAT_VFRD_M) {
1219 /* only sleep if the reset is not done */
1220 usleep_range(10, 20);
1223 /* Display a warning if VF didn't manage to reset in time, but need to
1224 * continue on with the operation.
1227 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
1229 /* disable promiscuous modes in case they were enabled
1230 * ignore any error if disabling process failed
1232 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1233 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
/* Pick the VLAN-aware promisc bits when a port VLAN or VSI VLANs exist. */
1234 if (vf->port_vlan_id || vsi->num_vlan)
1235 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1237 promisc_m = ICE_UCAST_PROMISC_BITS;
/* Re-fetches the same VSI pointer already loaded above. */
1239 vsi = pf->vsi[vf->lan_vsi_idx];
1240 if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1241 dev_err(dev, "disabling promiscuous mode failed\n");
1244 /* free VF resources to begin resetting the VSI state */
1245 ice_free_vf_res(vf);
1247 ice_cleanup_and_realloc_vf(vf);
1255 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1256 * @pf: pointer to the PF structure
1258 void ice_vc_notify_link_state(struct ice_pf *pf)
/* Fan the current link state out to every VF on this PF. */
1262 ice_for_each_vf(pf, i)
1263 ice_vc_notify_vf_link_state(&pf->vf[i]);
1267 * ice_vc_notify_reset - Send pending reset message to all VFs
1268 * @pf: pointer to the PF structure
1270 * indicate a pending reset to all VFs on a given PF
1272 void ice_vc_notify_reset(struct ice_pf *pf)
1274 struct virtchnl_pf_event pfe;
/* Nothing to notify when no VFs are allocated. */
1276 if (!pf->num_alloc_vfs)
1279 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1280 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
/* Broadcast the impending-reset event to all enabled VFs. */
1281 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1282 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1286 * ice_vc_notify_vf_reset - Notify VF of a reset event
1287 * @vf: pointer to the VF structure
1289 static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1291 struct virtchnl_pf_event pfe;
/* Bail out on an out-of-range VF ID. */
1298 if (ice_validate_vf_id(pf, vf->vf_id))
1301 /* Bail out if VF is in disabled state, neither initialized, nor active
1302 * state - otherwise proceed with notifications
1304 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1305 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1306 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1309 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1310 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
/* Send the event to this single VF; the return value is not checked in
 * the visible lines.
 */
1311 ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1312 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1317 * ice_alloc_vfs - Allocate and set up VFs resources
1318 * @pf: pointer to the PF structure
1319 * @num_alloc_vfs: number of VFs to allocate
1321 static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
/* NOTE(review): excerpt elides interior lines; error handling below follows
 * the goto-cleanup pattern, unwinding in reverse order of acquisition.
 */
1323 struct device *dev = ice_pf_to_dev(pf);
1324 struct ice_hw *hw = &pf->hw;
1328 /* Disable global interrupt 0 so we don't try to handle the VFLR. */
1329 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1330 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1331 set_bit(__ICE_OICR_INTR_DIS, pf->state);
1334 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
1336 pf->num_alloc_vfs = 0;
1337 goto err_unroll_intr;
1339 /* allocate memory */
/* devm_kcalloc: zeroed, device-managed allocation tied to the PF device. */
1340 vfs = devm_kcalloc(dev, num_alloc_vfs, sizeof(*vfs), GFP_KERNEL);
1343 goto err_pci_disable_sriov;
1346 pf->num_alloc_vfs = num_alloc_vfs;
1348 /* apply default profile */
1349 ice_for_each_vf(pf, i) {
1351 vfs[i].vf_sw_id = pf->first_sw;
1354 /* assign default capabilities */
1355 set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
/* Spoof checking defaults to enabled for new VFs. */
1356 vfs[i].spoofchk = true;
1359 /* VF resources get allocated with initialization */
1360 if (!ice_config_res_vfs(pf)) {
1362 goto err_unroll_sriov;
1369 devm_kfree(dev, vfs);
1371 pf->num_alloc_vfs = 0;
1372 err_pci_disable_sriov:
1373 pci_disable_sriov(pf->pdev);
1375 /* rearm interrupts here */
1376 ice_irq_dynamic_ena(hw, NULL, NULL);
1377 clear_bit(__ICE_OICR_INTR_DIS, pf->state);
1382 * ice_pf_state_is_nominal - checks the PF for nominal state
1383 * @pf: pointer to PF to check
1385 * Check the PF's state for a collection of bits that would indicate
1386 * the PF is in a state that would inhibit normal operation for
1387 * driver functionality.
1389 * Returns true if PF is in a nominal state.
1390 * Returns false otherwise
1392 static bool ice_pf_state_is_nominal(struct ice_pf *pf)
1394 DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
/* Build a mask of the first __ICE_STATE_NOMINAL_CHECK_BITS state bits and
 * fail if any of them are currently set on the PF.
 */
1399 bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
1400 if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1407 * ice_pci_sriov_ena - Enable or change number of VFs
1408 * @pf: pointer to the PF structure
1409 * @num_vfs: number of VFs to allocate
1411 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1413 int pre_existing_vfs = pci_num_vf(pf->pdev);
1414 struct device *dev = ice_pf_to_dev(pf);
/* Reject the request while the PF is in a non-nominal state. */
1417 if (!ice_pf_state_is_nominal(pf)) {
1418 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
1422 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
1423 dev_err(dev, "This device is not capable of SR-IOV\n");
/* Changing the VF count vs. requesting the same count take different
 * paths (the bodies are elided in this excerpt).
 */
1427 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1429 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1432 if (num_vfs > pf->num_vfs_supported) {
1433 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1434 num_vfs, pf->num_vfs_supported);
1438 dev_info(dev, "Allocating %d VFs\n", num_vfs);
1439 err = ice_alloc_vfs(pf, num_vfs);
1441 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
/* Mark SR-IOV active only after successful allocation. */
1445 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1450 * ice_sriov_configure - Enable or change number of VFs via sysfs
1451 * @pdev: pointer to a pci_dev structure
1452 * @num_vfs: number of VFs to allocate
1454 * This function is called when the user updates the number of VFs in sysfs.
1456 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
1458 struct ice_pf *pf = pci_get_drvdata(pdev);
1459 struct device *dev = ice_pf_to_dev(pf);
/* SR-IOV is unavailable in safe mode (reduced-feature operation). */
1461 if (ice_is_safe_mode(pf)) {
1462 dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
1467 return ice_pci_sriov_ena(pf, num_vfs);
/* num_vfs == 0 path: VFs can only be freed when none are assigned to VMs
 * (the disable branch is elided in this excerpt).
 */
1469 if (!pci_vfs_assigned(pdev)) {
1472 dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
1480 * ice_process_vflr_event - Free VF resources via IRQ calls
1481 * @pf: pointer to the PF structure
1483 * called from the VFLR IRQ handler to
1484 * free up VF resources and state variables
1486 void ice_process_vflr_event(struct ice_pf *pf)
1488 struct ice_hw *hw = &pf->hw;
/* Consume the pending-VFLR flag; nothing to do if it was not set. */
1492 if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
1496 ice_for_each_vf(pf, vf_id) {
1497 struct ice_vf *vf = &pf->vf[vf_id];
1498 u32 reg_idx, bit_idx;
/* GLGEN_VFLRSTAT packs 32 VFs per register; locate this VF's bit. */
1500 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
1501 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
1502 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
1503 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
1504 if (reg & BIT(bit_idx))
1505 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
1506 ice_reset_vf(vf, true);
1511 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
1512 * @vf: pointer to the VF info
1514 static void ice_vc_reset_vf(struct ice_vf *vf)
/* Notify the VF of the impending reset first, then perform a software
 * (non-VFLR) reset.
 */
1516 ice_vc_notify_vf_reset(vf);
1517 ice_reset_vf(vf, false);
1521 * ice_vc_send_msg_to_vf - Send message to VF
1522 * @vf: pointer to the VF info
1523 * @v_opcode: virtual channel opcode
1524 * @v_retval: virtual channel return value
1525 * @msg: pointer to the msg buffer
1526 * @msglen: msg length
1531 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
1532 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
1534 enum ice_status aq_ret;
1542 if (ice_validate_vf_id(pf, vf->vf_id))
1545 dev = ice_pf_to_dev(pf);
1547 /* single place to detect unsuccessful return values */
/* Failure branch: count the invalid message and disable the VF once it
 * exceeds ICE_DFLT_NUM_INVAL_MSGS_ALLOWED.
 */
1549 vf->num_inval_msgs++;
1550 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
1551 v_opcode, v_retval);
1552 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
1553 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
1555 dev_err(dev, "Use PF Control I/F to enable the VF\n");
1556 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
/* Success branch: a valid message resets the invalid-message counter. */
1560 vf->num_valid_msgs++;
1561 /* reset the invalid counter, if a valid message is received. */
1562 vf->num_inval_msgs = 0;
/* Relay the reply over the mailbox AQ; ENOSYS from the FW is tolerated. */
1565 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
1567 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
1568 dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
1569 vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
1577 * ice_vc_get_ver_msg
1578 * @vf: pointer to the VF info
1579 * @msg: pointer to the msg buffer
1581 * called from the VF to request the API version used by the PF
1583 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
1585 struct virtchnl_version_info info = {
1586 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
/* Record the API version the VF reports before answering. */
1589 vf->vf_ver = *(struct virtchnl_version_info *)msg;
1590 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
1591 if (VF_IS_V10(&vf->vf_ver))
1592 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
1594 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
1595 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
1596 sizeof(struct virtchnl_version_info));
1600 * ice_vc_get_vf_res_msg
1601 * @vf: pointer to the VF info
1602 * @msg: pointer to the msg buffer
1604 * called from the VF to request its resources
1606 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
/* NOTE(review): excerpt elides interior lines (length adjustment for extra
 * VSIs, error labels); comments describe only the visible statements.
 */
1608 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1609 struct virtchnl_vf_resource *vfres = NULL;
1610 struct ice_pf *pf = vf->pf;
1611 struct ice_vsi *vsi;
/* The VF must have completed init before it can query resources. */
1615 if (ice_check_vf_init(pf, vf)) {
1616 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1620 len = sizeof(struct virtchnl_vf_resource);
1622 vfres = kzalloc(len, GFP_KERNEL);
1624 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
/* v1.1+ VFs supply their capability bits in the message payload; older
 * VFs get a fixed legacy default.
 */
1628 if (VF_IS_V11(&vf->vf_ver))
1629 vf->driver_caps = *(u32 *)msg;
1631 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
1632 VIRTCHNL_VF_OFFLOAD_RSS_REG |
1633 VIRTCHNL_VF_OFFLOAD_VLAN;
1635 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
1636 vsi = pf->vsi[vf->lan_vsi_idx];
1638 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* Offer VLAN offload only when no port VLAN (pvid) is configured. */
1642 if (!vsi->info.pvid)
1643 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
/* Advertise back the intersection of what the VF asked for and what the
 * PF supports, capability by capability.
 */
1645 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
1646 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
1648 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
1649 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
1651 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
1654 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1655 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
1657 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
1658 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
1660 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
1661 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
1663 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
1664 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
1666 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1667 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
1669 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
1670 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
1672 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
1673 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
1675 vfres->num_vsis = 1;
1676 /* Tx and Rx queue are equal for VF */
1677 vfres->num_queue_pairs = vsi->num_txq;
1678 vfres->max_vectors = pf->num_vf_msix;
1679 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
1680 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
1682 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
1683 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
1684 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
1685 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
1686 vf->dflt_lan_addr.addr);
1688 /* match guest capabilities */
1689 vf->driver_caps = vfres->vf_cap_flags;
/* The VF is now fully active from the PF's perspective. */
1691 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
1694 /* send the response back to the VF */
1695 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
1703 * ice_vc_reset_vf_msg
1704 * @vf: pointer to the VF info
1706 * called from the VF to reset itself,
1707 * unlike other virtchnl messages, PF driver
1708 * doesn't send the response back to the VF
1710 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
/* Only an ACTIVE VF may request its own reset; no reply is sent (see the
 * kernel-doc above).
 */
1712 if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
1713 ice_reset_vf(vf, false);
1717 * ice_find_vsi_from_id
1718 * @pf: the PF structure to search for the VSI
1719 * @id: ID of the VSI it is searching for
1721 * searches for the VSI with the given ID
1723 static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
/* Linear scan of the PF's VSI array for a matching vsi_num; the not-found
 * return is elided in this excerpt (presumably NULL - confirm in full file).
 */
1727 ice_for_each_vsi(pf, i)
1728 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
1735 * ice_vc_isvalid_vsi_id
1736 * @vf: pointer to the VF info
1737 * @vsi_id: VF relative VSI ID
1739 * check for the valid VSI ID
1741 static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
1743 struct ice_pf *pf = vf->pf;
1744 struct ice_vsi *vsi;
1746 vsi = ice_find_vsi_from_id(pf, vsi_id);
/* Valid only if the VSI exists AND belongs to this VF. */
1748 return (vsi && (vsi->vf_id == vf->vf_id));
1752 * ice_vc_isvalid_q_id
1753 * @vf: pointer to the VF info
1755 * @qid: VSI relative queue ID
1757 * check for the valid queue ID
1759 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
1761 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
1762 /* allocated Tx and Rx queues should be always equal for VF VSI */
/* Bounds-check against alloc_txq only, relying on the equality above. */
1763 return (vsi && (qid < vsi->alloc_txq));
1767 * ice_vc_isvalid_ring_len
1768 * @ring_len: length of ring
1770 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
1773 static bool ice_vc_isvalid_ring_len(u16 ring_len)
/* A length of 0 means "queue not configured" and is accepted; otherwise the
 * length must lie in [ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC] and be a multiple
 * of ICE_REQ_DESC_MULTIPLE.
 */
1775 return ring_len == 0 ||
1776 (ring_len >= ICE_MIN_NUM_DESC &&
1777 ring_len <= ICE_MAX_NUM_DESC &&
1778 !(ring_len % ICE_REQ_DESC_MULTIPLE));
1782 * ice_vc_config_rss_key
1783 * @vf: pointer to the VF info
1784 * @msg: pointer to the msg buffer
1786 * Configure the VF's RSS key
1788 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
1790 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1791 struct virtchnl_rss_key *vrk =
1792 (struct virtchnl_rss_key *)msg;
1793 struct ice_pf *pf = vf->pf;
1794 struct ice_vsi *vsi;
/* Validation chain: VF active, VSI owned by VF, exact key length, and RSS
 * enabled on the PF - any failure yields ERR_PARAM.
 */
1796 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1797 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1801 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
1802 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1806 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
1807 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1811 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1812 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1816 vsi = pf->vsi[vf->lan_vsi_idx];
1818 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* Program only the hash key (LUT untouched: NULL/0 arguments). */
1822 if (ice_set_rss(vsi, vrk->key, NULL, 0))
1823 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1825 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
1830 * ice_vc_config_rss_lut
1831 * @vf: pointer to the VF info
1832 * @msg: pointer to the msg buffer
1834 * Configure the VF's RSS LUT
1836 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
1838 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
1839 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1840 struct ice_pf *pf = vf->pf;
1841 struct ice_vsi *vsi;
/* Same validation chain as the RSS-key handler, but checking the LUT
 * entry count instead of the key length.
 */
1843 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1844 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1848 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
1849 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1853 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
1854 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1858 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
1859 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1863 vsi = pf->vsi[vf->lan_vsi_idx];
1865 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* Program only the lookup table (key untouched: NULL argument). */
1869 if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
1870 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
1872 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
1877 * ice_set_vf_spoofchk
1878 * @netdev: network interface device structure
1879 * @vf_id: VF identifier
1880 * @ena: flag to enable or disable feature
1882 * Enable or disable VF spoof checking
1884 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
/* NOTE(review): excerpt elides interior lines (returns, kfree of ctx, the
 * else branch markers); comments describe only the visible statements.
 */
1886 struct ice_netdev_priv *np = netdev_priv(netdev);
1887 struct ice_pf *pf = np->vsi->back;
1888 struct ice_vsi_ctx *ctx;
1889 struct ice_vsi *vf_vsi;
1890 enum ice_status status;
1895 dev = ice_pf_to_dev(pf);
1896 if (ice_validate_vf_id(pf, vf_id))
1899 vf = &pf->vf[vf_id];
1901 if (ice_check_vf_init(pf, vf))
1904 vf_vsi = pf->vsi[vf->lan_vsi_idx];
1906 netdev_err(netdev, "VSI %d for VF %d is null\n",
1907 vf->lan_vsi_idx, vf->vf_id);
1911 if (vf_vsi->type != ICE_VSI_VF) {
1912 netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
1913 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
/* No-op fast path when the requested state matches the current one. */
1917 if (ena == vf->spoofchk) {
1918 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
1922 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
/* Update only the security section of the VSI context. */
1926 ctx->info.sec_flags = vf_vsi->info.sec_flags;
1927 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1929 ctx->info.sec_flags |=
1930 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
1931 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
1932 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
1934 ctx->info.sec_flags &=
1935 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
1936 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
1937 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
1940 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
/* NOTE(review): format string below has a misplaced '\n' before
 * " error %d" and no trailing newline - likely a log-formatting bug;
 * fix separately (doc-only change here).
 */
1942 dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
1943 ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
1948 /* only update spoofchk state and VSI context on success */
1949 vf_vsi->info.sec_flags = ctx->info.sec_flags;
1958 * ice_vc_get_stats_msg
1959 * @vf: pointer to the VF info
1960 * @msg: pointer to the msg buffer
1962 * called from the VF to get VSI stats
1964 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
1966 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
1967 struct virtchnl_queue_select *vqs =
1968 (struct virtchnl_queue_select *)msg;
1969 struct ice_eth_stats stats = { 0 };
1970 struct ice_pf *pf = vf->pf;
1971 struct ice_vsi *vsi;
/* Standard validation: VF must be active and must own the requested VSI. */
1973 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
1974 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1978 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
1979 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
1983 vsi = pf->vsi[vf->lan_vsi_idx];
1985 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* Refresh the VSI's Ethernet counters, then snapshot them by value. */
1989 ice_update_eth_stats(vsi);
1991 stats = vsi->eth_stats;
1994 /* send the response to the VF */
1995 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
1996 (u8 *)&stats, sizeof(stats));
2001 * @vf: pointer to the VF info
2002 * @msg: pointer to the msg buffer
2004 * called from the VF to enable all or specific queue(s)
2006 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
/* NOTE(review): excerpt elides interior lines (error labels, counter
 * updates); comments describe only the visible statements.
 */
2008 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2009 struct virtchnl_queue_select *vqs =
2010 (struct virtchnl_queue_select *)msg;
2011 struct ice_pf *pf = vf->pf;
2012 struct ice_vsi *vsi;
2013 unsigned long q_map;
2016 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2017 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2021 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2022 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* At least one queue must be selected, and neither bitmap may exceed the
 * per-VF queue limit.
 */
2026 if (!vqs->rx_queues && !vqs->tx_queues) {
2027 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2031 if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
2032 vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
2033 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2037 vsi = pf->vsi[vf->lan_vsi_idx];
2039 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2043 /* Enable only Rx rings, Tx rings were enabled by the FW when the
2044 * Tx queue group list was configured and the context bits were
2045 * programmed using ice_vsi_cfg_txqs
2047 q_map = vqs->rx_queues;
2048 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2049 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2050 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2054 /* Skip queue if enabled */
2055 if (test_bit(vf_q_id, vf->rxq_ena))
2058 if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
2059 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
2060 vf_q_id, vsi->vsi_num);
2061 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* Track per-queue enable state in the VF's rxq_ena bitmap. */
2065 set_bit(vf_q_id, vf->rxq_ena);
2069 vsi = pf->vsi[vf->lan_vsi_idx];
/* Tx queues: only software bookkeeping here (HW enable is done by FW,
 * per the comment above).
 */
2070 q_map = vqs->tx_queues;
2071 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2072 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2073 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2077 /* Skip queue if enabled */
2078 if (test_bit(vf_q_id, vf->txq_ena))
2081 set_bit(vf_q_id, vf->txq_ena);
2085 /* Set flag to indicate that queues are enabled */
2086 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
2087 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2090 /* send the response to the VF */
2091 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
2097 * @vf: pointer to the VF info
2098 * @msg: pointer to the msg buffer
2100 * called from the VF to disable all or specific
2103 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
2105 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2106 struct virtchnl_queue_select *vqs =
2107 (struct virtchnl_queue_select *)msg;
2108 struct ice_pf *pf = vf->pf;
2109 struct ice_vsi *vsi;
2110 unsigned long q_map;
2113 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
2114 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
2115 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2119 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
2120 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2124 if (!vqs->rx_queues && !vqs->tx_queues) {
2125 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2129 if (vqs->rx_queues > ICE_MAX_BASE_QS_PER_VF ||
2130 vqs->tx_queues > ICE_MAX_BASE_QS_PER_VF) {
2131 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2135 vsi = pf->vsi[vf->lan_vsi_idx];
2137 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2141 if (vqs->tx_queues) {
2142 q_map = vqs->tx_queues;
2144 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2145 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
2146 struct ice_txq_meta txq_meta = { 0 };
2148 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2149 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2153 /* Skip queue if not enabled */
2154 if (!test_bit(vf_q_id, vf->txq_ena))
2157 ice_fill_txq_meta(vsi, ring, &txq_meta);
2159 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
2161 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
2162 vf_q_id, vsi->vsi_num);
2163 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2167 /* Clear enabled queues flag */
2168 clear_bit(vf_q_id, vf->txq_ena);
2173 if (vqs->rx_queues) {
2174 q_map = vqs->rx_queues;
2176 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
2177 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
2178 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2182 /* Skip queue if not enabled */
2183 if (!test_bit(vf_q_id, vf->rxq_ena))
2186 if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
2187 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
2188 vf_q_id, vsi->vsi_num);
2189 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2193 /* Clear enabled queues flag */
2194 clear_bit(vf_q_id, vf->rxq_ena);
2199 /* Clear enabled queues flag */
2200 if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
2201 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
2204 /* send the response to the VF */
2205 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
2210 * ice_vc_cfg_irq_map_msg
2211 * @vf: pointer to the VF info
2212 * @msg: pointer to the msg buffer
2214 * called from the VF to configure the IRQ to queue map
2216 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* NOTE(review): excerpt elides interior lines (continue/goto targets,
 * qmap declaration); comments describe only the visible statements.
 */
2218 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2219 struct virtchnl_irq_map_info *irqmap_info;
2220 u16 vsi_id, vsi_q_id, vector_id;
2221 struct virtchnl_vector_map *map;
2222 struct ice_pf *pf = vf->pf;
2223 u16 num_q_vectors_mapped;
2224 struct ice_vsi *vsi;
2228 irqmap_info = (struct virtchnl_irq_map_info *)msg;
2229 num_q_vectors_mapped = irqmap_info->num_vectors;
2231 /* Check to make sure number of VF vectors mapped is not greater than
2232 * number of VF vectors originally allocated, and check that
2233 * there is actually at least a single VF queue vector mapped
2235 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
2236 pf->num_vf_msix < num_q_vectors_mapped ||
2237 !irqmap_info->num_vectors) {
2238 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2242 vsi = pf->vsi[vf->lan_vsi_idx];
2244 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2248 for (i = 0; i < num_q_vectors_mapped; i++) {
2249 struct ice_q_vector *q_vector;
2251 map = &irqmap_info->vecmap[i];
2253 vector_id = map->vector_id;
2254 vsi_id = map->vsi_id;
2255 /* vector_id is always 0-based for each VF, and can never be
2256 * larger than or equal to the max allowed interrupts per VF
/* Vector 0 is the VF's misc vector and must not carry queue mappings. */
2258 if (!(vector_id < ICE_MAX_INTR_PER_VF) ||
2259 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
2260 (!vector_id && (map->rxq_map || map->txq_map))) {
2261 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2265 /* No need to map VF miscellaneous or rogue vector */
2269 /* Subtract non queue vector from vector_id passed by VF
2270 * to get actual number of VSI queue vector array index
2272 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
2274 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2278 /* lookout for the invalid queue index */
/* Wire each mapped Rx queue to this vector and program its ITR index. */
2279 qmap = map->rxq_map;
2280 q_vector->num_ring_rx = 0;
2281 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2282 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
2283 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2286 q_vector->num_ring_rx++;
2287 q_vector->rx.itr_idx = map->rxitr_idx;
2288 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
2289 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
2290 q_vector->rx.itr_idx);
/* Same wiring for the Tx side of the map. */
2293 qmap = map->txq_map;
2294 q_vector->num_ring_tx = 0;
2295 for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
2296 if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
2297 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2300 q_vector->num_ring_tx++;
2301 q_vector->tx.itr_idx = map->txitr_idx;
2302 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
2303 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
2304 q_vector->tx.itr_idx);
2309 /* send the response to the VF */
2310 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
2316 * @vf: pointer to the VF info
2317 * @msg: pointer to the msg buffer
2319 * called from the VF to configure the Rx/Tx queues
2321 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* NOTE(review): excerpt elides interior lines (num_txq/num_rxq increments,
 * error labels); comments describe only the visible statements.
 */
2323 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2324 struct virtchnl_vsi_queue_config_info *qci =
2325 (struct virtchnl_vsi_queue_config_info *)msg;
2326 struct virtchnl_queue_pair_info *qpi;
2327 u16 num_rxq = 0, num_txq = 0;
2328 struct ice_pf *pf = vf->pf;
2329 struct ice_vsi *vsi;
2332 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2333 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2337 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
2338 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2342 vsi = pf->vsi[vf->lan_vsi_idx];
2344 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* The request may not exceed the per-VF cap nor what this VSI actually
 * has allocated for Tx/Rx.
 */
2348 if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
2349 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
2350 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
2351 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
2352 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2356 for (i = 0; i < qci->num_queue_pairs; i++) {
2357 qpi = &qci->qpair[i];
/* Per-pair sanity: both halves on the right VSI, matching queue IDs, no
 * head-writeback, valid ring lengths, and an in-range queue ID.
 */
2358 if (qpi->txq.vsi_id != qci->vsi_id ||
2359 qpi->rxq.vsi_id != qci->vsi_id ||
2360 qpi->rxq.queue_id != qpi->txq.queue_id ||
2361 qpi->txq.headwb_enabled ||
2362 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
2363 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
2364 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
2365 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2368 /* copy Tx queue info from VF into VSI */
2369 if (qpi->txq.ring_len > 0) {
2371 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
2372 vsi->tx_rings[i]->count = qpi->txq.ring_len;
2375 /* copy Rx queue info from VF into VSI */
2376 if (qpi->rxq.ring_len > 0) {
2378 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
2379 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
/* Rx buffer size must be 0 (unset) or within [1024, 16K-128]. */
2381 if (qpi->rxq.databuffer_size != 0 &&
2382 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
2383 qpi->rxq.databuffer_size < 1024)) {
2384 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2387 vsi->rx_buf_len = qpi->rxq.databuffer_size;
2388 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
/* Max packet size must be within [64, 16K). */
2389 if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
2390 qpi->rxq.max_pkt_size < 64) {
2391 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2396 vsi->max_frame = qpi->rxq.max_pkt_size;
2399 /* VF can request to configure less than allocated queues
2400 * or default allocated queues. So update the VSI with new number
2402 vsi->num_txq = num_txq;
2403 vsi->num_rxq = num_rxq;
2404 /* All queues of VF VSI are in TC 0 */
2405 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
2406 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
/* Program the HW queue contexts; any failure maps to an AQ error code. */
2408 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
2409 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2412 /* send the response to the VF */
2413 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
2419 * @vf: pointer to the VF info
2421 static bool ice_is_vf_trusted(struct ice_vf *vf)
/* Trusted means the privilege capability bit is set on this VF. */
2423 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
2427 * ice_can_vf_change_mac
2428 * @vf: pointer to the VF info
2430 * Return true if the VF is allowed to change its MAC filters, false otherwise
2432 static bool ice_can_vf_change_mac(struct ice_vf *vf)
2434 /* If the VF MAC address has been set administratively (via the
2435 * ndo_set_vf_mac command), then deny permission to the VF to
2436 * add/delete unicast MAC addresses, unless the VF is trusted
/* The false return is visible; the true return is elided in this excerpt. */
2438 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
2445 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
2446 * @vf: pointer to the VF info
2447 * @vsi: pointer to the VF's VSI
2448 * @mac_addr: MAC address to add
2451 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
2453 struct device *dev = ice_pf_to_dev(vf->pf);
2454 enum ice_status status;
2456 /* default unicast MAC already added */
2457 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
/* Untrusted VFs may not add unicast addresses once the PF has pinned
 * their MAC administratively.
 */
2460 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
2461 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
2465 status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
/* Duplicate filter vs. other add failures are reported separately. */
2466 if (status == ICE_ERR_ALREADY_EXISTS) {
2467 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
2470 } else if (status) {
2471 dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
2472 mac_addr, vf->vf_id, status);
2476 /* only set dflt_lan_addr once */
/* First unicast MAC the VF adds becomes its recorded default address. */
2477 if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
2478 is_unicast_ether_addr(mac_addr))
2479 ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
2487 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
2488 * @vf: pointer to the VF info
2489 * @vsi: pointer to the VF's VSI
2490 * @mac_addr: MAC address to delete
2493 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
2495 struct device *dev = ice_pf_to_dev(vf->pf);
2496 enum ice_status status;
2498 if (!ice_can_vf_change_mac(vf) &&
2499 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
2502 status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
2503 if (status == ICE_ERR_DOES_NOT_EXIST) {
2504 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
2507 } else if (status) {
2508 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
2509 mac_addr, vf->vf_id, status);
2513 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
2514 eth_zero_addr(vf->dflt_lan_addr.addr);
/**
 * ice_vc_handle_mac_addr_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @set: true if MAC filters are being set, false otherwise
 *
 * add guest MAC address filter
 */
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
	/* per-address handler, selected below based on @set */
	int (*ice_vc_cfg_mac)
		(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_ether_addr_list *al =
		(struct virtchnl_ether_addr_list *)msg;
	struct ice_pf *pf = vf->pf;
	enum virtchnl_ops vc_op;
	struct ice_vsi *vsi;

	/* pick the matching virtchnl opcode + handler pair */
	vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
	ice_vc_cfg_mac = ice_vc_add_mac_addr;
	vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
	ice_vc_cfg_mac = ice_vc_del_mac_addr;

	/* VF must be active and must own the VSI it names in the message */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
	    !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;

	/* If this VF is not privileged, then we can't add more than a
	 * limited number of addresses. Check to make sure that the
	 * additions do not push us over the limit.
	 */
	if (set && !ice_is_vf_trusted(vf) &&
	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;

	vsi = pf->vsi[vf->lan_vsi_idx];
		/* error path: VSI lookup failed */
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
		goto handle_mac_exit;

	for (i = 0; i < al->num_elements; i++) {
		u8 *mac_addr = al->list[i].addr;

		/* broadcast and all-zero addresses can never be valid
		 * unicast/multicast filters; skip them
		 */
		if (is_broadcast_ether_addr(mac_addr) ||
		    is_zero_ether_addr(mac_addr))

		result = ice_vc_cfg_mac(vf, vsi, mac_addr);
		/* duplicate add / missing delete are deliberately not
		 * fatal; any other error aborts the whole request
		 */
		if (result == -EEXIST || result == -ENOENT) {
		} else if (result) {
			v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
			goto handle_mac_exit;

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
2597 * ice_vc_add_mac_addr_msg
2598 * @vf: pointer to the VF info
2599 * @msg: pointer to the msg buffer
2601 * add guest MAC address filter
2603 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2605 return ice_vc_handle_mac_addr_msg(vf, msg, true);
2609 * ice_vc_del_mac_addr_msg
2610 * @vf: pointer to the VF info
2611 * @msg: pointer to the msg buffer
2613 * remove guest MAC address filter
2615 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
2617 return ice_vc_handle_mac_addr_msg(vf, msg, false);
/**
 * ice_vc_request_qs_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 *
 * VFs get a default number of queues but can use this message to request a
 * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
 * available queue pairs via virtchnl message response to VF.
 */
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vf_res_request *vfres =
		(struct virtchnl_vf_res_request *)msg;
	u16 req_queues = vfres->num_queue_pairs;
	struct ice_pf *pf = vf->pf;
	u16 max_allowed_vf_queues;
	u16 tx_rx_queue_left;

	dev = ice_pf_to_dev(pf);
	/* only an active VF may renegotiate its queue count */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	cur_queues = vf->num_vf_qs;
	/* headroom = queues still unclaimed on BOTH the Tx and Rx side */
	tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
				 ice_get_avail_rxq_count(pf));
	max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
	/* a request for zero queues is ignored */
		dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
	} else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
		/* over the per-VF hardware maximum: reply with the cap */
		dev_err(dev, "VF %d tried to request more than %d queues.\n",
			vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
		vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
	} else if (req_queues > cur_queues &&
		   req_queues - cur_queues > tx_rx_queue_left) {
		/* growth request exceeds the remaining pool: reply with
		 * the best we could offer instead
		 */
		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
					       ICE_MAX_BASE_QS_PER_VF);
	/* request is successful, then reset VF */
	vf->num_req_qs = req_queues;
	ice_vc_reset_vf(vf);
	dev_info(dev, "VF %d granted request of %u queues.\n",
		 vf->vf_id, req_queues);

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
				     v_ret, (u8 *)vfres, sizeof(*vfres));
/**
 * ice_set_vf_port_vlan
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * program VF Port VLAN ID and/or QoS
 */
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
	/* pvid field packs the 802.1p priority above the 12-bit VLAN ID */
	u16 vlanprio = vlan_id | (qos << ICE_VLAN_PRIORITY_S);
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id))

	/* VLAN ID bounded by ICE_MAX_VLANID, 802.1p priority is 3 bits */
	if (vlan_id > ICE_MAX_VLANID || qos > 7) {
		dev_err(dev, "Invalid VF Parameters\n");

	/* only 802.1Q port VLANs are supported */
	if (vlan_proto != htons(ETH_P_8021Q)) {
		dev_err(dev, "VF VLAN protocol is not supported\n");
		return -EPROTONOSUPPORT;

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];
	if (ice_check_vf_init(pf, vf))

	if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);

	/* If PVID, then remove all filters on the old VLAN */
		ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &

	if (vlan_id || qos) {
		/* install the new port VLAN (insert-on-Tx/strip-on-Rx) */
		ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
			goto error_set_pvid;
		/* vlan_id == 0 && qos == 0: clear the port VLAN */
		ice_vsi_manage_pvid(vsi, 0, false);

	dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
		 vlan_id, qos, vf_id);

	/* add new VLAN filter for each MAC */
	ret = ice_vsi_add_vlan(vsi, vlan_id);
		goto error_set_pvid;

	/* The Port VLAN needs to be saved across resets the same as the
	 * default LAN MAC address.
	 */
	vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
2760 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
2761 * @caps: VF driver negotiated capabilities
2763 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
2765 static bool ice_vf_vlan_offload_ena(u32 caps)
2767 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
/**
 * ice_vc_process_vlan_msg
 * @vf: pointer to the VF info
 * @msg: pointer to the msg buffer
 * @add_v: Add VLAN if true, otherwise delete VLAN
 *
 * Process virtchnl op to add or remove programmed guest VLAN ID
 */
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
	struct virtchnl_vlan_filter_list *vfl =
		(struct virtchnl_vlan_filter_list *)msg;
	struct ice_pf *pf = vf->pf;
	bool vlan_promisc = false;
	struct ice_vsi *vsi;

	dev = ice_pf_to_dev(pf);
	/* VF must be active to touch its VLAN filters */
	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* VLAN offload capability must have been negotiated */
	if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* VF must own the VSI named in the message */
	if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* validate every requested VLAN ID before changing anything */
	for (i = 0; i < vfl->num_elements; i++) {
		if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
			dev_err(dev, "invalid VF VLAN id %d\n",

	vsi = pf->vsi[vf->lan_vsi_idx];
		/* error path: VSI lookup failed */
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	/* untrusted VFs are capped at ICE_MAX_VLAN_PER_VF filters */
	if (add_v && !ice_is_vf_trusted(vf) &&
	    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
		/* There is no need to let VF know about being not trusted,
		 * so we can just return success message here
		 */

	/* guest VLAN changes are not allowed while a port VLAN is set */
	if (vsi->info.pvid) {
		v_ret = VIRTCHNL_STATUS_ERR_PARAM;

	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
	    test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
		vlan_promisc = true;

	/* --- add path --- */
	for (i = 0; i < vfl->num_elements; i++) {
		u16 vid = vfl->vlan_id[i];

		/* re-check the cap inside the loop: filters accumulate
		 * as earlier iterations succeed
		 */
		if (!ice_is_vf_trusted(vf) &&
		    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
			dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
			/* There is no need to let VF know about being
			 * not trusted, so we can just return success
			 * message here as well.
			 */

		/* we add VLAN 0 by default for each VF so we can enable
		 * Tx VLAN anti-spoof without triggering MDD events so
		 * we don't need to add it again here
		 */

		status = ice_vsi_add_vlan(vsi, vid);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;

		/* Enable VLAN pruning when non-zero VLAN is added */
		if (!vlan_promisc && vid &&
		    !ice_vsi_is_vlan_pruning_ena(vsi)) {
			status = ice_cfg_vlan_pruning(vsi, true, false);
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
		} else if (vlan_promisc) {
			/* Enable Ucast/Mcast VLAN promiscuous mode */
			promisc_m = ICE_PROMISC_VLAN_TX |
				    ICE_PROMISC_VLAN_RX;

			status = ice_set_vsi_promisc(hw, vsi->idx,
				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
				dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",

	/* --- delete path --- */
	/* In case of non_trusted VF, number of VLAN elements passed
	 * to PF for removal might be greater than number of VLANs
	 * filter programmed for that VF - So, use actual number of
	 * VLANS added earlier with add VLAN opcode. In order to avoid
	 * removing VLAN that doesn't exist, which result to sending
	 * erroneous failed message back to the VF
	 */
	num_vf_vlan = vsi->num_vlan;
	for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
		u16 vid = vfl->vlan_id[i];

		/* we add VLAN 0 by default for each VF so we can enable
		 * Tx VLAN anti-spoof without triggering MDD events so
		 * we don't want a VIRTCHNL request to remove it
		 */

		/* Make sure ice_vsi_kill_vlan is successful before
		 * updating VLAN information
		 */
		status = ice_vsi_kill_vlan(vsi, vid);
			v_ret = VIRTCHNL_STATUS_ERR_PARAM;

		/* Disable VLAN pruning when only VLAN 0 is left */
		if (vsi->num_vlan == 1 &&
		    ice_vsi_is_vlan_pruning_ena(vsi))
			ice_cfg_vlan_pruning(vsi, false, false);

		/* Disable Unicast/Multicast VLAN promiscuous mode */
			promisc_m = ICE_PROMISC_VLAN_TX |
				    ICE_PROMISC_VLAN_RX;

			ice_clear_vsi_promisc(hw, vsi->idx,

	/* send the response to the VF */
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
2952 * ice_vc_add_vlan_msg
2953 * @vf: pointer to the VF info
2954 * @msg: pointer to the msg buffer
2956 * Add and program guest VLAN ID
2958 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
2960 return ice_vc_process_vlan_msg(vf, msg, true);
2964 * ice_vc_remove_vlan_msg
2965 * @vf: pointer to the VF info
2966 * @msg: pointer to the msg buffer
2968 * remove programmed guest VLAN ID
2970 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
2972 return ice_vc_process_vlan_msg(vf, msg, false);
2976 * ice_vc_ena_vlan_stripping
2977 * @vf: pointer to the VF info
2979 * Enable VLAN header stripping for a given VF
2981 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
2983 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2984 struct ice_pf *pf = vf->pf;
2985 struct ice_vsi *vsi;
2987 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2988 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2992 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
2993 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2997 vsi = pf->vsi[vf->lan_vsi_idx];
2998 if (ice_vsi_manage_vlan_stripping(vsi, true))
2999 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3002 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
3007 * ice_vc_dis_vlan_stripping
3008 * @vf: pointer to the VF info
3010 * Disable VLAN header stripping for a given VF
3012 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
3014 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3015 struct ice_pf *pf = vf->pf;
3016 struct ice_vsi *vsi;
3018 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3019 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3023 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
3024 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3028 vsi = pf->vsi[vf->lan_vsi_idx];
3030 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3034 if (ice_vsi_manage_vlan_stripping(vsi, false))
3035 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3038 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
3043 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
3044 * @vf: VF to enable/disable VLAN stripping for on initialization
3046 * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set enable VLAN stripping, else if
3047 * the flag is cleared then we want to disable stripping. For example, the flag
3048 * will be cleared when port VLANs are configured by the administrator before
3049 * passing the VF to the guest or if the AVF driver doesn't support VLAN
3052 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
3054 struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
3059 /* don't modify stripping if port VLAN is configured */
3063 if (ice_vf_vlan_offload_ena(vf->driver_caps))
3064 return ice_vsi_manage_vlan_stripping(vsi, true);
3066 return ice_vsi_manage_vlan_stripping(vsi, false);
/**
 * ice_vc_process_vf_msg - Process request from VF
 * @pf: pointer to the PF structure
 * @event: pointer to the AQ event
 *
 * called from the common asq/arq handler to
 * process request from VF
 */
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
	/* opcode, VF id and payload arrive in the admin-queue descriptor */
	u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
	s16 vf_id = le16_to_cpu(event->desc.retval);
	u16 msglen = event->msg_len;
	u8 *msg = event->msg_buf;
	struct ice_vf *vf = NULL;

	dev = ice_pf_to_dev(pf);
	if (ice_validate_vf_id(pf, vf_id)) {

	vf = &pf->vf[vf_id];

	/* Check if VF is disabled. */
	if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {

	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
	if (err == VIRTCHNL_STATUS_ERR_PARAM)

		/* malformed message: NAK it back to the VF and bail out */
		ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
		dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
			vf_id, v_opcode, msglen, err);

	/* dispatch on the virtchnl opcode */
	case VIRTCHNL_OP_VERSION:
		err = ice_vc_get_ver_msg(vf, msg);
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		err = ice_vc_get_vf_res_msg(vf, msg);
		/* stripping mirrors the freshly negotiated capabilities */
		if (ice_vf_init_vlan_stripping(vf))
			dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
		ice_vc_notify_vf_link_state(vf);
	case VIRTCHNL_OP_RESET_VF:
		ice_vc_reset_vf_msg(vf);
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		err = ice_vc_add_mac_addr_msg(vf, msg);
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		err = ice_vc_del_mac_addr_msg(vf, msg);
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		err = ice_vc_cfg_qs_msg(vf, msg);
	case VIRTCHNL_OP_ENABLE_QUEUES:
		err = ice_vc_ena_qs_msg(vf, msg);
		ice_vc_notify_vf_link_state(vf);
	case VIRTCHNL_OP_DISABLE_QUEUES:
		err = ice_vc_dis_qs_msg(vf, msg);
	case VIRTCHNL_OP_REQUEST_QUEUES:
		err = ice_vc_request_qs_msg(vf, msg);
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		err = ice_vc_cfg_irq_map_msg(vf, msg);
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		err = ice_vc_config_rss_key(vf, msg);
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		err = ice_vc_config_rss_lut(vf, msg);
	case VIRTCHNL_OP_GET_STATS:
		err = ice_vc_get_stats_msg(vf, msg);
	case VIRTCHNL_OP_ADD_VLAN:
		err = ice_vc_add_vlan_msg(vf, msg);
	case VIRTCHNL_OP_DEL_VLAN:
		err = ice_vc_remove_vlan_msg(vf, msg);
	case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
		err = ice_vc_ena_vlan_stripping(vf);
	case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
		err = ice_vc_dis_vlan_stripping(vf);
	case VIRTCHNL_OP_UNKNOWN:
		/* unknown/unsupported opcode: reply NOT_SUPPORTED */
		dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
		err = ice_vc_send_msg_to_vf(vf, v_opcode,
					    VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,

		/* Helper function cares less about error return values here
		 * as it is busy with pending work.
		 */
		dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
			vf_id, v_opcode, err);
/**
 * ice_get_vf_cfg - .ndo callback to report a VF's current configuration
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * return VF configuration
 */
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;

	if (ice_validate_vf_id(pf, vf_id))

	vf = &pf->vf[vf_id];
	vsi = pf->vsi[vf->lan_vsi_idx];

	if (ice_check_vf_init(pf, vf))

	/* report the default LAN address as the VF's MAC */
	ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = le16_to_cpu(vsi->info.pvid) & ICE_VLAN_M;
	ivi->qos = (le16_to_cpu(vsi->info.pvid) & ICE_PRIORITY_M) >>
		   ICE_VLAN_PRIORITY_S;

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	/* map the forced/auto link setting back to the netlink enum */
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->tx_rate;
	/* no minimum rate enforcement is implemented */
	ivi->min_tx_rate = 0;
3240 * ice_wait_on_vf_reset
3241 * @vf: The VF being resseting
3243 * Poll to make sure a given VF is ready after reset
3245 static void ice_wait_on_vf_reset(struct ice_vf *vf)
3249 for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) {
3250 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
/**
 * ice_set_vf_mac - .ndo callback to set a VF's MAC address
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: new MAC address (must be a non-zero unicast address)
 *
 * program VF MAC address
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (ice_validate_vf_id(pf, vf_id))

	vf = &pf->vf[vf_id];
	/* Don't set MAC on disabled VF */
	if (ice_is_vf_disabled(vf))

	/* In case VF is in reset mode, wait until it is completed. Depending
	 * on factors like queue disabling routine, this could take ~250ms
	 */
	ice_wait_on_vf_reset(vf);

	if (ice_check_vf_init(pf, vf))

	/* only non-zero unicast addresses are acceptable */
	if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
		netdev_err(netdev, "%pM not a valid unicast address\n", mac);

	/* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
	 * flow will use the updated dflt_lan_addr and add a MAC filter
	 * using ice_add_mac. Also set pf_set_mac to indicate that the PF has
	 * set the MAC address for this VF.
	 */
	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
	vf->pf_set_mac = true;
	netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",

	ice_vc_reset_vf(vf);
/**
 * ice_set_vf_trust - .ndo callback to set a VF's trusted mode
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (ice_validate_vf_id(pf, vf_id))

	vf = &pf->vf[vf_id];
	/* Don't set Trusted Mode on disabled VF */
	if (ice_is_vf_disabled(vf))

	/* In case VF is in reset mode, wait until it is completed. Depending
	 * on factors like queue disabling routine, this could take ~250ms
	 */
	ice_wait_on_vf_reset(vf);

	if (ice_check_vf_init(pf, vf))

	/* Check if already trusted */
	if (trusted == vf->trusted)

	vf->trusted = trusted;
	/* the new privilege level only takes effect after a VF reset */
	ice_vc_reset_vf(vf);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");
/**
 * ice_set_vf_link_state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
	struct ice_pf *pf = ice_netdev_to_pf(netdev);

	if (ice_validate_vf_id(pf, vf_id))

	vf = &pf->vf[vf_id];
	if (ice_check_vf_init(pf, vf))

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* track the physical link state */
		vf->link_forced = false;
	case IFLA_VF_LINK_STATE_ENABLE:
		/* force link up regardless of physical state */
		vf->link_forced = true;
	case IFLA_VF_LINK_STATE_DISABLE:
		/* force link down regardless of physical state */
		vf->link_forced = true;
		vf->link_up = false;

	/* push the (possibly forced) link state to the VF via virtchnl */
	ice_vc_notify_vf_link_state(vf);
3388 * ice_get_vf_stats - populate some stats for the VF
3389 * @netdev: the netdev of the PF
3390 * @vf_id: the host OS identifier (0-255)
3391 * @vf_stats: pointer to the OS memory to be initialized
3393 int ice_get_vf_stats(struct net_device *netdev, int vf_id,
3394 struct ifla_vf_stats *vf_stats)
3396 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3397 struct ice_eth_stats *stats;
3398 struct ice_vsi *vsi;
3401 if (ice_validate_vf_id(pf, vf_id))
3404 vf = &pf->vf[vf_id];
3406 if (ice_check_vf_init(pf, vf))
3409 vsi = pf->vsi[vf->lan_vsi_idx];
3413 ice_update_eth_stats(vsi);
3414 stats = &vsi->eth_stats;
3416 memset(vf_stats, 0, sizeof(*vf_stats));
3418 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
3419 stats->rx_multicast;
3420 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
3421 stats->tx_multicast;
3422 vf_stats->rx_bytes = stats->rx_bytes;
3423 vf_stats->tx_bytes = stats->tx_bytes;
3424 vf_stats->broadcast = stats->rx_broadcast;
3425 vf_stats->multicast = stats->rx_multicast;
3426 vf_stats->rx_dropped = stats->rx_discards;
3427 vf_stats->tx_dropped = stats->tx_discards;