1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
9 #include "ice_dcb_lib.h"
10 #include "ice_devlink.h"
13 * ice_vsi_type_str - maps VSI type enum to string equivalents
14 * @vsi_type: VSI type enum
16 const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
24 return "ICE_VSI_CTRL";
33 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
34 * @vsi: the VSI being configured
35 * @ena: start or stop the Rx rings
37 * First enable/disable all of the Rx rings, flush any remaining writes, and
38 * then verify that they have all been enabled/disabled successfully. This will
39 * let all of the register writes complete when enabling/disabling the Rx rings
40 * before waiting for the change in hardware to complete.
42 static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
47 for (i = 0; i < vsi->num_rxq; i++)
48 ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
50 ice_flush(&vsi->back->hw);
52 for (i = 0; i < vsi->num_rxq; i++) {
53 ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
62 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
65 * On error: returns error code (negative)
66 * On success: returns 0
68 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
70 struct ice_pf *pf = vsi->back;
73 dev = ice_pf_to_dev(pf);
75 /* allocate memory for both Tx and Rx ring pointers */
76 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
77 sizeof(*vsi->tx_rings), GFP_KERNEL);
81 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
82 sizeof(*vsi->rx_rings), GFP_KERNEL);
86 /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
87 vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
88 sizeof(*vsi->txq_map), GFP_KERNEL);
93 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
94 sizeof(*vsi->rxq_map), GFP_KERNEL);
98 /* There is no need to allocate q_vectors for a loopback VSI. */
99 if (vsi->type == ICE_VSI_LB)
102 /* allocate memory for q_vector pointers */
103 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
104 sizeof(*vsi->q_vectors), GFP_KERNEL);
111 devm_kfree(dev, vsi->rxq_map);
113 devm_kfree(dev, vsi->txq_map);
115 devm_kfree(dev, vsi->rx_rings);
117 devm_kfree(dev, vsi->tx_rings);
122 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
123 * @vsi: the VSI being configured
125 static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
131 /* a user could change the values of num_[tr]x_desc using
132 * ethtool -G so we should keep those values instead of
133 * overwriting them with the defaults.
135 if (!vsi->num_rx_desc)
136 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
137 if (!vsi->num_tx_desc)
138 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
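/* Illustration (hypothetical command): after a user runs
 *   ethtool -G <iface> rx 4096 tx 512
 * num_rx_desc/num_tx_desc are already non-zero, so the checks above keep the
 * user-requested ring sizes across VSI rebuilds; the ICE_DFLT_NUM_*_DESC
 * defaults are applied only when no value has been set yet.
 */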
141 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
148 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
149 * @vsi: the VSI being configured
150 * @vf_id: ID of the VF being configured
152 * Determines the number of queues, descriptors and vectors for the given VSI type.
154 static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
156 struct ice_pf *pf = vsi->back;
157 struct ice_vf *vf = NULL;
159 if (vsi->type == ICE_VSI_VF)
164 vsi->alloc_txq = min3(pf->num_lan_msix,
165 ice_get_avail_txq_count(pf),
166 (u16)num_online_cpus());
168 vsi->alloc_txq = vsi->req_txq;
169 vsi->num_txq = vsi->req_txq;
172 pf->num_lan_tx = vsi->alloc_txq;
174 /* only 1 Rx queue unless RSS is enabled */
175 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
178 vsi->alloc_rxq = min3(pf->num_lan_msix,
179 ice_get_avail_rxq_count(pf),
180 (u16)num_online_cpus());
182 vsi->alloc_rxq = vsi->req_rxq;
183 vsi->num_rxq = vsi->req_rxq;
187 pf->num_lan_rx = vsi->alloc_rxq;
189 vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
190 max_t(int, vsi->alloc_rxq,
194 vf = &pf->vf[vsi->vf_id];
195 vsi->alloc_txq = vf->num_vf_qs;
196 vsi->alloc_rxq = vf->num_vf_qs;
197 /* pf->num_msix_per_vf includes (VF miscellaneous vector +
198 * data queue interrupts). Since vsi->num_q_vectors is number
199 * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
200 * original vector count
202 vsi->num_q_vectors = pf->num_msix_per_vf - ICE_NONQ_VECS_VF;
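/* Worked example (illustrative numbers): if pf->num_msix_per_vf were 17
 * and ICE_NONQ_VECS_VF is 1 (the miscellaneous/OICR vector), the VF VSI
 * would be left with 17 - 1 = 16 vectors for data queue interrupts.
 */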
207 vsi->num_q_vectors = 1;
214 dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
218 ice_vsi_set_num_desc(vsi);
222 * ice_get_free_slot - get the next free (NULL) slot index in an array
223 * @array: array to search
224 * @size: size of the array
225 * @curr: last known occupied index to be used as a search hint
227 * void * is being used to keep the functionality generic. This lets us use this
228 * function on any array of pointers.
230 static int ice_get_free_slot(void *array, int size, int curr)
232 int **tmp_array = (int **)array;
235 if (curr < (size - 1) && !tmp_array[curr + 1]) {
240 while ((i < size) && (tmp_array[i]))
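/* Behavior sketch: with array = {A, B, NULL, C}, size = 4 and hint
 * curr = 1, the slot right after the hint (index 2) is NULL and is
 * returned directly; otherwise the loop above falls back to a linear
 * scan from index 0 for the first NULL entry.
 */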
251 * ice_vsi_delete - delete a VSI from the switch
252 * @vsi: pointer to VSI being removed
254 static void ice_vsi_delete(struct ice_vsi *vsi)
256 struct ice_pf *pf = vsi->back;
257 struct ice_vsi_ctx *ctxt;
258 enum ice_status status;
260 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
264 if (vsi->type == ICE_VSI_VF)
265 ctxt->vf_num = vsi->vf_id;
266 ctxt->vsi_num = vsi->vsi_num;
268 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
270 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
272 dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %s\n",
273 vsi->vsi_num, ice_stat_str(status));
279 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
280 * @vsi: pointer to VSI being cleared
282 static void ice_vsi_free_arrays(struct ice_vsi *vsi)
284 struct ice_pf *pf = vsi->back;
287 dev = ice_pf_to_dev(pf);
289 /* free the ring and vector containers */
290 if (vsi->q_vectors) {
291 devm_kfree(dev, vsi->q_vectors);
292 vsi->q_vectors = NULL;
295 devm_kfree(dev, vsi->tx_rings);
296 vsi->tx_rings = NULL;
299 devm_kfree(dev, vsi->rx_rings);
300 vsi->rx_rings = NULL;
303 devm_kfree(dev, vsi->txq_map);
307 devm_kfree(dev, vsi->rxq_map);
313 * ice_vsi_clear - clean up and deallocate the provided VSI
314 * @vsi: pointer to VSI being cleared
316 * This deallocates the VSI's queue resources, removes it from the PF's
317 * VSI array if necessary, and deallocates the VSI
319 * Returns 0 on success, negative on failure
321 static int ice_vsi_clear(struct ice_vsi *vsi)
323 struct ice_pf *pf = NULL;
333 dev = ice_pf_to_dev(pf);
335 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
336 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
340 mutex_lock(&pf->sw_mutex);
341 /* updates the PF for this cleared VSI */
343 pf->vsi[vsi->idx] = NULL;
344 if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
345 pf->next_vsi = vsi->idx;
347 ice_vsi_free_arrays(vsi);
348 mutex_unlock(&pf->sw_mutex);
349 devm_kfree(dev, vsi);
355 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
356 * @irq: interrupt number
357 * @data: pointer to a q_vector
359 static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
361 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
363 if (!q_vector->tx.ring)
366 #define FDIR_RX_DESC_CLEAN_BUDGET 64
367 ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
368 ice_clean_ctrl_tx_irq(q_vector->tx.ring);
374 * ice_msix_clean_rings - MSIX mode Interrupt Handler
375 * @irq: interrupt number
376 * @data: pointer to a q_vector
378 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
380 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
382 if (!q_vector->tx.ring && !q_vector->rx.ring)
385 napi_schedule(&q_vector->napi);
391 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
392 * @pf: board private structure
393 * @vsi_type: type of VSI
394 * @vf_id: ID of the VF being configured
396 * returns a pointer to a VSI on success, NULL on failure.
398 static struct ice_vsi *
399 ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
401 struct device *dev = ice_pf_to_dev(pf);
402 struct ice_vsi *vsi = NULL;
404 /* Need to protect the allocation of the VSIs at the PF level */
405 mutex_lock(&pf->sw_mutex);
407 /* If we have already allocated our maximum number of VSIs,
408 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
409 * is available to be populated
411 if (pf->next_vsi == ICE_NO_VSI) {
412 dev_dbg(dev, "out of VSI slots!\n");
416 vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
420 vsi->type = vsi_type;
422 set_bit(__ICE_DOWN, vsi->state);
424 if (vsi_type == ICE_VSI_VF)
425 ice_vsi_set_num_qs(vsi, vf_id);
427 ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
431 if (ice_vsi_alloc_arrays(vsi))
434 /* Setup default MSIX irq handler for VSI */
435 vsi->irq_handler = ice_msix_clean_rings;
438 if (ice_vsi_alloc_arrays(vsi))
441 /* Setup ctrl VSI MSIX irq handler */
442 vsi->irq_handler = ice_msix_clean_ctrl_vsi;
445 if (ice_vsi_alloc_arrays(vsi))
449 if (ice_vsi_alloc_arrays(vsi))
453 dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
457 if (vsi->type == ICE_VSI_CTRL) {
458 /* Use the last VSI slot as the index for the control VSI */
459 vsi->idx = pf->num_alloc_vsi - 1;
460 pf->ctrl_vsi_idx = vsi->idx;
461 pf->vsi[vsi->idx] = vsi;
463 /* fill slot and make note of the index */
464 vsi->idx = pf->next_vsi;
465 pf->vsi[pf->next_vsi] = vsi;
467 /* prepare pf->next_vsi for next use */
468 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
474 devm_kfree(dev, vsi);
477 mutex_unlock(&pf->sw_mutex);
482 * ice_alloc_fd_res - Allocate FD resource for a VSI
483 * @vsi: pointer to the ice_vsi
485 * This allocates the FD resources
487 * Returns 0 on success, -EPERM on no-op or -EIO on failure
489 static int ice_alloc_fd_res(struct ice_vsi *vsi)
491 struct ice_pf *pf = vsi->back;
494 /* Flow Director filters are only allocated/assigned to the PF VSI which
495 * passes the traffic. The CTRL VSI is only used to add/delete filters
496 * so we don't allocate resources to it
499 /* FD filters from guaranteed pool per VSI */
500 g_val = pf->hw.func_caps.fd_fltr_guar;
504 /* FD filters from best effort pool */
505 b_val = pf->hw.func_caps.fd_fltr_best_effort;
509 if (vsi->type != ICE_VSI_PF)
512 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
515 vsi->num_gfltr = g_val / pf->num_alloc_vsi;
517 /* each VSI gets same "best_effort" quota */
518 vsi->num_bfltr = b_val;
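/* Worked example (illustrative capabilities): with fd_fltr_guar = 1024 and
 * num_alloc_vsi = 16, each VSI gets 1024 / 16 = 64 guaranteed filters in
 * num_gfltr, while num_bfltr is set to the full best-effort capability,
 * a quota shared by all VSIs.
 */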
524 * ice_vsi_get_qs - Assign queues from PF to VSI
525 * @vsi: the VSI to assign queues to
527 * Returns 0 on success and a negative value on error
529 static int ice_vsi_get_qs(struct ice_vsi *vsi)
531 struct ice_pf *pf = vsi->back;
532 struct ice_qs_cfg tx_qs_cfg = {
533 .qs_mutex = &pf->avail_q_mutex,
534 .pf_map = pf->avail_txqs,
535 .pf_map_size = pf->max_pf_txqs,
536 .q_count = vsi->alloc_txq,
537 .scatter_count = ICE_MAX_SCATTER_TXQS,
538 .vsi_map = vsi->txq_map,
540 .mapping_mode = ICE_VSI_MAP_CONTIG
542 struct ice_qs_cfg rx_qs_cfg = {
543 .qs_mutex = &pf->avail_q_mutex,
544 .pf_map = pf->avail_rxqs,
545 .pf_map_size = pf->max_pf_rxqs,
546 .q_count = vsi->alloc_rxq,
547 .scatter_count = ICE_MAX_SCATTER_RXQS,
548 .vsi_map = vsi->rxq_map,
550 .mapping_mode = ICE_VSI_MAP_CONTIG
554 ret = __ice_vsi_get_qs(&tx_qs_cfg);
557 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
559 ret = __ice_vsi_get_qs(&rx_qs_cfg);
562 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
568 * ice_vsi_put_qs - Release queues from VSI to PF
569 * @vsi: the VSI that is going to release queues
571 static void ice_vsi_put_qs(struct ice_vsi *vsi)
573 struct ice_pf *pf = vsi->back;
576 mutex_lock(&pf->avail_q_mutex);
578 for (i = 0; i < vsi->alloc_txq; i++) {
579 clear_bit(vsi->txq_map[i], pf->avail_txqs);
580 vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
583 for (i = 0; i < vsi->alloc_rxq; i++) {
584 clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
585 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
588 mutex_unlock(&pf->avail_q_mutex);
593 * @pf: pointer to the PF struct
595 * returns true if driver is in safe mode, false otherwise
597 bool ice_is_safe_mode(struct ice_pf *pf)
599 return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
603 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
604 * @vsi: the VSI being cleaned up
606 * This function deletes the RSS input set for all flows that were configured
609 static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
611 struct ice_pf *pf = vsi->back;
612 enum ice_status status;
614 if (ice_is_safe_mode(pf))
617 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
619 dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %s\n",
620 vsi->vsi_num, ice_stat_str(status));
624 * ice_rss_clean - Delete RSS related VSI structures and configuration
625 * @vsi: the VSI being removed
627 static void ice_rss_clean(struct ice_vsi *vsi)
629 struct ice_pf *pf = vsi->back;
632 dev = ice_pf_to_dev(pf);
634 if (vsi->rss_hkey_user)
635 devm_kfree(dev, vsi->rss_hkey_user);
636 if (vsi->rss_lut_user)
637 devm_kfree(dev, vsi->rss_lut_user);
639 ice_vsi_clean_rss_flow_fld(vsi);
640 /* remove RSS replay list */
641 if (!ice_is_safe_mode(pf))
642 ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
646 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
647 * @vsi: the VSI being configured
649 static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
651 struct ice_hw_common_caps *cap;
652 struct ice_pf *pf = vsi->back;
654 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
659 cap = &pf->hw.func_caps.common_cap;
662 /* PF VSI will inherit RSS instance of PF */
663 vsi->rss_table_size = (u16)cap->rss_table_size;
664 vsi->rss_size = min_t(u16, num_online_cpus(),
665 BIT(cap->rss_table_entry_width));
666 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
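/* Worked example (illustrative widths): with rss_table_entry_width = 7,
 * BIT(7) = 128 queues can be addressed per LUT entry, so a host with
 * 16 online CPUs ends up with rss_size = min(16, 128) = 16.
 */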
669 /* VF VSI will get a small RSS table.
670 * For VSI_LUT, LUT size should be set to 64 bytes.
672 vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
673 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
674 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
679 dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
680 ice_vsi_type_str(vsi->type));
686 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
687 * @ctxt: the VSI context being set
689 * This initializes a default VSI context for all sections except the Queues.
691 static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
695 memset(&ctxt->info, 0, sizeof(ctxt->info));
696 /* VSIs should be allocated from the shared pool */
697 ctxt->alloc_from_pool = true;
698 /* Src pruning enabled by default */
699 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
700 /* Traffic from VSI can be sent to LAN */
701 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
702 /* By default, bits 3 and 4 in vlan_flags are 0s, which results in legacy
703 * behavior (show VLAN, DEI, and UP) in the descriptor. Also, allow all
704 * packets untagged/tagged.
706 ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
707 ICE_AQ_VSI_VLAN_MODE_M) >>
708 ICE_AQ_VSI_VLAN_MODE_S);
709 /* Have 1:1 UP mapping for both ingress/egress tables */
710 table |= ICE_UP_TABLE_TRANSLATE(0, 0);
711 table |= ICE_UP_TABLE_TRANSLATE(1, 1);
712 table |= ICE_UP_TABLE_TRANSLATE(2, 2);
713 table |= ICE_UP_TABLE_TRANSLATE(3, 3);
714 table |= ICE_UP_TABLE_TRANSLATE(4, 4);
715 table |= ICE_UP_TABLE_TRANSLATE(5, 5);
716 table |= ICE_UP_TABLE_TRANSLATE(6, 6);
717 table |= ICE_UP_TABLE_TRANSLATE(7, 7);
718 ctxt->info.ingress_table = cpu_to_le32(table);
719 ctxt->info.egress_table = cpu_to_le32(table);
720 /* Have 1:1 UP mapping for outer to inner UP table */
721 ctxt->info.outer_up_table = cpu_to_le32(table);
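/* Packing sketch, assuming 3-bit UP fields as laid out in ice_adminq_cmd.h
 * (UP0 in bits 0-2, UP1 in bits 3-5, and so on): the 1:1 mapping built above
 * stores value N in slot N, i.e. table = 0 | (1 << 3) | (2 << 6) | ... | (7 << 21).
 */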
722 /* No outer tag support; outer_tag_flags remains zero */
726 * ice_vsi_setup_q_map - Setup a VSI queue map
727 * @vsi: the VSI being configured
728 * @ctxt: VSI context structure
730 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
732 u16 offset = 0, qmap = 0, tx_count = 0;
733 u16 qcount_tx = vsi->alloc_txq;
734 u16 qcount_rx = vsi->alloc_rxq;
735 u16 tx_numq_tc, rx_numq_tc;
736 u16 pow = 0, max_rss = 0;
737 bool ena_tc0 = false;
741 /* at least TC0 should be enabled by default */
742 if (vsi->tc_cfg.numtc) {
743 if (!(vsi->tc_cfg.ena_tc & BIT(0)))
751 vsi->tc_cfg.ena_tc |= 1;
754 rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
757 tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
761 /* TC mapping is a function of the number of Rx queues assigned to the
762 * VSI for each traffic class and the offset of these queues.
763 * The first 10 bits are the queue offset for TC0, and the next 4 bits are the
764 * number of queues allocated to TC0. The number of queues is a power of 2.
766 * If a TC is not enabled, its queue offset is set to 0 and one queue is
767 * allocated, so that traffic for that TC is sent to the default queue.
770 * Setup number and offset of Rx queues for all TCs for the VSI
773 qcount_rx = rx_numq_tc;
775 /* qcount will change if RSS is enabled */
776 if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
777 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
778 if (vsi->type == ICE_VSI_PF)
779 max_rss = ICE_MAX_LG_RSS_QS;
781 max_rss = ICE_MAX_RSS_QS_PER_VF;
782 qcount_rx = min_t(u16, rx_numq_tc, max_rss);
784 qcount_rx = min_t(u16, qcount_rx,
789 /* find the (rounded up) power-of-2 of qcount */
790 pow = (u16)order_base_2(qcount_rx);
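/* e.g., qcount_rx = 5 yields pow = order_base_2(5) = 3; queue counts are
 * rounded up to the next power of 2 (here 8) for the qmap encoding below.
 */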
792 ice_for_each_traffic_class(i) {
793 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
794 /* TC is not enabled */
795 vsi->tc_cfg.tc_info[i].qoffset = 0;
796 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
797 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
798 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
799 ctxt->info.tc_mapping[i] = 0;
804 vsi->tc_cfg.tc_info[i].qoffset = offset;
805 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
806 vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
807 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
809 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
810 ICE_AQ_VSI_TC_Q_OFFSET_M) |
811 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
812 ICE_AQ_VSI_TC_Q_NUM_M);
814 tx_count += tx_numq_tc;
815 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
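/* Encoding example: a TC at qoffset = 4 with pow = 2 (up to 4 queues) is
 * stored as qmap = (4 << ICE_AQ_VSI_TC_Q_OFFSET_S) |
 * (2 << ICE_AQ_VSI_TC_Q_NUM_S), masked to the respective fields; hardware
 * derives the TC's queue range from just these two values.
 */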
818 /* if offset is non-zero, it was calculated correctly based on the
819 * enabled TCs for the given VSI; otherwise qcount_rx is already
820 * correct and non-zero because it is based on the VSI's
821 * allocated Rx queues, which is at least 1 (hence qcount_tx will be
825 vsi->num_rxq = offset;
827 vsi->num_rxq = qcount_rx;
829 vsi->num_txq = tx_count;
831 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
832 dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
833 /* since there is a chance that num_rxq could have been changed
834 * in the above for loop, make num_txq equal to num_rxq.
836 vsi->num_txq = vsi->num_rxq;
839 /* Rx queue mapping */
840 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
841 /* q_mapping buffer holds the info for the first queue allocated for
842 * this VSI in the PF space and also the number of queues associated
845 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
846 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
850 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
851 * @ctxt: the VSI context being set
852 * @vsi: the VSI being configured
854 static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
856 u8 dflt_q_group, dflt_q_prio;
857 u16 dflt_q, report_q, val;
859 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL)
862 val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
863 ctxt->info.valid_sections |= cpu_to_le16(val);
869 /* enable flow director filtering/programming */
870 val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
871 ctxt->info.fd_options = cpu_to_le16(val);
872 /* max of allocated flow director filters */
873 ctxt->info.max_fd_fltr_dedicated =
874 cpu_to_le16(vsi->num_gfltr);
875 /* max of shared flow director filters any VSI may program */
876 ctxt->info.max_fd_fltr_shared =
877 cpu_to_le16(vsi->num_bfltr);
878 /* default queue index within the VSI of the default FD */
879 val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
880 ICE_AQ_VSI_FD_DEF_Q_M);
881 /* target queue or queue group to the FD filter */
882 val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
883 ICE_AQ_VSI_FD_DEF_GRP_M);
884 ctxt->info.fd_def_q = cpu_to_le16(val);
885 /* queue index on which FD filter completion is reported */
886 val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
887 ICE_AQ_VSI_FD_REPORT_Q_M);
888 /* priority of the default qindex action */
889 val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
890 ICE_AQ_VSI_FD_DEF_PRIORITY_M);
891 ctxt->info.fd_report_opt = cpu_to_le16(val);
895 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
896 * @ctxt: the VSI context being set
897 * @vsi: the VSI being configured
899 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
901 u8 lut_type, hash_type;
906 dev = ice_pf_to_dev(pf);
910 /* PF VSI will inherit RSS instance of PF */
911 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
912 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
915 /* VF VSI will get a small RSS table which is a VSI LUT type */
916 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
917 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
920 dev_dbg(dev, "Unsupported VSI type %s\n",
921 ice_vsi_type_str(vsi->type));
925 ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
926 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
927 ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
928 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
932 * ice_vsi_init - Create and initialize a VSI
933 * @vsi: the VSI being configured
934 * @init_vsi: is this call creating a VSI
936 * This initializes a VSI context depending on the VSI type to be added and
937 * passes it down to the add_vsi aq command to create a new VSI.
939 static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
941 struct ice_pf *pf = vsi->back;
942 struct ice_hw *hw = &pf->hw;
943 struct ice_vsi_ctx *ctxt;
947 dev = ice_pf_to_dev(pf);
948 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
956 ctxt->flags = ICE_AQ_VSI_TYPE_PF;
959 ctxt->flags = ICE_AQ_VSI_TYPE_VF;
960 /* VF number here is the absolute VF number (0-255) */
961 ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
968 ice_set_dflt_vsi_ctx(ctxt);
969 if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
970 ice_set_fd_vsi_ctx(ctxt, vsi);
971 /* if the switch is in VEB mode, allow VSI loopback */
972 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
973 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
975 /* Set LUT type and HASH type if RSS is enabled */
976 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
977 vsi->type != ICE_VSI_CTRL) {
978 ice_set_rss_vsi_ctx(ctxt, vsi);
979 /* if updating the VSI context, make sure to set valid_sections
980 * to indicate which section of the VSI context is being updated
983 ctxt->info.valid_sections |=
984 cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
987 ctxt->info.sw_id = vsi->port_info->sw_id;
988 ice_vsi_setup_q_map(vsi, ctxt);
989 if (!init_vsi) /* means the VSI is being updated */
990 /* need to indicate which sections of the VSI context are
993 ctxt->info.valid_sections |=
994 cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
996 /* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
999 if (vsi->type == ICE_VSI_VF) {
1000 ctxt->info.valid_sections |=
1001 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1002 if (pf->vf[vsi->vf_id].spoofchk) {
1003 ctxt->info.sec_flags |=
1004 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
1005 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
1006 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
1008 ctxt->info.sec_flags &=
1009 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
1010 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
1011 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
1015 /* Allow control frames out of main VSI */
1016 if (vsi->type == ICE_VSI_PF) {
1017 ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
1018 ctxt->info.valid_sections |=
1019 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1023 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
1025 dev_err(dev, "Add VSI failed, err %d\n", ret);
1030 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
1032 dev_err(dev, "Update VSI failed, err %d\n", ret);
1038 /* keep context for update VSI operations */
1039 vsi->info = ctxt->info;
1041 /* record VSI number returned */
1042 vsi->vsi_num = ctxt->vsi_num;
1050 * ice_free_res - free a block of resources
1051 * @res: pointer to the resource
1052 * @index: starting index previously returned by ice_get_res
1053 * @id: identifier to track owner
1055 * Returns number of resources freed
1057 int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
1062 if (!res || index >= res->end)
1065 id |= ICE_RES_VALID_BIT;
1066 for (i = index; i < res->end && res->list[i] == id; i++) {
1075 * ice_search_res - Search the tracker for a block of resources
1076 * @res: pointer to the resource
1077 * @needed: size of the block needed
1078 * @id: identifier to track owner
1080 * Returns the base item index of the block, or -ENOMEM for error
1082 static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
1084 u16 start = 0, end = 0;
1086 if (needed > res->end)
1089 id |= ICE_RES_VALID_BIT;
1092 /* skip already allocated entries */
1093 if (res->list[end++] & ICE_RES_VALID_BIT) {
1095 if ((start + needed) > res->end)
1099 if (end == (start + needed)) {
1102 /* there was enough, so assign it to the requestor */
1104 res->list[i++] = id;
1108 } while (end < res->end);
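/* First-fit example: searching for needed = 3 in a tracker whose entries
 * 0, 1 and 3 carry ICE_RES_VALID_BIT, the window restarts after each
 * allocated entry and settles on indices 4..6; those entries are stamped
 * with the caller's ID and base index 4 is returned.
 */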
1114 * ice_get_free_res_count - Get free count from a resource tracker
1115 * @res: Resource tracker instance
1117 static u16 ice_get_free_res_count(struct ice_res_tracker *res)
1121 for (i = 0; i < res->end; i++)
1122 if (!(res->list[i] & ICE_RES_VALID_BIT))
1129 * ice_get_res - get a block of resources
1130 * @pf: board private structure
1131 * @res: pointer to the resource
1132 * @needed: size of the block needed
1133 * @id: identifier to track owner
1135 * Returns the base item index of the block, or negative for error
1138 ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
1143 if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
1144 dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
1145 needed, res->num_entries, id);
1149 return ice_search_res(res, needed, id);
1153 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
1154 * @vsi: ptr to the VSI
1156 * This should only be called after ice_vsi_alloc() which allocates the
1157 * corresponding SW VSI structure and initializes num_queue_pairs for the
1158 * newly allocated VSI.
1160 * Returns 0 on success or negative on failure
1162 static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
1164 struct ice_pf *pf = vsi->back;
1169 dev = ice_pf_to_dev(pf);
1170 /* SRIOV doesn't grab irq_tracker entries for each VSI */
1171 if (vsi->type == ICE_VSI_VF)
1174 if (vsi->base_vector) {
1175 dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
1176 vsi->vsi_num, vsi->base_vector);
1180 num_q_vectors = vsi->num_q_vectors;
1181 /* reserve slots from OS requested IRQs */
1182 base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);
1185 dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
1186 ice_get_free_res_count(pf->irq_tracker),
1187 ice_vsi_type_str(vsi->type), vsi->idx, num_q_vectors);
1190 vsi->base_vector = (u16)base;
1191 pf->num_avail_sw_msix -= num_q_vectors;
1197 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
1198 * @vsi: the VSI having rings deallocated
1200 static void ice_vsi_clear_rings(struct ice_vsi *vsi)
1204 /* Avoid stale references by clearing map from vector to ring */
1205 if (vsi->q_vectors) {
1206 ice_for_each_q_vector(vsi, i) {
1207 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1210 q_vector->tx.ring = NULL;
1211 q_vector->rx.ring = NULL;
1216 if (vsi->tx_rings) {
1217 for (i = 0; i < vsi->alloc_txq; i++) {
1218 if (vsi->tx_rings[i]) {
1219 kfree_rcu(vsi->tx_rings[i], rcu);
1220 WRITE_ONCE(vsi->tx_rings[i], NULL);
1224 if (vsi->rx_rings) {
1225 for (i = 0; i < vsi->alloc_rxq; i++) {
1226 if (vsi->rx_rings[i]) {
1227 kfree_rcu(vsi->rx_rings[i], rcu);
1228 WRITE_ONCE(vsi->rx_rings[i], NULL);
1235 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
1236 * @vsi: VSI which is having rings allocated
1238 static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
1240 struct ice_pf *pf = vsi->back;
1244 dev = ice_pf_to_dev(pf);
1245 /* Allocate Tx rings */
1246 for (i = 0; i < vsi->alloc_txq; i++) {
1247 struct ice_ring *ring;
1249 /* allocate with kzalloc(), free with kfree_rcu() */
1250 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1256 ring->reg_idx = vsi->txq_map[i];
1257 ring->ring_active = false;
1260 ring->count = vsi->num_tx_desc;
1261 WRITE_ONCE(vsi->tx_rings[i], ring);
1264 /* Allocate Rx rings */
1265 for (i = 0; i < vsi->alloc_rxq; i++) {
1266 struct ice_ring *ring;
1268 /* allocate with kzalloc(), free with kfree_rcu() */
1269 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1274 ring->reg_idx = vsi->rxq_map[i];
1275 ring->ring_active = false;
1277 ring->netdev = vsi->netdev;
1279 ring->count = vsi->num_rx_desc;
1280 WRITE_ONCE(vsi->rx_rings[i], ring);
1286 ice_vsi_clear_rings(vsi);
1291 * ice_vsi_manage_rss_lut - disable/enable RSS
1292 * @vsi: the VSI being changed
1293 * @ena: boolean value indicating if this is an enable or disable request
1295 * On a disable request for RSS, this function will zero out the RSS
1296 * LUT, while on an enable request it will reconfigure the RSS
1299 int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
1304 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1309 if (vsi->rss_lut_user)
1310 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1312 ice_fill_rss_lut(lut, vsi->rss_table_size,
1316 err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
1322 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
1323 * @vsi: VSI to be configured
1325 static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
1327 struct ice_aqc_get_set_rss_keys *key;
1328 struct ice_pf *pf = vsi->back;
1329 enum ice_status status;
1334 dev = ice_pf_to_dev(pf);
1335 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
1337 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1341 if (vsi->rss_lut_user)
1342 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1344 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
1346 status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
1347 vsi->rss_table_size);
1350 dev_err(dev, "set_rss_lut failed, error %s\n",
1351 ice_stat_str(status));
1353 goto ice_vsi_cfg_rss_exit;
1356 key = kzalloc(sizeof(*key), GFP_KERNEL);
1359 goto ice_vsi_cfg_rss_exit;
1362 if (vsi->rss_hkey_user)
1364 (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
1365 ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1367 netdev_rss_key_fill((void *)key,
1368 ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1370 status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
1373 dev_err(dev, "set_rss_key failed, error %s\n",
1374 ice_stat_str(status));
1379 ice_vsi_cfg_rss_exit:
1385 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
1386 * @vsi: VSI to be configured
1388 * This function will only be called during the VF VSI setup. Upon successful
1389 * completion of package download, this function will configure default RSS
1390 * input sets for VF VSI.
1392 static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
1394 struct ice_pf *pf = vsi->back;
1395 enum ice_status status;
1398 dev = ice_pf_to_dev(pf);
1399 if (ice_is_safe_mode(pf)) {
1400 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1405 status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
1407 dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %s\n",
1408 vsi->vsi_num, ice_stat_str(status));
1412 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
1413 * @vsi: VSI to be configured
1415 * This function will only be called after a successful package download
1416 * call during PF initialization. Since the downloaded package will erase the
1417 * RSS section, this function will configure RSS input sets for different
1418 * flow types. The last profile added has the highest priority, therefore 2
1419 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
1420 * (i.e. IPv4 src/dst TCP src/dst port).
1422 static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
1424 u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
1425 struct ice_pf *pf = vsi->back;
1426 struct ice_hw *hw = &pf->hw;
1427 enum ice_status status;
1430 dev = ice_pf_to_dev(pf);
1431 if (ice_is_safe_mode(pf)) {
1432 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1436 /* configure RSS for IPv4 with input set IP src/dst */
1437 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
1438 ICE_FLOW_SEG_HDR_IPV4);
1440 dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %s\n",
1441 vsi_num, ice_stat_str(status));
1443 /* configure RSS for IPv6 with input set IPv6 src/dst */
1444 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
1445 ICE_FLOW_SEG_HDR_IPV6);
1447 dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %s\n",
1448 vsi_num, ice_stat_str(status));
1450 /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
1451 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
1452 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
1454 dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %s\n",
1455 vsi_num, ice_stat_str(status));
1457 /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
1458 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
1459 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
1461 dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %s\n",
1462 vsi_num, ice_stat_str(status));
1464 /* configure RSS for sctp4 with input set IP src/dst */
1465 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
1466 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
1468 dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %s\n",
1469 vsi_num, ice_stat_str(status));
1471 /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
1472 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
1473 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
1475 dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %s\n",
1476 vsi_num, ice_stat_str(status));
1478 /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
1479 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
1480 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
1482 dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %s\n",
1483 vsi_num, ice_stat_str(status));
1485 /* configure RSS for sctp6 with input set IPv6 src/dst */
1486 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
1487 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
1489 dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %s\n",
1490 vsi_num, ice_stat_str(status));
1494 * ice_pf_state_is_nominal - checks the PF for nominal state
1495 * @pf: pointer to PF to check
1497 * Check the PF's state for a collection of bits that would indicate
1498 * the PF is in a state that would inhibit normal driver
1499 * operation.
1501 * Returns true if PF is in a nominal state, false otherwise
1503 bool ice_pf_state_is_nominal(struct ice_pf *pf)
1505 DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
1510 bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
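/* check_bits now has bits [0, __ICE_STATE_NOMINAL_CHECK_BITS) set, so
 * bitmap_intersects() below is true (PF not nominal) as soon as any of
 * those low-order state bits, e.g. a reset or down flag, is set in
 * pf->state.
 */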
1511 if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
1518 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
1519 * @vsi: the VSI to be updated
1521 void ice_update_eth_stats(struct ice_vsi *vsi)
1523 struct ice_eth_stats *prev_es, *cur_es;
1524 struct ice_hw *hw = &vsi->back->hw;
1525 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
1527 prev_es = &vsi->eth_stats_prev;
1528 cur_es = &vsi->eth_stats;
1530 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
1531 &prev_es->rx_bytes, &cur_es->rx_bytes);
1533 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
1534 &prev_es->rx_unicast, &cur_es->rx_unicast);
1536 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
1537 &prev_es->rx_multicast, &cur_es->rx_multicast);
1539 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
1540 &prev_es->rx_broadcast, &cur_es->rx_broadcast);
1542 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
1543 &prev_es->rx_discards, &cur_es->rx_discards);
1545 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
1546 &prev_es->tx_bytes, &cur_es->tx_bytes);
1548 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
1549 &prev_es->tx_unicast, &cur_es->tx_unicast);
1551 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
1552 &prev_es->tx_multicast, &cur_es->tx_multicast);
1554 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
1555 &prev_es->tx_broadcast, &cur_es->tx_broadcast);
1557 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
1558 &prev_es->tx_errors, &cur_es->tx_errors);
1560 vsi->stat_offsets_loaded = true;
1564 * ice_vsi_add_vlan - Add VSI membership for given VLAN
1565 * @vsi: the VSI being configured
1566 * @vid: VLAN ID to be added
1567 * @action: filter action to be performed on match
1570 ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid, enum ice_sw_fwd_act_type action)
1572 struct ice_pf *pf = vsi->back;
1576 dev = ice_pf_to_dev(pf);
1578 if (!ice_fltr_add_vlan(vsi, vid, action)) {
1582 dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
1590 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
1591 * @vsi: the VSI being configured
1592 * @vid: VLAN ID to be removed
1594 * Returns 0 on success and negative on failure
1596 int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
1598 struct ice_pf *pf = vsi->back;
1599 enum ice_status status;
1603 dev = ice_pf_to_dev(pf);
1605 status = ice_fltr_remove_vlan(vsi, vid, ICE_FWD_TO_VSI);
1608 } else if (status == ICE_ERR_DOES_NOT_EXIST) {
1609 dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %s\n",
1610 vid, vsi->vsi_num, ice_stat_str(status));
1612 dev_err(dev, "Error removing VLAN %d on vsi %i error: %s\n",
1613 vid, vsi->vsi_num, ice_stat_str(status));
1621 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
1624 void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
1626 if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
1627 vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
1628 vsi->rx_buf_len = ICE_RXBUF_2048;
1629 #if (PAGE_SIZE < 8192)
1630 } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
1631 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
1632 vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
1633 vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
1636 vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
1637 #if (PAGE_SIZE < 8192)
1638 vsi->rx_buf_len = ICE_RXBUF_3072;
1640 vsi->rx_buf_len = ICE_RXBUF_2048;
1646 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
1648 * @pf_q: index of the Rx queue in the PF's queue space
1649 * @rxdid: flexible descriptor RXDID
1650 * @prio: priority for the RXDID for this queue
1653 ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
1655 int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
1657 /* clear any previous values */
1658 regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
1659 QRXFLXP_CNTXT_RXDID_PRIO_M |
1660 QRXFLXP_CNTXT_TS_M);
1662 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
1663 QRXFLXP_CNTXT_RXDID_IDX_M;
1665 regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
1666 QRXFLXP_CNTXT_RXDID_PRIO_M;
1668 wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
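/* Usage sketch (illustrative arguments): selecting the flex NIC descriptor
 * profile at the highest priority for queue pf_q might look like
 *   ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_FLEX_NIC, 0x3);
 * rxdid picks the flexible descriptor layout and prio arbitrates when
 * several clients request different RXDIDs for the same queue.
 */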
1672 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
1673 * @vsi: the VSI being configured
1675 * Return 0 on success and a negative value on error
1676 * Configure the Rx VSI for operation.
1678 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
1682 if (vsi->type == ICE_VSI_VF)
1685 ice_vsi_cfg_frame_size(vsi);
1687 /* set up individual rings */
1688 for (i = 0; i < vsi->num_rxq; i++) {
1691 err = ice_setup_rx_ctx(vsi->rx_rings[i]);
1693 dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
1703 * ice_vsi_cfg_txqs - Configure the VSI for Tx
1704 * @vsi: the VSI being configured
1705 * @rings: Tx ring array to be configured
1707 * Return 0 on success and a negative value on error
1708 * Configure the Tx VSI for operation.
1711 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
1713 struct ice_aqc_add_tx_qgrp *qg_buf;
1717 qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
1721 qg_buf->num_txqs = 1;
1723 for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
1724 err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
1735 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
1736 * @vsi: the VSI being configured
1738 * Return 0 on success and a negative value on error
1739 * Configure the Tx VSI for operation.
1741 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
1743 return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
1747 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
1748 * @vsi: the VSI being configured
1750 * Return 0 on success and a negative value on error
1751 * Configure the Tx queues dedicated for XDP in given VSI for operation.
1753 int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
1758 ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
1762 for (i = 0; i < vsi->num_xdp_txq; i++)
1763 vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
1769 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
1770 * @intrl: interrupt rate limit in usecs
1771 * @gran: interrupt rate limit granularity in usecs
1773 * This function converts a decimal interrupt rate limit in usecs to the format
1774 * expected by firmware.
1776 u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
1778 u32 val = intrl / gran;
1781 return val | GLINT_RATE_INTRL_ENA_M;
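/* Worked example (illustrative granularity): with intrl = 50 us and
 * gran = 2 us, val = 50 / 2 = 25 register units, which is OR'd with
 * GLINT_RATE_INTRL_ENA_M so that hardware actually applies the limit.
 */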
1786 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
1787 * @vsi: the VSI being configured
1789 * This configures MSIX mode interrupts for the PF VSI, and should not be used
1792 void ice_vsi_cfg_msix(struct ice_vsi *vsi)
1794 struct ice_pf *pf = vsi->back;
1795 struct ice_hw *hw = &pf->hw;
1796 u16 txq = 0, rxq = 0;
1799 for (i = 0; i < vsi->num_q_vectors; i++) {
1800 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1801 u16 reg_idx = q_vector->reg_idx;
1803 ice_cfg_itr(hw, q_vector);
1805 wr32(hw, GLINT_RATE(reg_idx),
1806 ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
1808 /* Both the Transmit Queue Interrupt Cause Control register
1809 * and the Receive Queue Interrupt Cause Control register
1810 * expect the MSIX_INDX field to be the vector index
1811 * within the function space and not the absolute
1812 * vector index across PF or across device.
1813 * For SR-IOV VF VSIs, the queue vector index always starts
1814 * at 1 since the first vector index (0) is used for OICR
1815 * in VF space. Since VMDq and other PF VSIs are within
1816 * the PF function space, use the vector index that is
1817 * tracked for this PF.
1819 for (q = 0; q < q_vector->num_ring_tx; q++) {
1820 ice_cfg_txq_interrupt(vsi, txq, reg_idx,
1821 q_vector->tx.itr_idx);
1825 for (q = 0; q < q_vector->num_ring_rx; q++) {
1826 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
1827 q_vector->rx.itr_idx);
1834 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
1835 * @vsi: the VSI being changed
1837 int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
1839 struct ice_hw *hw = &vsi->back->hw;
1840 struct ice_vsi_ctx *ctxt;
1841 enum ice_status status;
1844 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1848 /* Here we are configuring the VSI to let the driver add VLAN tags by
1849 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
1850 * insertion happens in the Tx hot path, in ice_tx_map.
1852 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
1854 /* Preserve existing VLAN strip setting */
1855 ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
1856 ICE_AQ_VSI_VLAN_EMOD_M);
1858 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
1860 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
1862 dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %s aq_err %s\n",
1863 ice_stat_str(status),
1864 ice_aq_str(hw->adminq.sq_last_status));
1869 vsi->info.vlan_flags = ctxt->info.vlan_flags;
1876 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
1877 * @vsi: the VSI being changed
1878 * @ena: boolean value indicating if this is an enable or disable request
1880 int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
1882 struct ice_hw *hw = &vsi->back->hw;
1883 struct ice_vsi_ctx *ctxt;
1884 enum ice_status status;
1887 /* do not allow modifying VLAN stripping when a port VLAN is configured
1893 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1897 /* Here we are configuring what the VSI should do with the VLAN tag in
1898 * the Rx packet. We can either leave the tag in the packet or put it in
1899 * the Rx descriptor.
1902 /* Strip VLAN tag from Rx packet and put it in the desc */
1903 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
1905 /* Disable stripping. Leave tag in packet */
1906 ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
1908 /* Allow all packets untagged/tagged */
1909 ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
1911 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
1913 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
1915 dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %s aq_err %s\n",
1916 ena, ice_stat_str(status),
1917 ice_aq_str(hw->adminq.sq_last_status));
1922 vsi->info.vlan_flags = ctxt->info.vlan_flags;
1929 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
1930 * @vsi: the VSI whose rings are to be enabled
1932 * Returns 0 on success and a negative value on error
1934 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
1936 return ice_vsi_ctrl_all_rx_rings(vsi, true);
1940 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
1941 * @vsi: the VSI whose rings are to be disabled
1943 * Returns 0 on success and a negative value on error
1945 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
1947 return ice_vsi_ctrl_all_rx_rings(vsi, false);
1951 * ice_vsi_stop_tx_rings - Disable Tx rings
1952 * @vsi: the VSI being configured
1953 * @rst_src: reset source
1954 * @rel_vmvf_num: Relative ID of VF/VM
1955 * @rings: Tx ring array to be stopped
1958 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1959 u16 rel_vmvf_num, struct ice_ring **rings)
1963 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
1966 for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
1967 struct ice_txq_meta txq_meta = { };
1970 if (!rings || !rings[q_idx])
1973 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
1974 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
1975 rings[q_idx], &txq_meta);
1985 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
1986 * @vsi: the VSI being configured
1987 * @rst_src: reset source
1988 * @rel_vmvf_num: Relative ID of VF/VM
1991 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1994 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
1998 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
1999 * @vsi: the VSI being configured
2001 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
2003 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
2007 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
2008 * @vsi: VSI to check whether or not VLAN pruning is enabled.
2010 * returns true if Rx VLAN pruning is enabled and false otherwise.
2012 bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
2017 return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
2021 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
2022 * @vsi: VSI to enable or disable VLAN pruning on
2023 * @ena: set to true to enable VLAN pruning and false to disable it
2024 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
2026 * returns 0 if VSI is updated, negative otherwise
2028 int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
2030 struct ice_vsi_ctx *ctxt;
2037 /* Don't enable VLAN pruning if the netdev is currently in promiscuous
2038 * mode. VLAN pruning will be enabled when the interface exits
2039 * promiscuous mode if any VLAN filters are active.
2041 if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena)
2045 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
2049 ctxt->info = vsi->info;
2052 ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2054 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2057 ctxt->info.valid_sections =
2058 cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
2060 status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
2062 netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %s, aq_err = %s\n",
2063 ena ? "En" : "Dis", vsi->idx, vsi->vsi_num,
2064 ice_stat_str(status),
2065 ice_aq_str(pf->hw.adminq.sq_last_status));
2069 vsi->info.sw_flags2 = ctxt->info.sw_flags2;
2079 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2081 struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
2083 vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
2084 vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
2088 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
2089 * @vsi: VSI to set the q_vectors register index on
2092 ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
2096 if (!vsi || !vsi->q_vectors)
2099 ice_for_each_q_vector(vsi, i) {
2100 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2103 dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n",
2108 if (vsi->type == ICE_VSI_VF) {
2109 struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
2111 q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
2114 q_vector->v_idx + vsi->base_vector;
2121 ice_for_each_q_vector(vsi, i) {
2122 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2125 q_vector->reg_idx = 0;
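/* Example of the resulting layout: a PF VSI with base_vector = 5 maps
 * q_vectors 0..N to reg_idx 5..5+N, while a VF VSI goes through
 * ice_calc_vf_reg_idx() because vector 0 in the VF space is reserved for
 * the OICR (see the comment in ice_vsi_cfg_msix()).
 */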
2132 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
2133 * @vsi: the VSI being configured
2134 * @tx: bool to determine Tx or Rx rule
2135 * @create: bool to determine create or remove Rule
2137 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2139 enum ice_status (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
2140 enum ice_sw_fwd_act_type act);
2141 struct ice_pf *pf = vsi->back;
2142 enum ice_status status;
2145 dev = ice_pf_to_dev(pf);
2146 eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
2149 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
2152 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
2153 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
2156 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
2162 dev_err(dev, "Failed %s %s LLDP rule on VSI %i, error: %s\n",
2163 create ? "adding" : "removing", tx ? "TX" : "RX",
2164 vsi->vsi_num, ice_stat_str(status));
2168 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
2169 * @vsi: pointer to the VSI
2171 * This function will allocate a new scheduler aggregator node if needed
2172 * and will move the specified VSI into it.
2174 static void ice_set_agg_vsi(struct ice_vsi *vsi)
2176 struct device *dev = ice_pf_to_dev(vsi->back);
2177 struct ice_agg_node *agg_node_iter = NULL;
2178 u32 agg_id = ICE_INVALID_AGG_NODE_ID;
2179 struct ice_agg_node *agg_node = NULL;
2180 int node_offset, max_agg_nodes = 0;
2181 struct ice_port_info *port_info;
2182 struct ice_pf *pf = vsi->back;
2183 u32 agg_node_id_start = 0;
2184 enum ice_status status;
2186 /* create (as needed) scheduler aggregator node and move VSI into
2187 * corresponding aggregator node
2188 * - the PF aggregator node contains VSIs of type _PF and _CTRL
2189 * - VF aggregator nodes will contain VF VSIs
2191 port_info = pf->hw.port_info;
2195 switch (vsi->type) {
2199 max_agg_nodes = ICE_MAX_PF_AGG_NODES;
2200 agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
2201 agg_node_iter = &pf->pf_agg_node[0];
2204 /* a user can create 'n' VFs on a given PF, but the max number of
2205 * children per aggregator node is only 64. The following code handles
2206 * the aggregator(s) for VF VSIs: it either selects an agg_node that
2207 * was already created, provided num_vsis < 64, or otherwise
2208 * selects the next available node, which will be created
2210 max_agg_nodes = ICE_MAX_VF_AGG_NODES;
2211 agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
2212 agg_node_iter = &pf->vf_agg_node[0];
2215 /* other VSI type, handle later if needed */
2216 dev_dbg(dev, "unexpected VSI type %s\n",
2217 ice_vsi_type_str(vsi->type));
2221 /* find the appropriate aggregator node */
2222 for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
2223 /* see if we can find space in previously created
2224 * node if num_vsis < 64, otherwise skip
2226 if (agg_node_iter->num_vsis &&
2227 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
2232 if (agg_node_iter->valid &&
2233 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
2234 agg_id = agg_node_iter->agg_id;
2235 agg_node = agg_node_iter;
2239 /* find unclaimed agg_id */
2240 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
2241 agg_id = node_offset + agg_node_id_start;
2242 agg_node = agg_node_iter;
2245 /* move to next agg_node */
2252 /* if selected aggregator node was not created, create it */
2253 if (!agg_node->valid) {
2254 status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
2255 (u8)vsi->tc_cfg.ena_tc);
2257 dev_err(dev, "unable to create aggregator node with agg_id %u\n",
2261 /* aggregator node is created, store the needed info */
2262 agg_node->valid = true;
2263 agg_node->agg_id = agg_id;
2266 /* move VSI to corresponding aggregator node */
2267 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
2268 (u8)vsi->tc_cfg.ena_tc);
2270 dev_err(dev, "unable to move VSI idx %u into aggregator %u node\n",
2275 /* keep active children count for aggregator node */
2276 agg_node->num_vsis++;
2278 /* cache the 'agg_id' in the VSI so that, after a reset, the VSI will be
2279 * moved back to its aggregator node
2281 vsi->agg_node = agg_node;
2282 dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
2283 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
2284 vsi->agg_node->num_vsis);
2288 * ice_vsi_setup - Set up a VSI by a given type
2289 * @pf: board private structure
2290 * @pi: pointer to the port_info instance
2291 * @vsi_type: VSI type
2292 * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
2293 * used only for the ICE_VSI_VF VSI type. For other VSI types, callers
2294 * should fill in ICE_INVAL_VFID as input.
2296 * This allocates the sw VSI structure and its queue resources.
2298 * Returns pointer to the successfully allocated and configured VSI sw struct on
2299 * success, NULL on failure.
2302 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
2303 enum ice_vsi_type vsi_type, u16 vf_id)
2305 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2306 struct device *dev = ice_pf_to_dev(pf);
2307 enum ice_status status;
2308 struct ice_vsi *vsi;
2311 if (vsi_type == ICE_VSI_VF)
2312 vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
2314 vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
2317 dev_err(dev, "could not allocate VSI\n");
2321 vsi->port_info = pi;
2322 vsi->vsw = pf->first_sw;
2323 if (vsi->type == ICE_VSI_PF)
2324 vsi->ethtype = ETH_P_PAUSE;
2326 if (vsi->type == ICE_VSI_VF)
2329 ice_alloc_fd_res(vsi);
2331 if (ice_vsi_get_qs(vsi)) {
2332 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2334 goto unroll_vsi_alloc;
2337 /* set RSS capabilities */
2338 ice_vsi_set_rss_params(vsi);
2340 /* set TC configuration */
2341 ice_vsi_set_tc_cfg(vsi);
2343 /* create the VSI */
2344 ret = ice_vsi_init(vsi, true);
	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vector_base;

		/* Always add VLAN ID 0 switch rule by default. This is needed
		 * in order to allow all untagged and 0 tagged priority traffic
		 * if Rx VLAN pruning is enabled. Also there are cases where we
		 * don't get the call to add VLAN 0 via ice_vlan_rx_add_vid()
		 * so this handles those cases (i.e. adding the PF to a bridge
		 * without the 8021q module loaded).
		 */
		ret = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
		if (ret)
			goto unroll_clear_rings;

		ice_vsi_map_rings_to_vectors(vsi);

		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
		if (vsi->type != ICE_VSI_CTRL)
			/* Do not exit if configuring RSS had an issue, at
			 * least receive traffic on first queue. Hence no
			 * need to capture return value
			 */
			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
				ice_vsi_cfg_rss_lut_key(vsi);
				ice_vsi_set_rss_flow_fld(vsi);
			}
		ice_init_arfs(vsi);
		break;
	case ICE_VSI_VF:
		/* VF driver will take care of creating netdev for this type and
		 * map queues to vectors through Virtchnl, PF driver only
		 * creates a VSI and corresponding structures for bookkeeping
		 * purpose
		 */
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		/* Do not exit if configuring RSS had an issue, at least
		 * receive traffic on first queue. Hence no need to capture
		 * return value
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			ice_vsi_cfg_rss_lut_key(vsi);
			ice_vsi_set_vf_rss_flow_fld(vsi);
		}
		break;
	case ICE_VSI_LB:
		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vsi_init;
		break;
	default:
		/* clean up the resources and exit */
		goto unroll_vsi_init;
	}
	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->alloc_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "VSI %d failed lan queue config, error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		goto unroll_clear_rings;
	}

	/* Add a switch rule to drop all Tx Flow Control Frames, of lookup
	 * type ETHERTYPE, from VSIs, and restrict malicious VFs from sending
	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
	 * The rule is added once for the PF VSI in order to create the
	 * appropriate recipe, since VSI/VSI list is ignored with drop action.
	 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
	 * be dropped so that VFs cannot send LLDP packets to reconfig DCB
	 * settings in the HW.
	 */
	if (!ice_is_safe_mode(pf))
		if (vsi->type == ICE_VSI_PF) {
			ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
					 ICE_DROP_PACKET);
			ice_cfg_sw_lldp(vsi, true, true);
		}

	if (!vsi->agg_node)
		ice_set_agg_vsi(vsi);
	return vsi;
unroll_clear_rings:
	ice_vsi_clear_rings(vsi);
unroll_vector_base:
	/* reclaim SW interrupts back to the common pool */
	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
	pf->num_avail_sw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
	ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
	ice_vsi_delete(vsi);
unroll_get_qs:
	ice_vsi_put_qs(vsi);
unroll_vsi_alloc:
	if (vsi_type == ICE_VSI_VF)
		ice_enable_lag(pf->lag);
	ice_vsi_clear(vsi);

	return NULL;
}
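/* Illustrative sketch, not part of the driver: a minimal caller of
 * ice_vsi_setup() for the PF VSI, using the PF's port_info and
 * ICE_INVAL_VFID since no VF is involved. The wrapper name and the
 * ICE_VSI_LIB_EXAMPLES guard are hypothetical and never defined, so this
 * block is compiled out.
 */
#ifdef ICE_VSI_LIB_EXAMPLES
static struct ice_vsi *ice_example_pf_vsi_setup(struct ice_pf *pf)
{
	struct ice_vsi *vsi;

	/* ice_vsi_setup() unrolls all partial allocations on failure */
	vsi = ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_PF, ICE_INVAL_VFID);
	if (!vsi)
		dev_err(ice_pf_to_dev(pf), "PF VSI setup failed\n");

	return vsi;
}
#endif /* ICE_VSI_LIB_EXAMPLES */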
/**
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
 * @vsi: the VSI being cleaned up
 */
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0, rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;

		wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
		wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
			if (ice_is_xdp_ena_vsi(vsi)) {
				u32 xdp_txq = txq + vsi->num_xdp_txq;

				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
			}
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
			rxq++;
		}
	}

	ice_flush(hw);
}
/**
 * ice_vsi_free_irq - Free the IRQ association with the OS
 * @vsi: the VSI being configured
 */
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int i;

	if (!vsi->q_vectors || !vsi->irqs_ready)
		return;

	ice_vsi_release_msix(vsi);
	if (vsi->type == ICE_VSI_VF)
		return;

	vsi->irqs_ready = false;
	ice_for_each_q_vector(vsi, i) {
		u16 vector = i + base;
		int irq_num;

		irq_num = pf->msix_entries[vector].vector;

		/* free only the irqs that were actually requested */
		if (!vsi->q_vectors[i] ||
		    !(vsi->q_vectors[i]->num_ring_tx ||
		      vsi->q_vectors[i]->num_ring_rx))
			continue;

		/* clear the affinity notifier in the IRQ descriptor */
		irq_set_affinity_notifier(irq_num, NULL);

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(irq_num, NULL);
		synchronize_irq(irq_num);
		devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
	}
}
/**
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			ice_free_tx_ring(vsi->tx_rings[i]);
}
/**
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	ice_for_each_rxq(vsi, i)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			ice_free_rx_ring(vsi->rx_rings[i]);
}
/**
 * ice_vsi_close - Shut down a VSI
 * @vsi: the VSI being shut down
 */
void ice_vsi_close(struct ice_vsi *vsi)
{
	if (!test_and_set_bit(__ICE_DOWN, vsi->state))
		ice_down(vsi);

	ice_vsi_free_irq(vsi);
	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);
}
/**
 * ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed
 * @locked: is the rtnl_lock already held
 */
int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
	int err = 0;

	if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
		return 0;

	clear_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			err = ice_open_internal(vsi->netdev);

			if (!locked)
				rtnl_unlock();
		}
	} else if (vsi->type == ICE_VSI_CTRL) {
		err = ice_vsi_open_ctrl(vsi);
	}

	return err;
}
/**
 * ice_dis_vsi - pause a VSI
 * @vsi: the VSI being paused
 * @locked: is the rtnl_lock already held
 */
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
	if (test_bit(__ICE_DOWN, vsi->state))
		return;

	set_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			ice_vsi_close(vsi);

			if (!locked)
				rtnl_unlock();
		} else {
			ice_vsi_close(vsi);
		}
	} else if (vsi->type == ICE_VSI_CTRL) {
		ice_vsi_close(vsi);
	}
}
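/* Illustrative sketch, not part of the driver: ice_dis_vsi()/ice_ena_vsi()
 * are designed to bracket work that needs quiesced queues. A caller that
 * does not already hold the rtnl lock passes locked = false so the helpers
 * can take it around the open/close paths. Wrapper name and guard are
 * hypothetical; the block is compiled out.
 */
#ifdef ICE_VSI_LIB_EXAMPLES
static int ice_example_pause_resume(struct ice_vsi *vsi)
{
	/* sets __ICE_NEEDS_RESTART and closes the VSI if it was running */
	ice_dis_vsi(vsi, false);

	/* ... reconfigure hardware while traffic is stopped ... */

	/* clears __ICE_NEEDS_RESTART and reopens the VSI if appropriate */
	return ice_ena_vsi(vsi, false);
}
#endif /* ICE_VSI_LIB_EXAMPLES */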
/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
	int base = vsi->base_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* disable interrupt causation from each queue */
	if (vsi->tx_rings) {
		ice_for_each_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				u16 reg;

				reg = vsi->tx_rings[i]->reg_idx;
				val = rd32(hw, QINT_TQCTL(reg));
				val &= ~QINT_TQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_TQCTL(reg), val);
			}
		}
	}

	if (vsi->rx_rings) {
		ice_for_each_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				u16 reg;

				reg = vsi->rx_rings[i]->reg_idx;
				val = rd32(hw, QINT_RQCTL(reg));
				val &= ~QINT_RQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_RQCTL(reg), val);
			}
		}
	}

	/* disable each interrupt */
	ice_for_each_q_vector(vsi, i) {
		if (!vsi->q_vectors[i])
			continue;
		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
	}

	ice_flush(hw);

	/* don't call synchronize_irq() for VF's from the host */
	if (vsi->type == ICE_VSI_VF)
		return;

	ice_for_each_q_vector(vsi, i)
		synchronize_irq(pf->msix_entries[i + base].vector);
}
/**
 * ice_napi_del - Remove NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be removed
 */
void ice_napi_del(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}
/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */
int ice_vsi_release(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	if (!vsi->back)
		return -ENODEV;
	pf = vsi->back;

	/* do not unregister while driver is in the reset recovery pending
	 * state. Since reset/rebuild happens through PF service task workqueue,
	 * it's not a good idea to unregister netdev that is associated to the
	 * PF that is running the work queue items currently. This is done to
	 * avoid check_flush_dependency() warning on this wq
	 */
	if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
		unregister_netdev(vsi->netdev);
		ice_devlink_destroy_port(vsi);
	}

	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_rss_clean(vsi);

	/* Disable VSI and free resources */
	if (vsi->type != ICE_VSI_LB)
		ice_vsi_dis_irq(vsi);
	ice_vsi_close(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and how
	 * many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
	}

	if (!ice_is_safe_mode(pf)) {
		if (vsi->type == ICE_VSI_PF) {
			ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
					    ICE_DROP_PACKET);
			ice_cfg_sw_lldp(vsi, true, false);
			/* The Rx rule only exists (and thus needs removing)
			 * if the LLDP FW engine is currently stopped
			 */
			if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
				ice_cfg_sw_lldp(vsi, false, false);
		}
	}

	ice_fltr_remove_all(vsi);
	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);

	/* make sure unregister_netdev() was called by checking __ICE_DOWN */
	if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	if (vsi->type == ICE_VSI_VF &&
	    vsi->agg_node && vsi->agg_node->valid)
		vsi->agg_node->num_vsis--;
	ice_vsi_clear_rings(vsi);

	ice_vsi_put_qs(vsi);

	/* retain SW VSI data structure since it is needed to unregister and
	 * free VSI netdev when PF is not in reset recovery pending state,
	 * for ex: during rmmod.
	 */
	if (!ice_is_reset_in_progress(pf->state))
		ice_vsi_clear(vsi);

	return 0;
}
/**
 * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
 * @q_vector: pointer to q_vector which is being updated
 * @coalesce: pointer to array of struct with stored coalesce
 *
 * Set coalesce param in q_vector and update these parameters in HW.
 */
static void
ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
				struct ice_coalesce_stored *coalesce)
{
	struct ice_ring_container *rx_rc = &q_vector->rx;
	struct ice_ring_container *tx_rc = &q_vector->tx;
	struct ice_hw *hw = &q_vector->vsi->back->hw;

	tx_rc->itr_setting = coalesce->itr_tx;
	rx_rc->itr_setting = coalesce->itr_rx;

	/* dynamic ITR values will be updated during Tx/Rx */
	if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
		wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(tx_rc->itr_setting) >>
		     ICE_ITR_GRAN_S);
	if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
		wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rx_rc->itr_setting) >>
		     ICE_ITR_GRAN_S);

	q_vector->intrl = coalesce->intrl;
	wr32(hw, GLINT_RATE(q_vector->reg_idx),
	     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
}
/**
 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
 * @vsi: VSI connected with q_vectors
 * @coalesce: array of struct with stored coalesce
 *
 * Returns array size.
 */
static int
ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
			     struct ice_coalesce_stored *coalesce)
{
	int i;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		coalesce[i].itr_tx = q_vector->tx.itr_setting;
		coalesce[i].itr_rx = q_vector->rx.itr_setting;
		coalesce[i].intrl = q_vector->intrl;
	}

	return vsi->num_q_vectors;
}
/**
 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
 * @vsi: VSI connected with q_vectors
 * @coalesce: pointer to array of struct with stored coalesce
 * @size: size of coalesce array
 *
 * Before this function, ice_vsi_rebuild_get_coalesce should be called to save
 * ITR params in arrays. If size is 0 or coalesce wasn't stored, set coalesce
 * to default values.
 */
static void
ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
			     struct ice_coalesce_stored *coalesce, int size)
{
	int i;

	if ((size && !coalesce) || !vsi)
		return;

	for (i = 0; i < size && i < vsi->num_q_vectors; i++)
		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
						&coalesce[i]);

	/* number of q_vectors increased, so assume coalesce settings were
	 * changed globally (i.e. ethtool -C eth0 instead of per-queue) and use
	 * the previous settings from q_vector 0 for all of the new q_vectors
	 */
	for (; i < vsi->num_q_vectors; i++)
		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
						&coalesce[0]);
}
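/* Illustrative sketch, not part of the driver: the intended calling order
 * for the coalesce helpers above, mirroring what ice_vsi_rebuild() does to
 * preserve user ITR/INTRL settings across a q_vector teardown. Wrapper name
 * and guard are hypothetical; the block is compiled out.
 */
#ifdef ICE_VSI_LIB_EXAMPLES
static void ice_example_preserve_coalesce(struct ice_vsi *vsi)
{
	struct ice_coalesce_stored *coalesce;
	int prev_num_q_vectors;

	coalesce = kcalloc(vsi->num_q_vectors, sizeof(*coalesce), GFP_KERNEL);
	if (!coalesce)
		return;

	prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);

	/* ... free and re-allocate the VSI's q_vectors here ... */

	/* restore saved settings; q_vector 0's settings seed any new ones */
	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
	kfree(coalesce);
}
#endif /* ICE_VSI_LIB_EXAMPLES */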
/**
 * ice_vsi_rebuild - Rebuild VSI after reset
 * @vsi: VSI to be rebuilt
 * @init_vsi: is this an initialization or a reconfigure of the VSI
 *
 * Returns 0 on success and negative value on failure
 */
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_coalesce_stored *coalesce;
	int prev_num_q_vectors = 0;
	struct ice_vf *vf = NULL;
	enum ice_status status;
	struct ice_pf *pf;
	int ret, i;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	if (vsi->type == ICE_VSI_VF)
		vf = &pf->vf[vsi->vf_id];

	coalesce = kcalloc(vsi->num_q_vectors,
			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
	if (coalesce)
		prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
								  coalesce);

	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_free_q_vectors(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and how
	 * many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
		vsi->base_vector = 0;
	}

	if (ice_is_xdp_ena_vsi(vsi))
		/* return value check can be skipped here, it always returns
		 * 0 if reset is in progress
		 */
		ice_destroy_xdp_rings(vsi);
	ice_vsi_put_qs(vsi);
	ice_vsi_clear_rings(vsi);
	ice_vsi_free_arrays(vsi);
	if (vsi->type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf->vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	ret = ice_vsi_alloc_arrays(vsi);
	if (ret < 0)
		goto err_vsi;

	ice_vsi_get_qs(vsi);

	ice_alloc_fd_res(vsi);
	ice_vsi_set_tc_cfg(vsi);

	/* Initialize VSI struct elements and create VSI in FW */
	ret = ice_vsi_init(vsi, init_vsi);
	if (ret < 0)
		goto err_vsi;
	switch (vsi->type) {
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		ice_vsi_map_rings_to_vectors(vsi);
		if (ice_is_xdp_ena_vsi(vsi)) {
			vsi->num_xdp_txq = vsi->alloc_rxq;
			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
			if (ret)
				goto err_vectors;
		}
		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
		if (vsi->type != ICE_VSI_CTRL)
			/* Do not exit if configuring RSS had an issue, at
			 * least receive traffic on first queue. Hence no
			 * need to capture return value
			 */
			if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
				ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_VF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		break;
	default:
		break;
	}
	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++) {
		max_txqs[i] = vsi->alloc_txq;

		if (ice_is_xdp_ena_vsi(vsi))
			max_txqs[i] += vsi->num_xdp_txq;
	}

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		if (init_vsi) {
			ret = -EIO;
			goto err_vectors;
		} else {
			return ice_schedule_reset(pf, ICE_RESET_PFR);
		}
	}
	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
	kfree(coalesce);

	return 0;

err_vectors:
	ice_vsi_free_q_vectors(vsi);
err_rings:
	if (vsi->netdev) {
		vsi->current_netdev_flags = 0;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_vsi:
	ice_vsi_clear(vsi);
	set_bit(__ICE_RESET_FAILED, pf->state);
	kfree(coalesce);
	return ret;
}
/**
 * ice_is_reset_in_progress - check for a reset in progress
 * @state: PF state field
 */
bool ice_is_reset_in_progress(unsigned long *state)
{
	return test_bit(__ICE_RESET_OICR_RECV, state) ||
	       test_bit(__ICE_PFR_REQ, state) ||
	       test_bit(__ICE_CORER_REQ, state) ||
	       test_bit(__ICE_GLOBR_REQ, state);
}
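/* Illustrative sketch, not part of the driver: callers use this predicate to
 * defer work that the reset path owns. Wrapper name and guard are
 * hypothetical; the block is compiled out.
 */
#ifdef ICE_VSI_LIB_EXAMPLES
static bool ice_example_can_reconfig(struct ice_pf *pf)
{
	/* skip reconfiguration while a PFR/CORER/GLOBR request is pending */
	return !ice_is_reset_in_progress(pf->state);
}
#endif /* ICE_VSI_LIB_EXAMPLES */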
#ifdef CONFIG_DCB
/**
 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
 * @vsi: VSI being configured
 * @ctx: the context buffer returned from AQ VSI update command
 */
static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
	vsi->info.mapping_flags = ctx->info.mapping_flags;
	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
/**
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
 * @vsi: VSI to be configured
 * @ena_tc: TC bitmap
 *
 * VSI queues are expected to be quiesced before calling this function
 */
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctx;
	enum ice_status status;
	struct device *dev;
	int i, ret = 0;
	u8 num_tc = 0;

	dev = ice_pf_to_dev(pf);

	ice_for_each_traffic_class(i) {
		/* build bitmap of enabled TCs */
		if (ena_tc & BIT(i))
			num_tc++;
		/* populate max_txqs per TC */
		max_txqs[i] = vsi->alloc_txq;
	}

	vsi->tc_cfg.ena_tc = ena_tc;
	vsi->tc_cfg.numtc = num_tc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vf_num = 0;
	ctx->info = vsi->info;

	ice_vsi_setup_q_map(vsi, ctx);

	/* must indicate which section of VSI context is being modified */
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
	if (status) {
		dev_info(dev, "Failed VSI Update\n");
		ret = -EIO;
		goto out;
	}

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "VSI %d failed TC config, error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		ret = -EIO;
		goto out;
	}
	ice_vsi_update_q_map(vsi, ctx);
	vsi->info.valid_sections = 0;

	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
	kfree(ctx);
	return ret;
}
#endif /* CONFIG_DCB */
/**
 * ice_update_ring_stats - Update ring statistics
 * @ring: ring to update
 * @cont: used to increment per-vector counters
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 *
 * This function assumes that the caller has acquired a u64_stats_sync lock.
 */
static void
ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
		      u64 pkts, u64 bytes)
{
	ring->stats.bytes += bytes;
	ring->stats.pkts += pkts;
	cont->total_bytes += bytes;
	cont->total_pkts += pkts;
}
/**
 * ice_update_tx_ring_stats - Update Tx ring specific counters
 * @tx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&tx_ring->syncp);
	ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes);
	u64_stats_update_end(&tx_ring->syncp);
}
/**
 * ice_update_rx_ring_stats - Update Rx ring specific counters
 * @rx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&rx_ring->syncp);
	ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes);
	u64_stats_update_end(&rx_ring->syncp);
}
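/* Illustrative sketch, not part of the driver: the two wrappers above own
 * the u64_stats_update_begin()/end() critical section, so a clean-up or
 * poll routine only passes its per-poll totals. The packet/byte counts and
 * wrapper name are hypothetical; the block is compiled out.
 */
#ifdef ICE_VSI_LIB_EXAMPLES
static void ice_example_report_poll_totals(struct ice_ring *tx_ring,
					   struct ice_ring *rx_ring)
{
	u64 tx_pkts = 32, tx_bytes = 48128;	/* totals from this poll */
	u64 rx_pkts = 64, rx_bytes = 96256;

	ice_update_tx_ring_stats(tx_ring, tx_pkts, tx_bytes);
	ice_update_rx_ring_stats(rx_ring, rx_pkts, rx_bytes);
}
#endif /* ICE_VSI_LIB_EXAMPLES */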
/**
 * ice_status_to_errno - convert from enum ice_status to Linux errno
 * @err: ice_status value to convert
 */
int ice_status_to_errno(enum ice_status err)
{
	switch (err) {
	case ICE_SUCCESS:
		return 0;
	case ICE_ERR_DOES_NOT_EXIST:
		return -ENOENT;
	case ICE_ERR_OUT_OF_RANGE:
		return -ENOTTY;
	case ICE_ERR_PARAM:
		return -EINVAL;
	case ICE_ERR_NO_MEMORY:
		return -ENOMEM;
	case ICE_ERR_MAX_LIMIT:
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}
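/* Illustrative sketch, not part of the driver: ice_status_to_errno() lets a
 * caller that must return a Linux errno wrap an AQ helper that reports
 * enum ice_status. Wrapper name and guard are hypothetical; the block is
 * compiled out.
 */
#ifdef ICE_VSI_LIB_EXAMPLES
static int ice_example_update_vsi(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
	enum ice_status status;

	status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctx, NULL);
	return ice_status_to_errno(status);
}
#endif /* ICE_VSI_LIB_EXAMPLES */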
/**
 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
 * @sw: switch to check if its default forwarding VSI is free
 *
 * Returns true if the default forwarding VSI is already being used, else
 * returns false, signalling that it's available to use.
 */
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
{
	return (sw->dflt_vsi && sw->dflt_vsi_ena);
}
/**
 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
 * @sw: switch for the default forwarding VSI to compare against
 * @vsi: VSI to compare against default forwarding VSI
 *
 * If the VSI passed in is the default forwarding VSI then return true, else
 * return false
 */
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
	return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
}
/**
 * ice_set_dflt_vsi - set the default forwarding VSI
 * @sw: switch used to assign the default forwarding VSI
 * @vsi: VSI getting set as the default forwarding VSI on the switch
 *
 * If the VSI passed in is already the default VSI and it's enabled just
 * return success.
 *
 * If there is already a default VSI on the switch and it's enabled then return
 * -EEXIST since there can only be one default VSI per switch.
 *
 * Otherwise try to set the VSI passed in as the switch's default VSI and
 * return the result.
 */
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
	enum ice_status status;
	struct device *dev;

	if (!sw || !vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	/* the VSI passed in is already the default VSI */
	if (ice_is_vsi_dflt_vsi(sw, vsi)) {
		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	/* another VSI is already the default VSI for this switch */
	if (ice_is_dflt_vsi_in_use(sw)) {
		dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
			sw->dflt_vsi->vsi_num);
		return -EEXIST;
	}

	status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %s\n",
			vsi->vsi_num, ice_stat_str(status));
		return -EIO;
	}

	sw->dflt_vsi = vsi;
	sw->dflt_vsi_ena = true;

	return 0;
}
/**
 * ice_clear_dflt_vsi - clear the default forwarding VSI
 * @sw: switch used to clear the default VSI
 *
 * If the switch has no default VSI or it's not enabled then return error.
 *
 * Otherwise try to clear the default VSI and return the result.
 */
int ice_clear_dflt_vsi(struct ice_sw *sw)
{
	struct ice_vsi *dflt_vsi;
	enum ice_status status;
	struct device *dev;

	if (!sw)
		return -EINVAL;

	dev = ice_pf_to_dev(sw->pf);

	dflt_vsi = sw->dflt_vsi;

	/* there is no default VSI configured */
	if (!ice_is_dflt_vsi_in_use(sw))
		return -ENODEV;

	status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
				  ICE_FLTR_RX);
	if (status) {
		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %s\n",
			dflt_vsi->vsi_num, ice_stat_str(status));
		return -EIO;
	}

	sw->dflt_vsi = NULL;
	sw->dflt_vsi_ena = false;

	return 0;
}
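/* Illustrative sketch, not part of the driver: how a caller might toggle
 * default-VSI ownership when entering/leaving promiscuous mode, using the
 * three helpers above. Wrapper name and guard are hypothetical; the block
 * is compiled out.
 */
#ifdef ICE_VSI_LIB_EXAMPLES
static int ice_example_toggle_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi,
				       bool promisc)
{
	if (promisc)
		/* fails with -EEXIST if another VSI owns the default */
		return ice_set_dflt_vsi(sw, vsi);

	/* only clear the default if this VSI currently owns it */
	if (ice_is_vsi_dflt_vsi(sw, vsi))
		return ice_clear_dflt_vsi(sw);

	return 0;
}
#endif /* ICE_VSI_LIB_EXAMPLES */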